@@ -24,6 +24,6 @@ extern int bbdev_la12xx_logtype;
/* DP Logs, toggled out at compile time if level lower than current level */
#define rte_bbdev_dp_log(level, fmt, args...) \
- RTE_LOG_DP(level, BBDEV_LA12XX, fmt, ## args)
+ RTE_LOG_DP_LINE(level, BBDEV_LA12XX, fmt, ## args)
#endif /* _BBDEV_LA12XX_PMD_LOGS_H_ */
@@ -34,7 +34,7 @@
* DP logs, toggled out at compile time if level lower than current level.
*/
#define CPT_LOG_DP(level, fmt, args...) \
- RTE_LOG_DP(level, CPT, fmt "\n", ## args)
+ RTE_LOG_DP_LINE(level, CPT, fmt, ## args)
#define CPT_LOG_DP_DEBUG(fmt, args...) \
CPT_LOG_DP(DEBUG, fmt, ## args)
@@ -2589,7 +2589,7 @@ fill_sess_aead(struct rte_crypto_sym_xform *xform,
sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
sess->cpt_op |= CPT_OP_AUTH_VERIFY;
} else {
- CPT_LOG_DP_ERR("Unknown aead operation\n");
+ CPT_LOG_DP_ERR("Unknown aead operation");
return -1;
}
switch (aead_form->algo) {
@@ -2658,7 +2658,7 @@ fill_sess_cipher(struct rte_crypto_sym_xform *xform,
ctx->dec_auth = 1;
}
} else {
- CPT_LOG_DP_ERR("Unknown cipher operation\n");
+ CPT_LOG_DP_ERR("Unknown cipher operation");
return -1;
}
@@ -348,13 +348,9 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
- "iv-len=%d data_off: 0x%x\n",
- sym_op->aead.data.offset,
- sym_op->aead.data.length,
- sess->digest_length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d",
+ sym_op->aead.data.offset, sym_op->aead.data.length, sess->digest_length);
+ DPAA2_SEC_DP_DEBUG("iv-len=%d data_off: 0x%x", sess->iv.length, sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
DPAA2_SET_FLE_SG_EXT(op_fle);
@@ -506,13 +502,9 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
- "iv-len=%d data_off: 0x%x\n",
- sym_op->aead.data.offset,
- sym_op->aead.data.length,
- sess->digest_length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d",
+ sym_op->aead.data.offset, sym_op->aead.data.length, sess->digest_length);
+ DPAA2_SEC_DP_DEBUG("iv-len=%d data_off: 0x%x", sess->iv.length, sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
@@ -630,16 +622,11 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- DPAA2_SEC_DP_DEBUG(
- "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
- "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
- sym_op->auth.data.offset,
- sym_op->auth.data.length,
- sess->digest_length,
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG("AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d",
+ sym_op->auth.data.offset, sym_op->auth.data.length, sess->digest_length);
+ DPAA2_SEC_DP_DEBUG("cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x",
+ sym_op->cipher.data.offset, sym_op->cipher.data.length, sess->iv.length,
+ sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
DPAA2_SET_FLE_SG_EXT(op_fle);
@@ -790,16 +777,11 @@ build_authenc_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- DPAA2_SEC_DP_DEBUG(
- "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
- "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
- sym_op->auth.data.offset,
- sym_op->auth.data.length,
- sess->digest_length,
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG("AUTHENC: auth_off: 0x%x/length %d, digest-len=%d",
+ sym_op->auth.data.offset, sym_op->auth.data.length, sess->digest_length);
+ DPAA2_SEC_DP_DEBUG("cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x",
+ sym_op->cipher.data.offset, sym_op->cipher.data.length,
+ sess->iv.length, sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
@@ -1144,13 +1126,8 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
flc = &priv->flc_desc[0].flc;
- DPAA2_SEC_DP_DEBUG(
- "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
- " data_off: 0x%x\n",
- data_offset,
- data_len,
- sess->iv.length,
- sym_op->m_src->data_off);
+	DPAA2_SEC_DP_DEBUG("CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d data_off: 0x%x",
+ data_offset, data_len, sess->iv.length, sym_op->m_src->data_off);
/* o/p fle */
DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
@@ -1171,10 +1148,8 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
}
DPAA2_SET_FLE_FIN(sge);
- DPAA2_SEC_DP_DEBUG(
- "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
- flc, fle, fle->addr_hi, fle->addr_lo,
- fle->length);
+ DPAA2_SEC_DP_DEBUG("CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d",
+ flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
/* i/p fle */
mbuf = sym_op->m_src;
@@ -1210,14 +1185,10 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- DPAA2_SEC_DP_DEBUG(
- "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
- " off =%d, len =%d\n",
- DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[bpid].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG("CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d",
+ DPAA2_GET_FD_ADDR(fd), DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd));
return 0;
}
@@ -1290,22 +1261,15 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- DPAA2_SEC_DP_DEBUG(
- "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
- " data_off: 0x%x\n",
- data_offset,
- data_len,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG("CIPHER: cipher_off: 0x%x/length %d, ivlen=%d, data_off: 0x%x",
+ data_offset, data_len, sess->iv.length, sym_op->m_src->data_off);
DPAA2_SET_FLE_ADDR(fle, rte_pktmbuf_iova(dst) + data_offset);
fle->length = data_len + sess->iv.length;
- DPAA2_SEC_DP_DEBUG(
- "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
- flc, fle, fle->addr_hi, fle->addr_lo,
- fle->length);
+ DPAA2_SEC_DP_DEBUG("CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
+ flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
fle++;
@@ -1324,14 +1288,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FLE_FIN(sge);
DPAA2_SET_FLE_FIN(fle);
- DPAA2_SEC_DP_DEBUG(
- "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
- " off =%d, len =%d\n",
- DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[bpid].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG("CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d",
+ DPAA2_GET_FD_ADDR(fd), DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd));
return 0;
}
@@ -1348,12 +1308,12 @@ build_sec_fd(struct rte_crypto_op *op,
} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
sess = SECURITY_GET_SESS_PRIV(op->sym->session);
} else {
- DPAA2_SEC_DP_ERR("Session type invalid\n");
+ DPAA2_SEC_DP_ERR("Session type invalid");
return -ENOTSUP;
}
if (!sess) {
- DPAA2_SEC_DP_ERR("Session not available\n");
+ DPAA2_SEC_DP_ERR("Session not available");
return -EINVAL;
}
@@ -1475,7 +1435,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
bpid = mempool_to_bpid(mb_pool);
ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
if (ret) {
- DPAA2_SEC_DP_DEBUG("FD build failed\n");
+ DPAA2_SEC_DP_DEBUG("FD build failed");
goto skip_tx;
}
ops++;
@@ -1493,7 +1453,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
num_tx += loop;
nb_ops -= loop;
- DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
+ DPAA2_SEC_DP_DEBUG("Enqueue fail");
/* freeing the fle buffers */
while (loop < frames_to_send) {
free_fle(&fd_arr[loop],
@@ -1569,7 +1529,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
- DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+ DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x",
fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
/* we are using the first FLE entry to store Mbuf.
@@ -1602,7 +1562,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
}
DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
- " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
+ " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d",
(void *)dst,
dst->buf_addr,
DPAA2_GET_FD_ADDR(fd),
@@ -1824,7 +1784,7 @@ dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
bpid = mempool_to_bpid(mb_pool);
ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
if (ret) {
- DPAA2_SEC_DP_DEBUG("FD build failed\n");
+ DPAA2_SEC_DP_DEBUG("FD build failed");
goto skip_tx;
}
ops++;
@@ -1841,7 +1801,7 @@ dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
num_tx += loop;
nb_ops -= loop;
- DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
+ DPAA2_SEC_DP_DEBUG("Enqueue fail");
/* freeing the fle buffers */
while (loop < frames_to_send) {
free_fle(&fd_arr[loop],
@@ -1937,7 +1897,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
status = (uint8_t)qbman_result_DQ_flags(dq_storage);
if (unlikely(
(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
- DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
+ DPAA2_SEC_DP_DEBUG("No frame is delivered");
continue;
}
}
@@ -1948,7 +1908,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
if (unlikely(fd->simple.frc)) {
/* TODO Parse SEC errors */
if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
- DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
+ DPAA2_SEC_DP_ERR("SEC returned Error - %x",
fd->simple.frc);
if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
dpaa2_sec_dump(ops[num_rx]);
@@ -1966,7 +1926,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
dpaa2_qp->rx_vq.rx_pkts += num_rx;
- DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
+ DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64, num_rx,
dpaa2_qp->rx_vq.err_pkts);
/*Return the total number of packets received to DPAA2 app*/
return num_rx;
@@ -30,7 +30,7 @@ extern int dpaa2_logtype_sec;
/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA2_SEC_DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, DPAA2_SEC, fmt, ## args)
+ RTE_LOG_DP_LINE(level, DPAA2_SEC, fmt, ## args)
#define DPAA2_SEC_DP_DEBUG(fmt, args...) \
DPAA2_SEC_DP_LOG(DEBUG, fmt, ## args)
@@ -604,11 +604,8 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
flc = &priv->flc_desc[0].flc;
- DPAA2_SEC_DP_DEBUG(
- "RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
- data_offset,
- data_len,
- sess->iv.length);
+ DPAA2_SEC_DP_DEBUG("RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d",
+ data_offset, data_len, sess->iv.length);
/* o/p fle */
DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
@@ -641,10 +638,8 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
}
DPAA2_SET_FLE_FIN(sge);
- DPAA2_SEC_DP_DEBUG(
- "RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
- flc, fle, fle->addr_hi, fle->addr_lo,
- fle->length);
+ DPAA2_SEC_DP_DEBUG("RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d",
+ flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
/* i/p fle */
sge++;
@@ -677,11 +672,9 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- DPAA2_SEC_DP_DEBUG(
- "RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
- DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG("RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d",
+ DPAA2_GET_FD_ADDR(fd), DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
return 0;
}
@@ -811,7 +804,7 @@ sec_fd_to_userdata(const struct qbman_fd *fd)
void *userdata;
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
- DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+ DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x",
fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
/* free the fle memory */
@@ -900,7 +893,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
status = (uint8_t)qbman_result_DQ_flags(dq_storage);
if (unlikely(
(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
- DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
+ DPAA2_SEC_DP_DEBUG("No frame is delivered");
continue;
}
}
@@ -929,7 +922,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
*dequeue_status = 1;
*n_success = num_rx;
- DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+ DPAA2_SEC_DP_DEBUG("SEC Received %d Packets", num_rx);
/*Return the total number of packets received to DPAA2 app*/
return num_rx;
}
@@ -102,7 +102,7 @@ ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
struct qman_fq *fq,
const struct qm_mr_entry *msg)
{
- DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
+ DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x",
fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
@@ -849,8 +849,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
} else {
if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
- DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
- ctx->fd_status);
+ DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
dpaa_sec_dump(ctx, qp);
}
@@ -1921,8 +1920,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
ses = SECURITY_GET_SESS_PRIV(op->sym->session);
break;
default:
- DPAA_SEC_DP_ERR(
- "sessionless crypto op not supported");
+ DPAA_SEC_DP_ERR("sessionless crypto op not supported");
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
@@ -1943,8 +1941,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
}
} else if (unlikely(ses->qp[rte_lcore_id() %
MAX_DPAA_CORES] != qp)) {
- DPAA_SEC_DP_ERR("Old:sess->qp = %p"
- " New qp = %p\n",
+ DPAA_SEC_DP_ERR("Old:sess->qp = %p New qp = %p",
ses->qp[rte_lcore_id() %
MAX_DPAA_CORES], qp);
frames_to_send = loop;
@@ -2054,7 +2051,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
fd->cmd = 0x80000000 |
*((uint32_t *)((uint8_t *)op +
ses->pdcp.hfn_ovd_offset));
- DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
+ DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u",
*((uint32_t *)((uint8_t *)op +
ses->pdcp.hfn_ovd_offset)),
ses->pdcp.hfn_ovd);
@@ -2095,7 +2092,7 @@ dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
dpaa_qp->rx_pkts += num_rx;
dpaa_qp->rx_errs += nb_ops - num_rx;
- DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+ DPAA_SEC_DP_DEBUG("SEC Received %d Packets", num_rx);
return num_rx;
}
@@ -30,7 +30,7 @@ extern int dpaa_logtype_sec;
/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA_SEC_DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, DPAA_SEC, fmt, ## args)
+ RTE_LOG_DP_LINE(level, DPAA_SEC, fmt, ## args)
#define DPAA_SEC_DP_DEBUG(fmt, args...) \
DPAA_SEC_DP_LOG(DEBUG, fmt, ## args)
@@ -761,7 +761,7 @@ build_dpaa_raw_proto_sg(uint8_t *drv_ctx,
fd->cmd = 0x80000000 |
*((uint32_t *)((uint8_t *)userdata +
ses->pdcp.hfn_ovd_offset));
- DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
+ DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u",
*((uint32_t *)((uint8_t *)userdata +
ses->pdcp.hfn_ovd_offset)),
ses->pdcp.hfn_ovd);
@@ -805,8 +805,7 @@ dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
}
} else if (unlikely(ses->qp[rte_lcore_id() %
MAX_DPAA_CORES] != dpaa_qp)) {
- DPAA_SEC_DP_ERR("Old:sess->qp = %p"
- " New qp = %p\n",
+ DPAA_SEC_DP_ERR("Old:sess->qp = %p New qp = %p",
ses->qp[rte_lcore_id() %
MAX_DPAA_CORES], dpaa_qp);
frames_to_send = loop;
@@ -955,7 +954,7 @@ dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
*dequeue_status = 1;
*n_success = num_rx;
- DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+ DPAA_SEC_DP_DEBUG("SEC Received %d Packets", num_rx);
return num_rx;
}
@@ -211,7 +211,7 @@ otx_cpt_ring_dbell(struct cpt_instance *instance, uint16_t count)
static __rte_always_inline void *
get_cpt_inst(struct command_queue *cqueue)
{
- CPT_LOG_DP_DEBUG("CPT queue idx %u\n", cqueue->idx);
+ CPT_LOG_DP_DEBUG("CPT queue idx %u", cqueue->idx);
return &cqueue->qhead[cqueue->idx * CPT_INST_SIZE];
}
@@ -305,9 +305,9 @@ check_nb_command_id(struct cpt_request_info *user_req,
" error, MC completion code : 0x%x", user_req,
ret);
}
- CPT_LOG_DP_DEBUG("MC status %.8x\n",
+ CPT_LOG_DP_DEBUG("MC status %.8x",
*((volatile uint32_t *)user_req->alternate_caddr));
- CPT_LOG_DP_DEBUG("HW status %.8x\n",
+ CPT_LOG_DP_DEBUG("HW status %.8x",
*((volatile uint32_t *)user_req->completion_addr));
} else if ((cptres->s8x.compcode == CPT_8X_COMP_E_SWERR) ||
(cptres->s8x.compcode == CPT_8X_COMP_E_FAULT)) {
@@ -779,7 +779,7 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
(dma_addr_t)dst, (dma_addr_t)src,
length, NULL, NULL);
if (!fsl_comp) {
- DPAA_QDMA_DP_DEBUG("fsl_comp is NULL\n");
+ DPAA_QDMA_DP_DEBUG("fsl_comp is NULL");
return -1;
}
ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);
@@ -27,7 +27,7 @@ extern int dpaa_qdma_logtype;
/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA_QDMA_DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, DPAA_QDMA, "dpaa_qdma: " fmt "\n", ## args)
+ RTE_LOG_DP_LINE(level, DPAA_QDMA, "dpaa_qdma: " fmt, ## args)
#define DPAA_QDMA_DP_DEBUG(fmt, args...) \
DPAA_QDMA_DP_LOG(DEBUG, fmt, ## args)
@@ -607,8 +607,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
}
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- DPAA2_QDMA_DP_WARN(
- "VDQ command not issued.QBMAN busy\n");
+ DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
/* Portal was busy, try again */
continue;
}
@@ -683,8 +682,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
/* issue a volatile dequeue command for next pull */
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- DPAA2_QDMA_DP_WARN(
- "VDQ command is not issued. QBMAN is busy (2)\n");
+ DPAA2_QDMA_DP_WARN("VDQ command is not issued. QBMAN is busy (2)");
continue;
}
break;
@@ -756,8 +754,7 @@ dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- DPAA2_QDMA_DP_WARN(
- "VDQ command not issued. QBMAN busy");
+ DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
/* Portal was busy, try again */
continue;
}
@@ -31,7 +31,7 @@ extern int dpaa2_qdma_logtype;
/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA2_QDMA_DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, DPAA2_QDMA, "dpaa2_qdma: " fmt "\n", ## args)
+ RTE_LOG_DP_LINE(level, DPAA2_QDMA, "dpaa2_qdma: " fmt, ## args)
#define DPAA2_QDMA_DP_DEBUG(fmt, args...) \
DPAA2_QDMA_DP_LOG(DEBUG, fmt, ## args)
@@ -297,7 +297,7 @@ dsw_pmd_priv(const struct rte_eventdev *eventdev)
}
#define DSW_LOG_DP(level, fmt, args...) \
- RTE_LOG_DP(level, EVENTDEV, "[%s] %s() line %u: " fmt, \
+ RTE_LOG_DP_LINE(level, EVENTDEV, "[%s] %s() line %u: " fmt, \
DSW_PMD_NAME, \
__func__, __LINE__, ## args)
@@ -55,7 +55,7 @@ dsw_port_acquire_credits(struct dsw_evdev *dsw, struct dsw_port *port,
return false;
}
- DSW_LOG_DP_PORT(DEBUG, port->id, "Acquired %d tokens from pool.\n",
+ DSW_LOG_DP_PORT(DEBUG, port->id, "Acquired %d tokens from pool.",
acquired_credits);
port->inflight_credits += acquired_credits;
@@ -81,7 +81,7 @@ dsw_port_return_credits(struct dsw_evdev *dsw, struct dsw_port *port,
__ATOMIC_RELAXED);
DSW_LOG_DP_PORT(DEBUG, port->id,
- "Returned %d tokens to pool.\n",
+ "Returned %d tokens to pool.",
return_credits);
}
}
@@ -257,7 +257,7 @@ dsw_port_add_paused_flows(struct dsw_port *port, struct dsw_queue_flow *qfs,
struct dsw_queue_flow *qf = &qfs[i];
DSW_LOG_DP_PORT(DEBUG, port->id,
- "Pausing queue_id %d flow_hash %d.\n",
+ "Pausing queue_id %d flow_hash %d.",
qf->queue_id, qf->flow_hash);
port->paused_flows[port->paused_flows_len] = *qf;
@@ -283,7 +283,7 @@ dsw_port_remove_paused_flow(struct dsw_port *port,
port->paused_flows_len--;
DSW_LOG_DP_PORT(DEBUG, port->id,
- "Unpausing queue_id %d flow_hash %d.\n",
+ "Unpausing queue_id %d flow_hash %d.",
target_qf->queue_id,
target_qf->flow_hash);
@@ -292,7 +292,7 @@ dsw_port_remove_paused_flow(struct dsw_port *port,
}
DSW_LOG_DP_PORT(ERR, port->id,
- "Failed to unpause queue_id %d flow_hash %d.\n",
+ "Failed to unpause queue_id %d flow_hash %d.",
target_qf->queue_id, target_qf->flow_hash);
}
@@ -515,7 +515,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
DSW_LOG_DP_PORT(DEBUG, source_port->id, "Selected queue_id %d "
"flow_hash %d (with flow load %d) for migration "
- "to port %d.\n", candidate_qf->queue_id,
+ "to port %d.", candidate_qf->queue_id,
candidate_qf->flow_hash,
DSW_LOAD_TO_PERCENT(candidate_flow_load),
candidate_port_id);
@@ -560,7 +560,7 @@ dsw_select_emigration_targets(struct dsw_evdev *dsw,
if (*targets_len == 0)
DSW_LOG_DP_PORT(DEBUG, source_port->id,
"For the %d flows considered, no target port "
- "was found.\n", num_bursts);
+ "was found.", num_bursts);
}
static uint8_t
@@ -578,7 +578,7 @@ dsw_schedule(struct dsw_evdev *dsw, uint8_t queue_id, uint16_t flow_hash)
port_id = queue->serving_ports[0];
DSW_LOG_DP(DEBUG, "Event with queue_id %d flow_hash %d is scheduled "
- "to port %d.\n", queue_id, flow_hash, port_id);
+ "to port %d.", queue_id, flow_hash, port_id);
return port_id;
}
@@ -767,7 +767,7 @@ dsw_port_end_emigration(struct dsw_evdev *dsw, struct dsw_port *port,
}
DSW_LOG_DP_PORT(DEBUG, port->id, "Migration completed for "
- "queue_id %d flow_hash %d.\n", queue_id,
+ "queue_id %d flow_hash %d.", queue_id,
flow_hash);
}
@@ -835,11 +835,11 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
if (dsw->num_ports == 1)
return;
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering emigration.\n");
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering emigration.");
if (seen_events_len < DSW_MAX_EVENTS_RECORDED) {
DSW_LOG_DP_PORT(DEBUG, source_port->id, "Not enough events "
- "are recorded to allow for a migration.\n");
+ "are recorded to allow for a migration.");
return;
}
@@ -857,7 +857,7 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
*/
if (source_port->paused_events_len > 0) {
DSW_LOG_DP_PORT(DEBUG, source_port->id, "There are "
- "events in the paus buffer.\n");
+ "events in the paus buffer.");
return;
}
@@ -871,7 +871,7 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
if (source_port->migration_state != DSW_MIGRATION_STATE_IDLE) {
DSW_LOG_DP_PORT(DEBUG, source_port->id,
- "Emigration already in progress.\n");
+ "Emigration already in progress.");
return;
}
@@ -881,7 +881,7 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
*/
if (source_port->in_buffer_len > 0) {
DSW_LOG_DP_PORT(DEBUG, source_port->id, "There are still "
- "events in the input buffer.\n");
+ "events in the input buffer.");
return;
}
@@ -889,7 +889,7 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
__atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
DSW_LOG_DP_PORT(DEBUG, source_port->id,
- "Load %d is below threshold level %d.\n",
+ "Load %d is below threshold level %d.",
DSW_LOAD_TO_PERCENT(source_port_load),
DSW_LOAD_TO_PERCENT(DSW_MIN_SOURCE_LOAD_FOR_MIGRATION));
return;
@@ -904,7 +904,7 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
if (!any_port_below_limit) {
DSW_LOG_DP_PORT(DEBUG, source_port->id,
"Candidate target ports are all too highly "
- "loaded.\n");
+ "loaded.");
return;
}
@@ -916,7 +916,7 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
*/
if (num_bursts < 2) {
DSW_LOG_DP_PORT(DEBUG, source_port->id, "Only a single flow "
- "queue_id %d flow_hash %d has been seen.\n",
+ "queue_id %d flow_hash %d has been seen.",
bursts[0].queue_flow.queue_id,
bursts[0].queue_flow.flow_hash);
return;
@@ -1260,7 +1260,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
uint16_t i;
DSW_LOG_DP_PORT(DEBUG, source_port->id, "Attempting to enqueue %d "
- "events.\n", events_len);
+ "events.", events_len);
dsw_port_bg_process(dsw, source_port);
@@ -1329,7 +1329,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
}
DSW_LOG_DP_PORT(DEBUG, source_port->id, "%d non-release events "
- "accepted.\n", num_non_release);
+ "accepted.", num_non_release);
return (num_non_release + num_release);
}
@@ -1505,7 +1505,7 @@ dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num,
dsw_port_note_op(source_port, dequeued);
if (dequeued > 0) {
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "Dequeued %d events.\n",
+ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Dequeued %d events.",
dequeued);
dsw_port_return_credits(dsw, source_port, dequeued);
@@ -313,7 +313,7 @@ dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
- DPAA_MEMPOOL_DPDEBUG("Req size %" PRIx64 " vs Available %u\n",
+ DPAA_MEMPOOL_DPDEBUG("Req size %" PRIx64 " vs Available %u",
(uint64_t)len, total_elt_sz * mp->size);
/* Detect pool area has sufficient space for elements in this memzone */
@@ -67,7 +67,8 @@ extern struct dpaa_bp_info *rte_dpaa_bpid_info;
#define MEMPOOL_INIT_FUNC_TRACE() DPAA_MEMPOOL_LOG(DEBUG, " >>")
#define DPAA_MEMPOOL_DPDEBUG(fmt, args...) \
- RTE_LOG_DP(DEBUG, DPAA_MEMPOOL, fmt, ## args)
+ RTE_LOG_DP_LINE(DEBUG, DPAA_MEMPOOL, fmt, ## args)
+
#define DPAA_MEMPOOL_DEBUG(fmt, args...) \
DPAA_MEMPOOL_LOG(DEBUG, fmt, ## args)
#define DPAA_MEMPOOL_ERR(fmt, args...) \
@@ -378,7 +378,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
obj_table[n] = (struct rte_mbuf *)
(bufs[i] - bp_info->meta_data_size);
DPAA2_MEMPOOL_DP_DEBUG(
- "Acquired %p address %p from BMAN\n",
+ "Acquired %p address %p from BMAN",
(void *)bufs[i], (void *)obj_table[n]);
n++;
}
@@ -386,7 +386,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
alloc += n;
- DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
+ DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d",
alloc, count, n);
#endif
return 0;
@@ -439,7 +439,7 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
return 0;
}
- DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);
+ DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u", num_of_bufs);
return num_of_bufs;
}
@@ -26,7 +26,7 @@ extern int dpaa2_logtype_mempool;
/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA2_MEMPOOL_DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, DPAA2_MEMPOOL, fmt, ## args)
+ RTE_LOG_DP_LINE(level, DPAA2_MEMPOOL, fmt, ## args)
#define DPAA2_MEMPOOL_DP_DEBUG(fmt, args...) \
DPAA2_MEMPOOL_DP_LOG(DEBUG, fmt, ## args)
@@ -15,10 +15,10 @@ extern int atl_logtype_init;
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, ATL_DRIVER, "%s(): " fmt "\n", __func__, ## args)
+ RTE_LOG_DP_LINE(level, ATL_DRIVER, "%s(): " fmt, __func__, ## args)
#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, ATL_DRIVER, "%s(): " fmt "\n", __func__, ## args)
+ RTE_LOG_DP_LINE(level, ATL_DRIVER, "%s(): " fmt, __func__, ## args)
extern int atl_logtype_driver;
#define RTE_LOGTYPE_ATL_DRIVER atl_logtype_driver
@@ -233,6 +233,6 @@ extern int dpaa_logtype_pmd;
/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA_DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, DPAA_PMD, fmt, ## args)
+ RTE_LOG_DP_LINE(level, DPAA_PMD, fmt, ## args)
#endif
@@ -29,7 +29,7 @@ extern int dpaa2_logtype_pmd;
/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA2_PMD_DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, DPAA2_PMD, fmt, ## args)
+ RTE_LOG_DP_LINE(level, DPAA2_PMD, fmt, ## args)
#define DPAA2_PMD_DP_DEBUG(fmt, args...) \
DPAA2_PMD_DP_LOG(DEBUG, fmt, ## args)
@@ -399,7 +399,7 @@ eth_fd_to_mbuf(const struct qbman_fd *fd,
mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
- "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
+ "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d",
mbuf, mbuf->buf_addr, mbuf->data_off,
DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
@@ -446,7 +446,7 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
} else {
temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool);
if (temp == NULL) {
- DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
+ DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table");
return -ENOMEM;
}
DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool));
@@ -543,7 +543,7 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
- "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
+ "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d",
mbuf, mbuf->buf_addr, mbuf->data_off,
DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
@@ -587,7 +587,7 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
if (rte_dpaa2_mbuf_alloc_bulk(
rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
- DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n");
+ DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer");
return -1;
}
m = (struct rte_mbuf *)mb;
@@ -609,7 +609,7 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
#endif
DPAA2_PMD_DP_DEBUG(
"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
- " meta: %d, off: %d, len: %d\n",
+ " meta: %d, off: %d, len: %d",
(void *)mbuf,
mbuf->buf_addr,
DPAA2_GET_FD_ADDR(fd),
@@ -640,7 +640,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q)
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d\n",
+ DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d",
rte_gettid());
return;
}
@@ -656,7 +656,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q)
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- DPAA2_PMD_DP_DEBUG("VDQ command is not issued.QBMAN is busy\n");
+ DPAA2_PMD_DP_DEBUG("VDQ command is not issued.QBMAN is busy");
/* Portal was busy, try again */
continue;
}
@@ -691,7 +691,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q)
hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
fas = hw_annot_addr;
- DPAA2_PMD_ERR("\n\n[%d] error packet on port[%d]:"
+ DPAA2_PMD_ERR("[%d] error packet on port[%d]:"
" fd_off: %d, fd_err: %x, fas_status: %x",
rte_lcore_id(), eth_data->port_id,
DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
@@ -765,7 +765,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
- " QBMAN is busy (1)\n");
+ " QBMAN is busy (1)");
/* Portal was busy, try again */
continue;
}
@@ -861,7 +861,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
- "QBMAN is busy (2)\n");
+ "QBMAN is busy (2)");
continue;
}
break;
@@ -976,7 +976,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
ret = dpaa2_affine_qbman_swp();
if (ret) {
DPAA2_PMD_ERR(
- "Failed to allocate IO portal, tid: %d\n",
+ "Failed to allocate IO portal, tid: %d",
rte_gettid());
return 0;
}
@@ -1002,7 +1002,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
DPAA2_PMD_DP_DEBUG(
- "VDQ command is not issued.QBMAN is busy\n");
+ "VDQ command is not issued. QBMAN is busy");
/* Portal was busy, try again */
continue;
}
@@ -1107,7 +1107,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
ret = dpaa2_affine_qbman_swp();
if (ret) {
DPAA2_PMD_ERR(
- "Failed to allocate IO portal, tid: %d\n",
+ "Failed to allocate IO portal, tid: %d",
rte_gettid());
return 0;
}
@@ -1126,7 +1126,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
- "QBMAN is busy\n");
+ "QBMAN is busy");
/* Portal was busy, try again */
continue;
}
@@ -1256,14 +1256,14 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
ret = dpaa2_affine_qbman_swp();
if (ret) {
DPAA2_PMD_ERR(
- "Failed to allocate IO portal, tid: %d\n",
+ "Failed to allocate IO portal, tid: %d",
rte_gettid());
return 0;
}
}
swp = DPAA2_PER_LCORE_PORTAL;
- DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
+ DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
eth_data, dpaa2_q->fqid);
#ifdef RTE_LIBRTE_IEEE1588
@@ -1573,7 +1573,7 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
ret = dpaa2_affine_qbman_swp();
if (ret) {
DPAA2_PMD_ERR(
- "Failed to allocate IO portal, tid: %d\n",
+ "Failed to allocate IO portal, tid: %d",
rte_gettid());
return 0;
}
@@ -1598,7 +1598,7 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
}
}
- DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
+ DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
eth_data, dpaa2_q[loop]->fqid);
/* Check if the queue is congested */
@@ -1747,14 +1747,14 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
ret = dpaa2_affine_qbman_swp();
if (ret) {
DPAA2_PMD_ERR(
- "Failed to allocate IO portal, tid: %d\n",
+ "Failed to allocate IO portal, tid: %d",
rte_gettid());
return 0;
}
}
swp = DPAA2_PER_LCORE_PORTAL;
- DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
+ DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
eth_data, dpaa2_q->fqid);
/* This would also handle normal and atomic queues as any type
@@ -1989,7 +1989,7 @@ dpaa2_dev_loopback_rx(void *queue,
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
DPAA2_PMD_DP_DEBUG(
- "VDQ command not issued.QBMAN busy\n");
+ "VDQ command not issued. QBMAN busy");
/* Portal was busy, try again */
continue;
}
@@ -2071,7 +2071,7 @@ dpaa2_dev_loopback_rx(void *queue,
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
- "QBMAN is busy (2)\n");
+ "QBMAN is busy (2)");
continue;
}
break;
@@ -31,7 +31,7 @@ extern int enetc_logtype_pmd;
/* DP Logs, toggled out at compile time if level lower than current level */
#define ENETC_PMD_DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, ENETC_PMD, fmt, ## args)
+ RTE_LOG_DP_LINE(level, ENETC_PMD, fmt, ## args)
#define ENETC_PMD_DP_DEBUG(fmt, args...) \
ENETC_PMD_DP_LOG(DEBUG, fmt, ## args)
@@ -255,7 +255,7 @@ static inline void enetc_slow_parsing(struct rte_mbuf *m,
static inline void __rte_hot
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
- ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
+ ENETC_PMD_DP_DEBUG("parse summary = 0x%x", parse_results);
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD;
switch (parse_results) {
@@ -29,6 +29,6 @@ extern int enetfec_logtype_pmd;
/* DP Logs, toggled out at compile time if level lower than current level */
#define ENETFEC_DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, ENETFEC_PMD, fmt, ## args)
+ RTE_LOG_DP_LINE(level, ENETFEC_PMD, fmt, ## args)
#endif /* _ENETFEC_LOGS_H_ */
@@ -468,7 +468,7 @@ extern int mana_logtype_init;
__func__, ## args)
#define DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, MANA_DRIVER, fmt "\n", ## args)
+ RTE_LOG_DP_LINE(level, MANA_DRIVER, fmt, ## args)
#define PMD_INIT_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, mana_logtype_init, "%s(): " fmt "\n",\
@@ -27,6 +27,6 @@ extern int pfe_logtype_pmd;
/* DP Logs, toggled out at compile time if level lower than current level */
#define PFE_DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, PFE_PMD, fmt, ## args)
+ RTE_LOG_DP_LINE(level, PFE_PMD, fmt, ## args)
#endif /* _PFE_LOGS_H_ */
@@ -67,9 +67,7 @@ dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_CMDIF_ERR(
- "Failed to allocate IO portal, tid: %d\n",
- rte_gettid());
+ DPAA2_CMDIF_ERR("Failed to allocate IO portal, tid: %d", rte_gettid());
return 0;
}
}
@@ -106,7 +104,7 @@ dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev,
if (ret < 0)
return ret;
- DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet\n");
+ DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet");
return 1;
}
@@ -152,7 +150,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy\n");
+ DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy");
/* Portal was busy, try again */
continue;
}
@@ -169,7 +167,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
/* Check for valid frame. */
status = (uint8_t)qbman_result_DQ_flags(dq_storage);
if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
- DPAA2_CMDIF_DP_DEBUG("No frame is delivered\n");
+ DPAA2_CMDIF_DP_DEBUG("No frame is delivered");
return 0;
}
@@ -181,7 +179,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
cmdif_rcv_cnxt->flc = DPAA2_GET_FD_FLC(fd);
cmdif_rcv_cnxt->frc = DPAA2_GET_FD_FRC(fd);
- DPAA2_CMDIF_DP_DEBUG("packet received\n");
+ DPAA2_CMDIF_DP_DEBUG("packet received");
return 1;
}
@@ -31,7 +31,7 @@ extern int dpaa2_cmdif_logtype;
/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA2_CMDIF_DP_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, DPAA2_CMDIF, "dpaa2_cmdif: " fmt "\n", ## args)
+ RTE_LOG_DP_LINE(level, DPAA2_CMDIF, "dpaa2_cmdif: " fmt, ## args)
#define DPAA2_CMDIF_DP_DEBUG(fmt, args...) \
DPAA2_CMDIF_DP_LOG(DEBUG, fmt, ## args)