@@ -6,6 +6,7 @@
#include <rte_dmadev_pmd.h>
#include "dpaa_qdma.h"
+#include "dpaa_qdma_logs.h"
static inline int
ilog2(int x)
@@ -114,6 +115,7 @@ static struct fsl_qdma_queue
for (i = 0; i < queue_num; i++) {
if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+ DPAA_QDMA_ERR("Get wrong queue-sizes.");
goto fail;
}
queue_temp = queue_head + i + (j * queue_num);
@@ -163,6 +165,7 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
status_size = QDMA_STATUS_SIZE;
if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+ DPAA_QDMA_ERR("Get wrong status_size.");
return NULL;
}
@@ -250,8 +253,10 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
/* Try to halt the qDMA engine first. */
ret = fsl_qdma_halt(fsl_qdma);
- if (ret)
+ if (ret) {
+ DPAA_QDMA_ERR("DMA halt failed!");
return ret;
+ }
for (j = 0; j < fsl_qdma->num_blocks; j++) {
block = fsl_qdma->block_base +
@@ -375,8 +380,10 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
}
ccsr_qdma_fd = open("/dev/mem", O_RDWR);
- if (unlikely(ccsr_qdma_fd < 0))
+ if (unlikely(ccsr_qdma_fd < 0)) {
+ DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
goto err;
+ }
regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
phys_addr = QDMA_CCSR_BASE;
@@ -385,8 +392,11 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
ccsr_qdma_fd, phys_addr);
close(ccsr_qdma_fd);
- if (fsl_qdma->ctrl_base == MAP_FAILED)
+ if (fsl_qdma->ctrl_base == MAP_FAILED) {
+ DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
+ " size %d", phys_addr, regs_size);
goto err;
+ }
fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
@@ -408,6 +418,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
ret = fsl_qdma_reg_init(fsl_qdma);
if (ret) {
+ DPAA_QDMA_ERR("Can't Initialize the qDMA engine.");
munmap(fsl_qdma->ctrl_base, regs_size);
goto err;
}
@@ -431,8 +442,10 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
dmadev = rte_dma_pmd_allocate(dpaa_dev->device.name,
rte_socket_id(),
sizeof(struct fsl_qdma_engine));
- if (!dmadev)
+ if (!dmadev) {
+ DPAA_QDMA_ERR("Unable to allocate dmadevice");
return -EINVAL;
+ }
dpaa_dev->dmadev = dmadev;
@@ -478,3 +491,4 @@ static struct rte_dpaa_driver rte_dpaa_qdma_pmd = {
};
RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd);
+RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO);
new file mode 100644
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#ifndef __DPAA_QDMA_LOGS_H__
+#define __DPAA_QDMA_LOGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int dpaa_qdma_logtype;
+
+#define DPAA_QDMA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_qdma_logtype, "dpaa_qdma: " \
+ fmt "\n", ## args)
+
+#define DPAA_QDMA_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa_qdma_logtype, "dpaa_qdma: %s(): " \
+ fmt "\n", __func__, ## args)
+
+#define DPAA_QDMA_FUNC_TRACE() DPAA_QDMA_DEBUG(">>")
+
+#define DPAA_QDMA_INFO(fmt, args...) \
+ DPAA_QDMA_LOG(INFO, fmt, ## args)
+#define DPAA_QDMA_ERR(fmt, args...) \
+ DPAA_QDMA_LOG(ERR, fmt, ## args)
+#define DPAA_QDMA_WARN(fmt, args...) \
+ DPAA_QDMA_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA_QDMA_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "dpaa_qdma: " fmt "\n", ## args)
+
+#define DPAA_QDMA_DP_DEBUG(fmt, args...) \
+ DPAA_QDMA_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA_QDMA_DP_INFO(fmt, args...) \
+ DPAA_QDMA_DP_LOG(INFO, fmt, ## args)
+#define DPAA_QDMA_DP_WARN(fmt, args...) \
+ DPAA_QDMA_DP_LOG(WARNING, fmt, ## args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DPAA_QDMA_LOGS_H__ */