@@ -57,7 +57,7 @@ The public API headers are grouped by topics:
[mlx5](@ref rte_pmd_mlx5.h),
[dpaa2_mempool](@ref rte_dpaa2_mempool.h),
[dpaa2_cmdif](@ref rte_pmd_dpaa2_cmdif.h),
- [dpaa2_qdma](@ref rte_pmd_dpaa2_qdma.h),
+ [dpaax](@ref rte_pmd_dpaax_qdma.h),
[crypto_scheduler](@ref rte_cryptodev_scheduler.h),
[dlb2](@ref rte_pmd_dlb2.h),
[ifpga](@ref rte_pmd_ifpga.h)
@@ -8,7 +8,7 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \
@TOPDIR@/drivers/bus/vdev \
@TOPDIR@/drivers/crypto/cnxk \
@TOPDIR@/drivers/crypto/scheduler \
- @TOPDIR@/drivers/dma/dpaa2 \
+ @TOPDIR@/drivers/common/dpaax \
@TOPDIR@/drivers/event/dlb2 \
@TOPDIR@/drivers/event/cnxk \
@TOPDIR@/drivers/mempool/cnxk \
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 NXP
+# Copyright 2018, 2024 NXP
if not is_linux
build = false
@@ -16,3 +16,4 @@ endif
if cc.has_argument('-Wno-pointer-arith')
cflags += '-Wno-pointer-arith'
endif
+headers = files('rte_pmd_dpaax_qdma.h')
new file mode 100644
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021-2024 NXP
+ */
+
+#ifndef _RTE_PMD_DPAAX_QDMA_H_
+#define _RTE_PMD_DPAAX_QDMA_H_
+
+#include <rte_bitops.h>
+
+#define RTE_DPAAX_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN \
+ RTE_BIT64(RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK \
+ (RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAAX_QDMA_SG_SUBMIT(idx_addr, flag) \
+ (((uint64_t)(idx_addr)) | (flag))
+
+#define RTE_DPAAX_QDMA_COPY_SUBMIT(idx, flag) \
+ (((idx) << RTE_DPAAX_QDMA_COPY_IDX_OFFSET) | (flag))
+
+#define RTE_DPAAX_QDMA_JOB_SUBMIT_MAX 64
+#define RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#endif /* _RTE_PMD_DPAAX_QDMA_H_ */
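
For context, a minimal application-side sketch of what these macros are for (not part of this patch): when the device advertises RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX, a caller can tag each copy with its own index in the upper flag bits. The helper name submit_with_index() is illustrative; only the standard dmadev calls and the macros above are assumed.

    #include <rte_dmadev.h>
    #include <rte_pmd_dpaax_qdma.h>

    /* Illustrative helper: submit one copy tagged with a caller-chosen index. */
    static int
    submit_with_index(int16_t dev_id, uint16_t vchan, rte_iova_t src,
                      rte_iova_t dst, uint32_t len, uint16_t idx)
    {
            struct rte_dma_info info;
            uint64_t flags = RTE_DMA_OP_FLAG_SUBMIT;
            int ret;

            ret = rte_dma_info_get(dev_id, &info);
            if (ret != 0)
                    return ret;

            /* Pack the index into the upper flag bits only when supported. */
            if (info.dev_capa & RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX)
                    flags = RTE_DPAAX_QDMA_COPY_SUBMIT(idx, flags);

            return rte_dma_copy(dev_id, vchan, src, dst, len, flags);
    }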
@@ -10,7 +10,7 @@
#include <mc/fsl_dpdmai.h>
-#include "rte_pmd_dpaa2_qdma.h"
+#include <rte_pmd_dpaax_qdma.h>
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"
@@ -212,16 +212,16 @@ fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
}
/* source frame list to source buffer */
DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+ /* The IOMMU is enabled in both VA and PA modes, so Bypass Memory
+ * Translation must stay disabled; the former calls are dropped:
+ *
+ * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+ * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+ */
fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
/* destination frame list to destination buffer */
DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
/* Final bit: 1, for last frame list */
@@ -235,23 +235,21 @@ sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
- for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+ for (i = 0; i < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX; i++) {
/* source SG */
src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+ /* The IOMMU is enabled in both VA and PA modes,
+ * so Bypass Memory Translation must stay disabled.
+ */
src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
/* destination SG */
dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+ /* The IOMMU is enabled in both VA and PA modes,
+ * so Bypass Memory Translation must stay disabled.
+ */
dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
}
}
@@ -350,21 +348,19 @@ sg_entry_populate(const struct rte_dma_sge *src,
src_sge->data_len.data_len_sl0 = src[i].length;
src_sge->ctrl.sl = QDMA_SG_SL_LONG;
src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- src_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+ /* The IOMMU is enabled in both VA and PA modes,
+ * so Bypass Memory Translation must stay disabled.
+ */
src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
dst_sge->addr_lo = (uint32_t)dst[i].addr;
dst_sge->addr_hi = (dst[i].addr >> 32);
dst_sge->data_len.data_len_sl0 = dst[i].length;
dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- dst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+ /* The IOMMU is enabled in both VA and PA modes,
+ * so Bypass Memory Translation must stay disabled.
+ */
dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
total_len += src[i].length;
if (i == (nb_sge - 1)) {
@@ -444,17 +440,16 @@ fle_populate(struct qbman_fle fle[],
}
/* source frame list to source buffer */
DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+ /* The IOMMU is enabled in both VA and PA modes, so Bypass Memory
+ * Translation must stay disabled; the former calls are dropped:
+ * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+ * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+ */
fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
/* destination frame list to destination buffer */
DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
@@ -560,7 +555,7 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
DPAA2_QDMA_INFO("long format/SG format, job number:%d",
cntx_sg->job_nb);
if (!cntx_sg->job_nb ||
- cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+ cntx_sg->job_nb > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX) {
DPAA2_QDMA_ERR("Invalid SG job number:%d",
cntx_sg->job_nb);
return;
@@ -610,9 +605,9 @@ dpaa2_qdma_copy_sg(void *dev_private,
return -EINVAL;
}
- if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+ if (unlikely(nb_src > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)) {
DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
- nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+ nb_src, RTE_DPAAX_QDMA_JOB_SUBMIT_MAX);
return -EINVAL;
}
@@ -631,11 +626,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
cntx_sg->cntx_idx[i] = idx_addr[i];
}
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- cntx_iova = rte_mempool_virt2iova(cntx_sg);
-#else
- cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
-#endif
+ cntx_iova = (uint64_t)cntx_sg - qdma_vq->fle_iova2va_offset;
fle = cntx_sg->fle_sdd.fle;
fle_iova = cntx_iova +
@@ -667,8 +658,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
offsetof(struct qdma_cntx_sg, sg_src_entry);
dst_sge_iova = cntx_iova +
offsetof(struct qdma_cntx_sg, sg_dst_entry);
- len = sg_entry_populate(src, dst,
- cntx_sg, nb_src);
+ len = sg_entry_populate(src, dst, cntx_sg, nb_src);
fle_populate(fle, sdd, sdd_iova,
&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
@@ -1011,7 +1001,7 @@ dpaa2_qdma_dequeue(void *dev_private,
q_storage->last_num_pkts);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
- (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ DPAA2_VADDR_TO_IOVA(dq_storage), 1);
if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
while (!qbman_check_command_complete(
get_swp_active_dqs(
@@ -1046,7 +1036,7 @@ dpaa2_qdma_dequeue(void *dev_private,
qbman_pull_desc_set_numframes(&pulldesc, pull_size);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
- (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+ DPAA2_VADDR_TO_IOVA(dq_storage1), 1);
/* Check if the previous issued command is completed.
* Also seems like the SWP is shared between the Ethernet Driver
@@ -1078,7 +1068,7 @@ dpaa2_qdma_dequeue(void *dev_private,
}
fd = qbman_result_DQ_fd(dq_storage);
ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
- if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+ if (ret || free_space < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)
pending = 0;
dq_storage++;
@@ -1131,11 +1121,11 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
RTE_DMA_CAPA_SILENT |
RTE_DMA_CAPA_OPS_COPY |
RTE_DMA_CAPA_OPS_COPY_SG;
- dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+ dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
dev_info->max_vchans = dpdmai_dev->num_queues;
dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
- dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+ dev_info->max_sges = RTE_DPAAX_QDMA_JOB_SUBMIT_MAX;
dev_info->dev_name = dev->device->name;
if (dpdmai_dev->qdma_dev)
dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
@@ -1317,6 +1307,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
char pool_name[64];
int ret;
char *env = NULL;
+ uint64_t iova, va;
DPAA2_QDMA_FUNC_TRACE();
@@ -1359,6 +1350,9 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
DPAA2_QDMA_ERR("%s create failed", pool_name);
return -ENOMEM;
}
+ iova = qdma_dev->vqs[vchan].fle_pool->mz->iova;
+ va = qdma_dev->vqs[vchan].fle_pool->mz->addr_64;
+ qdma_dev->vqs[vchan].fle_iova2va_offset = va - iova;
if (qdma_dev->is_silent) {
ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
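
For clarity, a small sketch (not part of the patch) of the conversion that fle_iova2va_offset enables on the fast path; it assumes every FLE context object lives in the same IOVA-contiguous region as the pool's memzone, so a single per-pool offset is valid:

    #include <rte_memzone.h>

    /* Map a context object's VA back to its IOVA using the per-pool offset
     * computed once at vchan setup (names assumed from the hunk above).
     */
    static inline uint64_t
    cntx_va_to_iova(uint64_t cntx_va, const struct rte_memzone *mz)
    {
            uint64_t iova2va_offset = mz->addr_64 - mz->iova;

            return cntx_va - iova2va_offset;
    }

This replaces the per-job rte_mempool_virt2iova()/DPAA2_VADDR_TO_IOVA() lookup removed earlier in dpaa2_qdma_copy_sg().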
@@ -220,18 +220,18 @@ struct qdma_cntx_fle_sdd {
struct qdma_cntx_sg {
struct qdma_cntx_fle_sdd fle_sdd;
- struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
- struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
- uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+ struct qdma_sg_entry sg_src_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+ struct qdma_sg_entry sg_dst_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+ uint16_t cntx_idx[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
uint16_t job_nb;
uint16_t rsv[3];
} __rte_packed;
#define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
- ((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+ ((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))
#define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
- ((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+ ((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
/** Represents a DPDMAI device */
struct dpaa2_dpdmai_dev {
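
For context, a hedged sketch (not part of this patch) of the application-side packing that DPAA2_QDMA_IDXADDR_FROM_SG_FLAG() and DPAA2_QDMA_IDX_FROM_FLAG() decode: the caller hands over an aligned index array through the upper flag bits of rte_dma_copy_sg(). The names submit_sg() and sg_idx are illustrative; only the dmadev API and the public macros are assumed.

    #include <rte_dmadev.h>
    #include <rte_pmd_dpaax_qdma.h>

    /* The index array must be aligned so its low bits stay free for op flags. */
    static uint16_t sg_idx[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX]
            __rte_aligned(RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN);

    static int
    submit_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
              struct rte_dma_sge *dst, uint16_t nb_sge)
    {
            /* Carry the array address plus the submit flag in 'flags'. */
            uint64_t flags = RTE_DPAAX_QDMA_SG_SUBMIT(sg_idx,
                    RTE_DMA_OP_FLAG_SUBMIT);

            return rte_dma_copy_sg(dev_id, vchan, src, dst,
                    nb_sge, nb_sge, flags);
    }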
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2021 NXP
+# Copyright 2021, 2024 NXP
if not is_linux
build = false
@@ -14,5 +14,3 @@ sources = files('dpaa2_qdma.c')
if cc.has_argument('-Wno-pointer-arith')
cflags += '-Wno-pointer-arith'
endif
-
-headers = files('rte_pmd_dpaa2_qdma.h')
deleted file mode 100644
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2023 NXP
- */
-
-#ifndef _RTE_PMD_DPAA2_QDMA_H_
-#define _RTE_PMD_DPAA2_QDMA_H_
-
-#include <rte_compat.h>
-
-#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
- RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
- (RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
-#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
- (((uint64_t)idx_addr) | (flag))
-
-#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
- ((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
-
-#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
-#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
-#endif /* _RTE_PMD_DPAA2_QDMA_H_ */