@@ -285,6 +285,7 @@ CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX=n
#
CONFIG_RTE_LIBRTE_DPAA2_COMMON=n
CONFIG_RTE_LIBRTE_DPAA2_POOL=n
+CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y
#
# Compile NXP DPAA2 FSL-MC Bus
@@ -50,6 +50,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
CONFIG_RTE_LIBRTE_DPAA2_COMMON=y
CONFIG_RTE_LIBRTE_DPAA2_POOL=n
CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa2"
+CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y
#
# Compile NXP DPAA2 FSL-MC Bus
@@ -175,6 +175,72 @@ struct qbman_fle {
*/
#define DPAA2_EQ_RESP_ALWAYS 1
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
+/* todo - this is costly, need to write a fast conversion routine */
+/*
+ * Translate a physical address back to its virtual mapping by scanning
+ * the EAL memseg layout. Returns NULL when paddr is not backed by any
+ * registered memseg. O(number of memsegs) per call.
+ */
+static void *dpaa2_mem_ptov(phys_addr_t paddr)
+{
+	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+	int i;
+
+	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
+		/* integer compare: paddr is a physical address, not a
+		 * dereferenceable pointer, so no (char *) casting games
+		 */
+		if (paddr >= memseg[i].phys_addr &&
+		    paddr < memseg[i].phys_addr + memseg[i].len)
+			return (void *)(memseg[i].addr_64
+				+ (paddr - memseg[i].phys_addr));
+	}
+	return NULL;
+}
+
+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
+/*
+ * Translate a virtual address to its physical counterpart by scanning
+ * the EAL memseg layout. Returns 0 when vaddr falls outside every
+ * registered memseg. O(number of memsegs) per call.
+ */
+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
+{
+	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+	int i;
+
+	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
+		if (vaddr >= memseg[i].addr_64 &&
+		    vaddr < memseg[i].addr_64 + memseg[i].len)
+			return memseg[i].phys_addr
+				+ (vaddr - memseg[i].addr_64);
+	}
+	/* phys_addr_t is an integer type; 0 is the "not found" sentinel,
+	 * not the pointer constant NULL
+	 */
+	return (phys_addr_t)0;
+}
+
+/**
+ * When we are using Physical addresses as IO Virtual Addresses, we need
+ * to call the conversion routines dpaa2_mem_vtop & dpaa2_mem_ptov
+ * wherever required.
+ * These routines are called with the help of the macros below.
+ */
+
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_physaddr)
+
+/**
+ * macro to convert a virtual address to an IOVA
+ */
+#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr))
+
+/**
+ * macro to convert an IOVA to a virtual address
+ */
+#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova))
+
+/**
+ * macro to modify, in place, a memory location holding an IOVA so that
+ * it holds the corresponding virtual address instead
+ */
+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
+	do { \
+		(_mem) = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); \
+	} while (0)
+
+#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+
+/* Virtual addressing: IOVA == VA, all conversions are identity/no-ops */
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
+#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
+#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
+
+#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+
+
struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
@@ -76,7 +76,7 @@
memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
- tc_cfg.key_cfg_iova = (uint64_t)(p_params);
+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
@@ -119,7 +119,7 @@ int dpaa2_remove_flow_dist(
memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
- tc_cfg.key_cfg_iova = (uint64_t)(p_params);
+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
tc_cfg.dist_size = 0;
tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
@@ -136,7 +136,7 @@ static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
- DPAA2_GET_FD_ADDR(fd),
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
/* need to repopulated some of the fields,
@@ -151,10 +151,11 @@ static inline struct rte_mbuf *__attribute__((hot))
/* Parse the packet */
/* parse results are after the private - sw annotation area */
mbuf->packet_type = dpaa2_dev_rx_parse(
- (uint64_t)(DPAA2_GET_FD_ADDR(fd))
+ (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ DPAA2_FD_PTA_SIZE);
- dpaa2_dev_rx_offload((uint64_t)(DPAA2_GET_FD_ADDR(fd)) +
+ dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd)) +
DPAA2_FD_PTA_SIZE, mbuf);
mbuf->next = NULL;
@@ -177,7 +178,7 @@ static void __attribute__ ((noinline)) __attribute__((hot))
/*Resetting the buffer pool id and offset field*/
fd->simple.bpid_offset = 0;
- DPAA2_SET_FD_ADDR(fd, (mbuf->buf_addr));
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
DPAA2_SET_FD_LEN(fd, mbuf->data_len);
DPAA2_SET_FD_BPID(fd, bpid);
DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
@@ -218,7 +219,7 @@ static inline int __attribute__((hot))
/*Resetting the buffer pool id and offset field*/
fd->simple.bpid_offset = 0;
- DPAA2_SET_FD_ADDR(fd, (m->buf_addr));
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
DPAA2_SET_FD_LEN(fd, mbuf->data_len);
DPAA2_SET_FD_BPID(fd, bpid);
DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
@@ -270,7 +271,7 @@ static inline int __attribute__((hot))
qbman_pull_desc_set_fq(&pulldesc, fqid);
/* todo optimization - we can have dq_storage_phys available*/
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
- (dma_addr_t)(dq_storage), 1);
+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
/*Issue a volatile dequeue command. */
while (1) {
@@ -311,7 +312,8 @@ static inline int __attribute__((hot))
}
fd = qbman_result_DQ_fd(dq_storage);
- mbuf = (struct rte_mbuf *)(DPAA2_GET_FD_ADDR(fd)
+ mbuf = (struct rte_mbuf *)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd)
- bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
/* Prefeth mbuf */
rte_prefetch0(mbuf);
@@ -203,9 +203,14 @@ void dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
n = count % DPAA2_MBUF_MAX_ACQ_REL;
/* convert mbuf to buffers for the remainder*/
- for (i = 0; i < n ; i++)
+ for (i = 0; i < n ; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ bufs[i] = (uint64_t)rte_mempool_virt2phy(pool, obj_table[i])
+ + meta_data_size;
+#else
bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
-
+#endif
+ }
/* feed them to bman*/
do {
ret = qbman_swp_release(swp, &releasedesc, bufs, n);
@@ -214,8 +219,15 @@ void dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
/* if there are more buffers to free */
while (n < count) {
/* convert mbuf to buffers */
- for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++)
+ for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ bufs[i] = (uint64_t)
+ rte_mempool_virt2phy(pool, obj_table[n + i])
+ + meta_data_size;
+#else
bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
+#endif
+ }
do {
ret = qbman_swp_release(swp, &releasedesc, bufs,
@@ -288,6 +300,7 @@ int hw_mbuf_alloc_bulk(struct rte_mempool *pool,
* i.e. first buffer is valid,
* remaining 6 buffers may be null
*/
+ DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
obj_table[n] = (struct rte_mbuf *)(bufs[i] - mbuf_size);
rte_mbuf_refcnt_set((struct rte_mbuf *)obj_table[n], 0);
PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",