@@ -37,9 +37,6 @@ extern "C" {
#include <fslmc_vfio.h>
-#include "portal/dpaa2_hw_pvt.h"
-#include "portal/dpaa2_hw_dpio.h"
-
#define FSLMC_OBJECT_MAX_LEN 32 /**< Length of each device on bus */
#define DPAA2_INVALID_MBUF_SEQN 0
@@ -149,6 +146,32 @@ struct rte_dpaa2_driver {
rte_dpaa2_remove_t remove;
};
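+
+/* Internal VFIO DMA mapping helpers and VA<->IOVA translation APIs: the
+ * "cold_mem" variants bound-check a whole range against recorded MEM
+ * segments, the __hot "mem" variants use a cached constant offset when
+ * possible, and the "io" variants translate mmapped IO regions.
+ */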
+__rte_internal
+int
+rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size);
+__rte_internal
+int
+rte_fslmc_vfio_mem_dmaunmap(uint64_t iova, uint64_t size);
+__rte_internal
+uint64_t
+rte_fslmc_cold_mem_vaddr_to_iova(void *vaddr,
+ uint64_t size);
+__rte_internal
+void *
+rte_fslmc_cold_mem_iova_to_vaddr(uint64_t iova,
+ uint64_t size);
+__rte_internal
+__hot uint64_t
+rte_fslmc_mem_vaddr_to_iova(void *vaddr);
+__rte_internal
+__hot void *
+rte_fslmc_mem_iova_to_vaddr(uint64_t iova);
+__rte_internal
+uint64_t
+rte_fslmc_io_vaddr_to_iova(void *vaddr);
+__rte_internal
+void *
+rte_fslmc_io_iova_to_vaddr(uint64_t iova);
+
/**
* Register a DPAA2 driver.
*
@@ -27,7 +27,6 @@
#define FSLMC_BUS_NAME fslmc
struct rte_fslmc_bus rte_fslmc_bus;
-uint8_t dpaa2_virt_mode;
#define DPAA2_SEQN_DYNFIELD_NAME "dpaa2_seqn_dynfield"
int dpaa2_seqn_dynfield_offset = -1;
@@ -457,22 +456,6 @@ rte_fslmc_probe(void)
probe_all = rte_fslmc_bus.bus.conf.scan_mode != RTE_BUS_SCAN_ALLOWLIST;
- /* In case of PA, the FD addresses returned by qbman APIs are physical
- * addresses, which need conversion into equivalent VA address for
- * rte_mbuf. For that, a table (a serial array, in memory) is used to
- * increase translation efficiency.
- * This has to be done before probe as some device initialization
- * (during) probe allocate memory (dpaa2_sec) which needs to be pinned
- * to this table.
- *
- * Error is ignored as relevant logs are handled within dpaax and
- * handling for unavailable dpaax table too is transparent to caller.
- *
- * And, the IOVA table is only applicable in case of PA mode.
- */
- if (rte_eal_iova_mode() == RTE_IOVA_PA)
- dpaax_iova_table_populate();
-
TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
ret = rte_fslmc_match(drv, dev);
@@ -507,9 +490,6 @@ rte_fslmc_probe(void)
}
}
- if (rte_eal_iova_mode() == RTE_IOVA_VA)
- dpaa2_virt_mode = 1;
-
return 0;
}
@@ -558,12 +538,6 @@ rte_fslmc_driver_register(struct rte_dpaa2_driver *driver)
void
rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
{
- /* Cleanup the PA->VA Translation table; From wherever this function
- * is called from.
- */
- if (rte_eal_iova_mode() == RTE_IOVA_PA)
- dpaax_iova_table_depopulate();
-
TAILQ_REMOVE(&rte_fslmc_bus.driver_list, driver, next);
}
@@ -599,13 +573,12 @@ rte_dpaa2_get_iommu_class(void)
bool is_vfio_noiommu_enabled = 1;
bool has_iova_va;
+ if (rte_eal_iova_mode() == RTE_IOVA_PA)
+ return RTE_IOVA_PA;
+
if (TAILQ_EMPTY(&rte_fslmc_bus.device_list))
return RTE_IOVA_DC;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- return RTE_IOVA_PA;
-#endif
-
/* check if all devices on the bus support Virtual addressing or not */
has_iova_va = fslmc_all_device_support_iova();
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016 NXP
+ * Copyright 2016-2023 NXP
*
*/
@@ -10,7 +10,8 @@
extern int dpaa2_logtype_bus;
#define DPAA2_BUS_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, dpaa2_logtype_bus, "fslmc: " fmt "\n", \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_bus, \
+ "fslmc " # level ": " fmt "\n", \
##args)
/* Debug logs are with Function names */
@@ -19,6 +19,7 @@
#include <libgen.h>
#include <dirent.h>
#include <sys/eventfd.h>
+#include <ctype.h>
#include <eal_filesystem.h>
#include <rte_mbuf.h>
@@ -49,9 +50,41 @@
*/
static struct fslmc_vfio_container s_vfio_container;
/* Currently we only support single group/process. */
-const char *fslmc_group; /* dprc.x*/
+static const char *fslmc_group; /* dprc.x */
static uint32_t *msi_intr_vaddr;
-void *(*rte_mcp_ptr_list);
+static void *(*rte_mcp_ptr_list);
+
+struct fslmc_dmaseg {
+ uint64_t vaddr;
+ uint64_t iova;
+ uint64_t size;
+
+ TAILQ_ENTRY(fslmc_dmaseg) next;
+};
+
+TAILQ_HEAD(fslmc_dmaseg_list, fslmc_dmaseg);
+
+struct fslmc_dmaseg_list fslmc_memsegs =
+ TAILQ_HEAD_INITIALIZER(fslmc_memsegs);
+struct fslmc_dmaseg_list fslmc_iosegs =
+ TAILQ_HEAD_INITIALIZER(fslmc_iosegs);
+
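+/* Fast-path VA<->IOVA conversion: while exactly one MEM segment is mapped,
+ * translation is a constant offset cached in fslmc_mem_va2iova. Once a
+ * second segment is mapped, the offset is reset to RTE_BAD_IOVA and lookups
+ * fall back to walking fslmc_memsegs.
+ */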
+static uint64_t fslmc_mem_va2iova = RTE_BAD_IOVA;
+static int fslmc_mem_map_num;
+
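+/* Payload carried over the multiprocess channel: a snapshot of the primary
+ * process's DMA segment lists plus the cached VA->IOVA offset, handed to
+ * secondary processes at startup.
+ */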
+struct fslmc_mem_param {
+ struct vfio_mp_param mp_param;
+ struct fslmc_dmaseg_list memsegs;
+ struct fslmc_dmaseg_list iosegs;
+ uint64_t mem_va2iova;
+ int mem_map_num;
+};
+
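+/* Private requests on the FSLMC_VFIO_MP channel; numbered from 0x100 to
+ * keep them clearly apart from other VFIO socket request IDs.
+ */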
+enum {
+ FSLMC_VFIO_SOCKET_REQ_CONTAINER = 0x100,
+ FSLMC_VFIO_SOCKET_REQ_GROUP,
+ FSLMC_VFIO_SOCKET_REQ_MEM
+};
void *
dpaa2_get_mcp_ptr(int portal_idx)
@@ -65,6 +98,64 @@ dpaa2_get_mcp_ptr(int portal_idx)
static struct rte_dpaa2_object_list dpaa2_obj_list =
TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);
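+
+/* Resolve the physical address backing an mmapped IO region by matching the
+ * VA against /proc/self/maps and reading the file-offset column, which holds
+ * the physical address for direct (e.g. /dev/mem style) mappings. Regular
+ * memory is resolved via rte_mem_virt2phy() before falling back to this.
+ */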
+static uint64_t
+fslmc_io_virt2phy(const void *virtaddr)
+{
+ FILE *fp = fopen("/proc/self/maps", "r");
+ char *line = NULL;
+ size_t linesz = 0;
+ uint64_t start, end, phy;
+ const uint64_t va = (uint64_t)(uintptr_t)virtaddr;
+ char tmp[1024];
+ int ret;
+
+ if (!fp)
+ return RTE_BAD_IOVA;
+ while (getdelim(&line, &linesz, '\n', fp) > 0) {
+ char *ptr = line;
+ int n;
+
+ /* Parse the virtual address range. */
+ n = 0;
+ while (*ptr && !isspace(*ptr)) {
+ tmp[n] = *ptr;
+ ptr++;
+ n++;
+ }
+ tmp[n] = 0;
+ ret = sscanf(tmp, "%" SCNx64 "-%" SCNx64, &start, &end);
+ if (ret != 2)
+ continue;
+ if (va < start || va >= end)
+ continue;
+
+ /* This virtual address lies within this segment. */
+ while (*ptr == ' ' || *ptr == 'r' ||
+ *ptr == 'w' || *ptr == 's' ||
+ *ptr == 'p' || *ptr == 'x' ||
+ *ptr == '-')
+ ptr++;
+
+ /* Extract the physical address. */
+ n = 0;
+ while (*ptr && !isspace(*ptr)) {
+ tmp[n] = *ptr;
+ ptr++;
+ n++;
+ }
+ tmp[n] = 0;
+ phy = strtoul(tmp, NULL, 16);
+ if (!phy)
+ continue;
+
+ free(line);
+ fclose(fp);
+ return phy + va - start;
+ }
+
+ free(line);
+ fclose(fp);
+ return RTE_BAD_IOVA;
+}
+
/*register a fslmc bus based dpaa2 driver */
void
rte_fslmc_object_register(struct rte_dpaa2_object *object)
@@ -271,7 +362,7 @@ fslmc_get_group_id(const char *group_name,
ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
group_name, groupid);
if (ret <= 0) {
- DPAA2_BUS_ERR("Unable to find %s IOMMU group", group_name);
+ DPAA2_BUS_ERR("Failed to find IOMMU group for %s, ret(%d)",
+ group_name, ret);
if (ret < 0)
return ret;
@@ -314,7 +405,7 @@ fslmc_vfio_open_group_fd(const char *group_name)
/* if we're in a secondary process, request group fd from the primary
* process via mp channel.
*/
- p->req = SOCKET_REQ_GROUP;
+ p->req = FSLMC_VFIO_SOCKET_REQ_GROUP;
p->group_num = iommu_group_num;
strcpy(mp_req.name, FSLMC_VFIO_MP);
mp_req.len_param = sizeof(*p);
@@ -408,7 +499,7 @@ fslmc_vfio_open_container_fd(void)
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
if (vfio_container_fd < 0) {
- DPAA2_BUS_ERR("Cannot open VFIO container(%s), err(%d)",
+ DPAA2_BUS_ERR("Failed to open VFIO container(%s), err(%d)",
VFIO_CONTAINER_PATH, vfio_container_fd);
ret = vfio_container_fd;
goto err_exit;
@@ -417,7 +508,7 @@ fslmc_vfio_open_container_fd(void)
/* check VFIO API version */
ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
if (ret < 0) {
- DPAA2_BUS_ERR("Could not get VFIO API version(%d)",
+ DPAA2_BUS_ERR("Failed to get VFIO API version(%d)",
ret);
} else if (ret != VFIO_API_VERSION) {
DPAA2_BUS_ERR("Unsupported VFIO API version(%d)",
@@ -431,7 +522,7 @@ fslmc_vfio_open_container_fd(void)
ret = fslmc_vfio_check_extensions(vfio_container_fd);
if (ret) {
- DPAA2_BUS_ERR("No supported IOMMU extensions found(%d)",
+ DPAA2_BUS_ERR("No supported IOMMU extension found, err(%d)",
ret);
close(vfio_container_fd);
goto err_exit;
@@ -443,7 +534,7 @@ fslmc_vfio_open_container_fd(void)
* if we're in a secondary process, request container fd from the
* primary process via mp channel
*/
- p->req = SOCKET_REQ_CONTAINER;
+ p->req = FSLMC_VFIO_SOCKET_REQ_CONTAINER;
strcpy(mp_req.name, FSLMC_VFIO_MP);
mp_req.len_param = sizeof(*p);
mp_req.num_fds = 0;
@@ -473,7 +564,7 @@ fslmc_vfio_open_container_fd(void)
err_exit:
if (mp_reply.msgs)
free(mp_reply.msgs);
- DPAA2_BUS_ERR("Cannot request container fd err(%d)", ret);
+ DPAA2_BUS_ERR("Failed to get container fd, err(%d)", ret);
return ret;
}
@@ -506,17 +597,19 @@ fslmc_vfio_mp_primary(const struct rte_mp_msg *msg,
struct rte_mp_msg reply;
struct vfio_mp_param *r = (void *)reply.param;
const struct vfio_mp_param *m = (const void *)msg->param;
+ struct fslmc_mem_param *map;
if (msg->len_param != sizeof(*m)) {
- DPAA2_BUS_ERR("fslmc vfio received invalid message!");
+ DPAA2_BUS_ERR("Invalid msg size(%d) for req(%d)",
+ msg->len_param, m->req);
return -EINVAL;
}
memset(&reply, 0, sizeof(reply));
switch (m->req) {
- case SOCKET_REQ_GROUP:
- r->req = SOCKET_REQ_GROUP;
+ case FSLMC_VFIO_SOCKET_REQ_GROUP:
+ r->req = FSLMC_VFIO_SOCKET_REQ_GROUP;
r->group_num = m->group_num;
fd = fslmc_vfio_group_fd_by_id(m->group_num);
if (fd < 0) {
@@ -530,9 +623,10 @@ fslmc_vfio_mp_primary(const struct rte_mp_msg *msg,
reply.num_fds = 1;
reply.fds[0] = fd;
}
+ reply.len_param = sizeof(*r);
break;
- case SOCKET_REQ_CONTAINER:
- r->req = SOCKET_REQ_CONTAINER;
+ case FSLMC_VFIO_SOCKET_REQ_CONTAINER:
+ r->req = FSLMC_VFIO_SOCKET_REQ_CONTAINER;
fd = fslmc_vfio_container_fd();
if (fd <= 0) {
r->result = SOCKET_ERR;
@@ -541,20 +635,73 @@ fslmc_vfio_mp_primary(const struct rte_mp_msg *msg,
reply.num_fds = 1;
reply.fds[0] = fd;
}
+ reply.len_param = sizeof(*r);
+ break;
+ case FSLMC_VFIO_SOCKET_REQ_MEM:
+ map = (void *)reply.param;
+ r = &map->mp_param;
+ r->req = FSLMC_VFIO_SOCKET_REQ_MEM;
+ r->result = SOCKET_OK;
+ rte_memcpy(&map->memsegs, &fslmc_memsegs,
+ sizeof(struct fslmc_dmaseg_list));
+ rte_memcpy(&map->iosegs, &fslmc_iosegs,
+ sizeof(struct fslmc_dmaseg_list));
+ map->mem_va2iova = fslmc_mem_va2iova;
+ map->mem_map_num = fslmc_mem_map_num;
+ reply.len_param = sizeof(struct fslmc_mem_param);
break;
default:
- DPAA2_BUS_ERR("fslmc vfio received invalid message(%08x)",
+ DPAA2_BUS_ERR("VFIO received invalid message(%08x)",
m->req);
return -ENOTSUP;
}
strcpy(reply.name, FSLMC_VFIO_MP);
- reply.len_param = sizeof(*r);
ret = rte_mp_reply(&reply, peer);
return ret;
}
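+
+/* Secondary process helper: fetch the primary's DMA segment lists and
+ * cached VA->IOVA offset over the FSLMC_VFIO_MP channel.
+ */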
+static int
+fslmc_vfio_mp_sync_mem_req(void)
+{
+ struct rte_mp_msg mp_req, *mp_rep;
+ struct rte_mp_reply mp_reply = {0};
+ struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+ int ret = 0;
+ struct vfio_mp_param *mp_param;
+ struct fslmc_mem_param *mem_rsp;
+
+ mp_param = (void *)mp_req.param;
+ memset(&mp_req, 0, sizeof(struct rte_mp_msg));
+ mp_param->req = FSLMC_VFIO_SOCKET_REQ_MEM;
+ strcpy(mp_req.name, FSLMC_VFIO_MP);
+ mp_req.len_param = sizeof(struct vfio_mp_param);
+ if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
+ mp_reply.nb_received == 1) {
+ mp_rep = &mp_reply.msgs[0];
+ mem_rsp = (struct fslmc_mem_param *)mp_rep->param;
+ if (mem_rsp->mp_param.result == SOCKET_OK) {
+ rte_memcpy(&fslmc_memsegs,
+ &mem_rsp->memsegs,
+ sizeof(struct fslmc_dmaseg_list));
+ rte_memcpy(&fslmc_iosegs,
+ &mem_rsp->iosegs,
+ sizeof(struct fslmc_dmaseg_list));
+ fslmc_mem_va2iova = mem_rsp->mem_va2iova;
+ fslmc_mem_map_num = mem_rsp->mem_map_num;
+ } else {
+ DPAA2_BUS_ERR("Invalid MEM segment reply from primary");
+ ret = -EINVAL;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+ free(mp_reply.msgs);
+
+ return ret;
+}
+
static int
fslmc_vfio_mp_sync_setup(void)
{
@@ -565,6 +712,10 @@ fslmc_vfio_mp_sync_setup(void)
fslmc_vfio_mp_primary);
if (ret && rte_errno != ENOTSUP)
return ret;
+ } else {
+ ret = fslmc_vfio_mp_sync_mem_req();
+ if (ret)
+ return ret;
}
return 0;
@@ -585,30 +736,34 @@ vfio_connect_container(int vfio_container_fd,
iommu_type = fslmc_vfio_iommu_type(vfio_group_fd);
if (iommu_type < 0) {
- DPAA2_BUS_ERR("Failed to get iommu type(%d)",
- iommu_type);
+ DPAA2_BUS_ERR("Failed to get IOMMU type, err(%d)", iommu_type);
return iommu_type;
}
/* Check whether support for SMMU type IOMMU present or not */
- if (ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, iommu_type)) {
- /* Connect group to container */
- ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
+ ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, iommu_type);
+ if (ret <= 0) {
+ DPAA2_BUS_ERR("Unsupported IOMMU type(%d), ret(%d), err(%d)",
+ iommu_type, ret, -errno);
+ return -EINVAL;
+ }
+
+ ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
&vfio_container_fd);
- if (ret) {
- DPAA2_BUS_ERR("Failed to setup group container");
- return -errno;
- }
+ if (ret) {
+ DPAA2_BUS_ERR("Failed to set group container, ret(%d), err(%d)",
+ ret, -errno);
- ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU, iommu_type);
- if (ret) {
- DPAA2_BUS_ERR("Failed to setup VFIO iommu");
- return -errno;
- }
- } else {
- DPAA2_BUS_ERR("No supported IOMMU available");
- return -EINVAL;
+ return ret;
+ }
+
+ ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU, iommu_type);
+ if (ret) {
+ DPAA2_BUS_ERR("Failed to set IOMMU, ret(%d), err(%d)",
+ ret, -errno);
+
+ return ret;
}
return fslmc_vfio_connect_container(vfio_group_fd);
@@ -629,11 +784,11 @@ static int vfio_map_irq_region(void)
fd = fslmc_vfio_group_fd_by_name(group_name);
if (fd <= 0) {
- DPAA2_BUS_ERR("%s failed to open group fd(%d)",
- __func__, fd);
+ DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)",
+ __func__, group_name, fd);
if (fd < 0)
return fd;
- return -rte_errno;
+ return -EIO;
}
if (!fslmc_vfio_container_connected(fd)) {
DPAA2_BUS_ERR("Container is not connected");
@@ -643,8 +798,8 @@ static int vfio_map_irq_region(void)
vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
PROT_READ, MAP_SHARED, fd, 0x6030000);
if (vaddr == MAP_FAILED) {
- DPAA2_BUS_INFO("Unable to map region (errno = %d)", errno);
- return -errno;
+ DPAA2_BUS_ERR("Unable to map region (errno = %d)", errno);
+ return -ENOMEM;
}
msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
@@ -654,141 +809,200 @@ static int vfio_map_irq_region(void)
return 0;
DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno);
- return -errno;
-}
-
-static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
-static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
-
-static void
-fslmc_memevent_cb(enum rte_mem_event type, const void *addr,
- size_t len, void *arg __rte_unused)
-{
- struct rte_memseg_list *msl;
- struct rte_memseg *ms;
- size_t cur_len = 0, map_len = 0;
- uint64_t virt_addr;
- rte_iova_t iova_addr;
- int ret;
-
- msl = rte_mem_virt2memseg_list(addr);
-
- while (cur_len < len) {
- const void *va = RTE_PTR_ADD(addr, cur_len);
-
- ms = rte_mem_virt2memseg(va, msl);
- iova_addr = ms->iova;
- virt_addr = ms->addr_64;
- map_len = ms->len;
-
- DPAA2_BUS_DEBUG("Request for %s, va=%p, "
- "virt_addr=0x%" PRIx64 ", "
- "iova=0x%" PRIx64 ", map_len=%zu",
- type == RTE_MEM_EVENT_ALLOC ?
- "alloc" : "dealloc",
- va, virt_addr, iova_addr, map_len);
-
- /* iova_addr may be set to RTE_BAD_IOVA */
- if (iova_addr == RTE_BAD_IOVA) {
- DPAA2_BUS_DEBUG("Segment has invalid iova, skipping\n");
- cur_len += map_len;
- continue;
- }
-
- if (type == RTE_MEM_EVENT_ALLOC)
- ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
- else
- ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);
-
- if (ret != 0) {
- DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. "
- "Map=%d, addr=%p, len=%zu, err:(%d)",
- type, va, map_len, ret);
- return;
- }
-
- cur_len += map_len;
- }
-
- if (type == RTE_MEM_EVENT_ALLOC)
- DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu",
- addr, len);
- else
- DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu",
- addr, len);
+ return ret;
}
static int
-fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr,
- size_t len)
+fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len)
{
struct vfio_iommu_type1_dma_map dma_map = {
.argsz = sizeof(struct vfio_iommu_type1_dma_map),
.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
};
- int ret, fd;
+ int ret, fd, is_io = 0;
const char *group_name = fslmc_vfio_get_group_name();
+ struct fslmc_dmaseg *dmaseg = NULL;
+ uint64_t phy = 0;
+
+ if (rte_eal_iova_mode() == RTE_IOVA_VA) {
+ if (vaddr != iovaddr) {
+ DPAA2_BUS_ERR("IOVA(%" PRIx64 ") != VA(%" PRIx64 ") in VA mode",
+ iovaddr, vaddr);
+
+ return -EINVAL;
+ }
+ }
+ phy = rte_mem_virt2phy((const void *)(uintptr_t)vaddr);
+ if (phy == RTE_BAD_IOVA) {
+ phy = fslmc_io_virt2phy((const void *)(uintptr_t)vaddr);
+ if (phy == RTE_BAD_IOVA)
+ return -ENOMEM;
+ is_io = 1;
+ } else if (fslmc_mem_va2iova != RTE_BAD_IOVA &&
+ fslmc_mem_va2iova != (iovaddr - vaddr)) {
+ DPAA2_BUS_WARN("Mapping offset differs from cached VA->IOVA offset.");
+ }
+ DPAA2_BUS_DEBUG("%s(%zu): VA(%" PRIx64 "):IOVA(%" PRIx64 "):PHY(%" PRIx64 ")",
+ is_io ? "DMA IO map size" : "DMA MEM map size",
+ len, vaddr, iovaddr, phy);
+
+ if (is_io)
+ goto io_mapping_check;
+
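+ /* Reject any mapping whose VA or IOVA range overlaps an already recorded
+ * MEM segment; segments must stay disjoint for list-based translation.
+ */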
+ TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) {
+ if (!((vaddr + len) <= dmaseg->vaddr ||
+ (dmaseg->vaddr + dmaseg->size) <= vaddr)) {
+ DPAA2_BUS_ERR("MEM: New VA Range(%" PRIx64 " ~ %" PRIx64 ")",
+ vaddr, vaddr + len);
+ DPAA2_BUS_ERR("MEM: Overlap with (%" PRIx64 " ~ %" PRIx64 ")",
+ dmaseg->vaddr,
+ dmaseg->vaddr + dmaseg->size);
+ return -EEXIST;
+ }
+ if (!((iovaddr + len) <= dmaseg->iova ||
+ (dmaseg->iova + dmaseg->size) <= iovaddr)) {
+ DPAA2_BUS_ERR("MEM: New IOVA Range(%" PRIx64 " ~ %" PRIx64 ")",
+ iovaddr, iovaddr + len);
+ DPAA2_BUS_ERR("MEM: Overlap with (%" PRIx64 " ~ %" PRIx64 ")",
+ dmaseg->iova,
+ dmaseg->iova + dmaseg->size);
+ return -EEXIST;
+ }
+ }
+ goto start_mapping;
+
+io_mapping_check:
+ TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) {
+ if (!((vaddr + len) <= dmaseg->vaddr ||
+ (dmaseg->vaddr + dmaseg->size) <= vaddr)) {
+ DPAA2_BUS_ERR("IO: New VA Range (%" PRIx64 " ~ %" PRIx64 ")",
+ vaddr, vaddr + len);
+ DPAA2_BUS_ERR("IO: Overlap with (%" PRIx64 " ~ %" PRIx64 ")",
+ dmaseg->vaddr,
+ dmaseg->vaddr + dmaseg->size);
+ return -EEXIST;
+ }
+ if (!((iovaddr + len) <= dmaseg->iova ||
+ (dmaseg->iova + dmaseg->size) <= iovaddr)) {
+ DPAA2_BUS_ERR("IO: New IOVA Range(%" PRIx64 " ~ %" PRIx64 ")",
+ iovaddr, iovaddr + len);
+ DPAA2_BUS_ERR("IO: Overlap with (%" PRIx64 " ~ %" PRIx64 ")",
+ dmaseg->iova,
+ dmaseg->iova + dmaseg->size);
+ return -EEXIST;
+ }
+ }
+
+start_mapping:
fd = fslmc_vfio_group_fd_by_name(group_name);
if (fd <= 0) {
- DPAA2_BUS_ERR("%s failed to open group fd(%d)",
- __func__, fd);
+ DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)",
+ __func__, group_name, fd);
if (fd < 0)
return fd;
- return -rte_errno;
+ return -EIO;
}
if (fslmc_vfio_iommu_type(fd) == RTE_VFIO_NOIOMMU) {
DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
- return 0;
+ if (phy != iovaddr) {
+ DPAA2_BUS_ERR("NOIOMMU mode requires IOVA to equal PA");
+ return -EIO;
+ }
+ goto end_mapping;
}
dma_map.size = len;
dma_map.vaddr = vaddr;
dma_map.iova = iovaddr;
-#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- if (vaddr != iovaddr) {
- DPAA2_BUS_WARN("vaddr(0x%lx) != iovaddr(0x%lx)",
- vaddr, iovaddr);
- }
-#endif
-
/* SET DMA MAP for IOMMU */
if (!fslmc_vfio_container_connected(fd)) {
- DPAA2_BUS_ERR("Container is not connected ");
+ DPAA2_BUS_ERR("Container is not connected");
return -EIO;
}
- DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
- (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
ret = ioctl(fslmc_vfio_container_fd(), VFIO_IOMMU_MAP_DMA,
&dma_map);
if (ret) {
- DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)",
- errno);
+ DPAA2_BUS_ERR("%s(%d) VA(%" PRIx64 "):IOVA(%" PRIx64 "):PHY(%" PRIx64 ")",
+ is_io ? "DMA IO map err" : "DMA MEM map err",
+ errno, vaddr, iovaddr, phy);
return ret;
}
+end_mapping:
+ dmaseg = malloc(sizeof(struct fslmc_dmaseg));
+ if (!dmaseg) {
+ DPAA2_BUS_ERR("DMA segment malloc failed!");
+ return -ENOMEM;
+ }
+ dmaseg->vaddr = vaddr;
+ dmaseg->iova = iovaddr;
+ dmaseg->size = len;
+ if (is_io) {
+ TAILQ_INSERT_TAIL(&fslmc_iosegs, dmaseg, next);
+ } else {
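+ /* Maintain the single-offset fast path: the first MEM mapping caches
+ * its VA->IOVA offset; any further MEM mapping invalidates it.
+ */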
+ fslmc_mem_map_num++;
+ if (fslmc_mem_map_num == 1)
+ fslmc_mem_va2iova = iovaddr - vaddr;
+ else
+ fslmc_mem_va2iova = RTE_BAD_IOVA;
+ TAILQ_INSERT_TAIL(&fslmc_memsegs, dmaseg, next);
+ }
+ DPAA2_BUS_LOG(NOTICE,
+ "%s(%zx): VA(%" PRIx64 "):IOVA(%" PRIx64 "):PHY(%" PRIx64 ")",
+ is_io ? "DMA IO map size" : "DMA MEM map size",
+ len, vaddr, iovaddr, phy);
+
return 0;
}
static int
-fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
+fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr, size_t len)
{
struct vfio_iommu_type1_dma_unmap dma_unmap = {
.argsz = sizeof(struct vfio_iommu_type1_dma_unmap),
.flags = 0,
};
- int ret, fd;
+ int ret, fd, is_io = 0;
const char *group_name = fslmc_vfio_get_group_name();
+ struct fslmc_dmaseg *dmaseg = NULL;
+
+ TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) {
+ if ((!vaddr || dmaseg->vaddr == vaddr) &&
+ dmaseg->iova == iovaddr &&
+ dmaseg->size == len) {
+ is_io = 0;
+ break;
+ }
+ }
+
+ if (!dmaseg) {
+ TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) {
+ if ((!vaddr || dmaseg->vaddr == vaddr) &&
+ dmaseg->iova == iovaddr &&
+ dmaseg->size == len) {
+ is_io = 1;
+ break;
+ }
+ }
+ }
+
+ if (!dmaseg) {
+ DPAA2_BUS_ERR("IOVA(%" PRIx64 ") with length(%zx) not mapped",
+ iovaddr, len);
+ return 0;
+ }
fd = fslmc_vfio_group_fd_by_name(group_name);
if (fd <= 0) {
- DPAA2_BUS_ERR("%s failed to open group fd(%d)",
- __func__, fd);
+ DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)",
+ __func__, group_name, fd);
if (fd < 0)
return fd;
- return -rte_errno;
+ return -EIO;
}
if (fslmc_vfio_iommu_type(fd) == RTE_VFIO_NOIOMMU) {
DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
@@ -796,7 +1010,7 @@ fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
}
dma_unmap.size = len;
- dma_unmap.iova = vaddr;
+ dma_unmap.iova = iovaddr;
/* SET DMA MAP for IOMMU */
if (!fslmc_vfio_container_connected(fd)) {
@@ -804,19 +1018,162 @@ fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
return -EIO;
}
- DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
- (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
ret = ioctl(fslmc_vfio_container_fd(), VFIO_IOMMU_UNMAP_DMA,
&dma_unmap);
if (ret) {
- DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)",
- errno);
- return -1;
+ DPAA2_BUS_ERR("DMA un-map IOVA(%" PRIx64 " ~ %" PRIx64 ") err(%d)",
+ iovaddr, iovaddr + len, errno);
+ return ret;
+ }
+
+ if (is_io) {
+ TAILQ_REMOVE(&fslmc_iosegs, dmaseg, next);
+ } else {
+ TAILQ_REMOVE(&fslmc_memsegs, dmaseg, next);
+ fslmc_mem_map_num--;
+ if (TAILQ_EMPTY(&fslmc_memsegs))
+ fslmc_mem_va2iova = RTE_BAD_IOVA;
}
+ free(dmaseg);
+
return 0;
}
+uint64_t
+rte_fslmc_cold_mem_vaddr_to_iova(void *vaddr,
+ uint64_t size)
+{
+ struct fslmc_dmaseg *dmaseg;
+ uint64_t va;
+
+ va = (uint64_t)vaddr;
+ TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) {
+ if (va >= dmaseg->vaddr &&
+ (va + size) <= (dmaseg->vaddr + dmaseg->size)) {
+ return dmaseg->iova + va - dmaseg->vaddr;
+ }
+ }
+
+ return RTE_BAD_IOVA;
+}
+
+void *
+rte_fslmc_cold_mem_iova_to_vaddr(uint64_t iova,
+ uint64_t size)
+{
+ struct fslmc_dmaseg *dmaseg;
+
+ TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) {
+ if (iova >= dmaseg->iova &&
+ (iova + size) <= (dmaseg->iova + dmaseg->size))
+ return (void *)((uintptr_t)dmaseg->vaddr + (uintptr_t)(iova - dmaseg->iova));
+ }
+
+ return NULL;
+}
+
+__hot uint64_t
+rte_fslmc_mem_vaddr_to_iova(void *vaddr)
+{
+ if (likely(fslmc_mem_va2iova != RTE_BAD_IOVA))
+ return (uint64_t)vaddr + fslmc_mem_va2iova;
+
+ return rte_fslmc_cold_mem_vaddr_to_iova(vaddr, 0);
+}
+
+__hot void *
+rte_fslmc_mem_iova_to_vaddr(uint64_t iova)
+{
+ if (likely(fslmc_mem_va2iova != RTE_BAD_IOVA))
+ return (void *)((uintptr_t)iova - (uintptr_t)fslmc_mem_va2iova);
+
+ return rte_fslmc_cold_mem_iova_to_vaddr(iova, 0);
+}
+
+uint64_t
+rte_fslmc_io_vaddr_to_iova(void *vaddr)
+{
+ struct fslmc_dmaseg *dmaseg = NULL;
+ uint64_t va = (uint64_t)vaddr;
+
+ TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) {
+ if ((va >= dmaseg->vaddr) &&
+ va < dmaseg->vaddr + dmaseg->size)
+ return dmaseg->iova + va - dmaseg->vaddr;
+ }
+
+ return RTE_BAD_IOVA;
+}
+
+void *
+rte_fslmc_io_iova_to_vaddr(uint64_t iova)
+{
+ struct fslmc_dmaseg *dmaseg = NULL;
+
+ TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) {
+ if ((iova >= dmaseg->iova) &&
+ iova < dmaseg->iova + dmaseg->size)
+ return (void *)((uintptr_t)dmaseg->vaddr + (uintptr_t)(iova - dmaseg->iova));
+ }
+
+ return NULL;
+}
+
+static void
+fslmc_memevent_cb(enum rte_mem_event type, const void *addr,
+ size_t len, void *arg __rte_unused)
+{
+ struct rte_memseg_list *msl;
+ struct rte_memseg *ms;
+ size_t cur_len = 0, map_len = 0;
+ uint64_t virt_addr;
+ rte_iova_t iova_addr;
+ int ret;
+
+ msl = rte_mem_virt2memseg_list(addr);
+
+ while (cur_len < len) {
+ const void *va = RTE_PTR_ADD(addr, cur_len);
+
+ ms = rte_mem_virt2memseg(va, msl);
+ iova_addr = ms->iova;
+ virt_addr = ms->addr_64;
+ map_len = ms->len;
+
+ DPAA2_BUS_DEBUG("%s, va=%p, virt=%" PRIx64 ", iova=%" PRIx64 ", len=%zu",
+ type == RTE_MEM_EVENT_ALLOC ? "alloc" : "dealloc",
+ va, virt_addr, iova_addr, map_len);
+
+ /* iova_addr may be set to RTE_BAD_IOVA */
+ if (iova_addr == RTE_BAD_IOVA) {
+ DPAA2_BUS_DEBUG("Segment has invalid iova, skipping");
+ cur_len += map_len;
+ continue;
+ }
+
+ if (type == RTE_MEM_EVENT_ALLOC)
+ ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
+ else
+ ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);
+
+ if (ret != 0) {
+ DPAA2_BUS_ERR("%s failed: addr=%p, len=%zu, err(%d)",
+ type == RTE_MEM_EVENT_ALLOC ?
+ "DMA mapping" : "DMA unmapping",
+ va, map_len, ret);
+ return;
+ }
+
+ cur_len += map_len;
+ }
+
+ DPAA2_BUS_DEBUG("Total %s: addr=%p, len=%zu",
+ type == RTE_MEM_EVENT_ALLOC ? "Mapped" : "Unmapped",
+ addr, len);
+}
+
static int
fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
const struct rte_memseg *ms, void *arg)
@@ -848,7 +1205,7 @@ __rte_internal
int
rte_fslmc_vfio_mem_dmaunmap(uint64_t iova, uint64_t size)
{
- return fslmc_unmap_dma(iova, 0, size);
+ return fslmc_unmap_dma(0, iova, size);
}
int rte_fslmc_vfio_dmamap(void)
@@ -858,9 +1215,10 @@ int rte_fslmc_vfio_dmamap(void)
/* Lock before parsing and registering callback to memory subsystem */
rte_mcfg_mem_read_lock();
- if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
+ ret = rte_memseg_walk(fslmc_dmamap_seg, &i);
+ if (ret) {
rte_mcfg_mem_read_unlock();
- return -1;
+ return ret;
}
ret = rte_mem_event_callback_register("fslmc_memevent_clb",
@@ -899,6 +1257,14 @@ fslmc_vfio_setup_device(const char *dev_addr,
const char *group_name = fslmc_vfio_get_group_name();
vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name);
+ if (vfio_group_fd <= 0) {
+ DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)",
+ __func__, group_name, vfio_group_fd);
+ if (vfio_group_fd < 0)
+ return vfio_group_fd;
+ return -EIO;
+ }
+
if (!fslmc_vfio_container_connected(vfio_group_fd)) {
DPAA2_BUS_ERR("Container is not connected");
return -EIO;
@@ -1007,8 +1373,7 @@ int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index)
vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret)
- DPAA2_BUS_ERR(
- "Error disabling dpaa2 interrupts for fd %d",
+ DPAA2_BUS_ERR("Error disabling dpaa2 interrupts for fd %d",
rte_intr_fd_get(intr_handle));
return ret;
@@ -1033,7 +1398,7 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
if (ret < 0) {
DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)",
i, errno, strerror(errno));
- return -1;
+ return ret;
}
/* if this vector cannot be used with eventfd,
@@ -1047,8 +1412,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
if (fd < 0) {
DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)",
- errno, strerror(errno));
- return -1;
+ errno, strerror(errno));
+ return fd;
}
if (rte_intr_fd_set(intr_handle, fd))
@@ -1064,7 +1429,7 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
}
/* if we're here, we haven't found a suitable interrupt vector */
- return -1;
+ return -EIO;
}
static void
@@ -1238,6 +1603,13 @@ fslmc_vfio_close_group(void)
const char *group_name = fslmc_vfio_get_group_name();
vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name);
+ if (vfio_group_fd <= 0) {
+ DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)",
+ __func__, group_name, vfio_group_fd);
+ if (vfio_group_fd < 0)
+ return vfio_group_fd;
+ return -EIO;
+ }
RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
if (dev->device.devargs &&
@@ -1329,7 +1701,7 @@ fslmc_vfio_process_group(void)
ret = fslmc_process_mcp(dev);
if (ret) {
DPAA2_BUS_ERR("Unable to map MC Portal");
- return -1;
+ return ret;
}
found_mportal = 1;
}
@@ -1346,7 +1718,7 @@ fslmc_vfio_process_group(void)
/* Cannot continue if there is not even a single mportal */
if (!found_mportal) {
DPAA2_BUS_ERR("No MC Portal device found. Not continuing");
- return -1;
+ return -EIO;
}
/* Search for DPRC device next as it updates endpoint of
@@ -1358,7 +1730,7 @@ fslmc_vfio_process_group(void)
ret = fslmc_process_iodevices(dev);
if (ret) {
DPAA2_BUS_ERR("Unable to process dprc");
- return -1;
+ return ret;
}
TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
}
@@ -1415,7 +1787,7 @@ fslmc_vfio_process_group(void)
if (ret) {
DPAA2_BUS_DEBUG("Dev (%s) init failed",
dev->device.name);
- return -1;
+ return ret;
}
break;
@@ -1439,7 +1811,7 @@ fslmc_vfio_process_group(void)
if (ret) {
DPAA2_BUS_DEBUG("Dev (%s) init failed",
dev->device.name);
- return -1;
+ return ret;
}
break;
@@ -1468,9 +1840,9 @@ fslmc_vfio_setup_group(void)
vfio_container_fd = fslmc_vfio_container_fd();
if (vfio_container_fd <= 0) {
vfio_container_fd = fslmc_vfio_open_container_fd();
- if (vfio_container_fd <= 0) {
+ if (vfio_container_fd < 0) {
DPAA2_BUS_ERR("Failed to create MC VFIO container");
- return -rte_errno;
+ return vfio_container_fd;
}
}
@@ -1483,6 +1855,8 @@ fslmc_vfio_setup_group(void)
if (vfio_group_fd <= 0) {
vfio_group_fd = fslmc_vfio_open_group_fd(group_name);
if (vfio_group_fd <= 0) {
+ DPAA2_BUS_ERR("%s: open group name(%s) failed(%d)",
+ __func__, group_name, vfio_group_fd);
if (!vfio_group_fd)
close(vfio_group_fd);
DPAA2_BUS_ERR("Failed to create MC VFIO group");
@@ -11,6 +11,10 @@
#include <rte_compat.h>
#include <rte_vfio.h>
+#ifndef __hot
+#define __hot __attribute__((hot))
+#endif
+
/* Pathname of FSL-MC devices directory. */
#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
#define DPAA2_MC_DPNI_DEVID 7
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016,2020 NXP
+ * Copyright 2016,2020-2023 NXP
*
*/
@@ -28,7 +28,6 @@
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
-
TAILQ_HEAD(dpbp_dev_list, dpaa2_dpbp_dev);
static struct dpbp_dev_list dpbp_dev_list
= TAILQ_HEAD_INITIALIZER(dpbp_dev_list); /*!< DPBP device list */
@@ -332,9 +332,8 @@ dpaa2_affine_qbman_swp(void)
}
RTE_PER_LCORE(_dpaa2_io).dpio_dev = dpio_dev;
- DPAA2_BUS_INFO(
- "DPAA Portal=%p (%d) is affined to thread %" PRIu64,
- dpio_dev, dpio_dev->index, tid);
+ DPAA2_BUS_DEBUG("Portal[%d] is affined to thread %" PRIu64,
+ dpio_dev->index, tid);
}
return 0;
}
@@ -354,9 +353,8 @@ dpaa2_affine_qbman_ethrx_swp(void)
}
RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev = dpio_dev;
- DPAA2_BUS_INFO(
- "DPAA Portal=%p (%d) is affined for eth rx to thread %"
- PRIu64, dpio_dev, dpio_dev->index, tid);
+ DPAA2_BUS_DEBUG("Portal_eth_rx[%d] is affined to thread %" PRIu64,
+ dpio_dev->index, tid);
}
return 0;
}
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2023 NXP
*
*/
@@ -12,6 +12,7 @@
#include <mc/fsl_mc_sys.h>
#include <rte_compat.h>
+#include <dpaa2_hw_pvt.h>
struct dpaa2_io_portal_t {
struct dpaa2_dpio_dev *dpio_dev;
@@ -14,6 +14,7 @@
#include <mc/fsl_mc_sys.h>
#include <fsl_qbman_portal.h>
+#include <bus_fslmc_driver.h>
#ifndef false
#define false 0
@@ -80,6 +81,8 @@
#define DPAA2_PACKET_LAYOUT_ALIGN 64 /*changing from 256 */
#define DPAA2_DPCI_MAX_QUEUES 2
+#define DPAA2_INVALID_FLOW_ID 0xffff
+#define DPAA2_INVALID_CGID 0xff
struct dpaa2_queue;
@@ -365,83 +368,63 @@ enum qbman_fd_format {
*/
#define DPAA2_EQ_RESP_ALWAYS 1
-/* Various structures representing contiguous memory maps */
-struct dpaa2_memseg {
- TAILQ_ENTRY(dpaa2_memseg) next;
- char *vaddr;
- rte_iova_t iova;
- size_t len;
-};
-
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-extern uint8_t dpaa2_virt_mode;
-static void *dpaa2_mem_ptov(phys_addr_t paddr) __rte_unused;
-
-static void *dpaa2_mem_ptov(phys_addr_t paddr)
+static inline uint64_t
+dpaa2_mem_va_to_iova(void *va)
{
- void *va;
-
- if (dpaa2_virt_mode)
- return (void *)(size_t)paddr;
-
- va = (void *)dpaax_iova_table_get_va(paddr);
- if (likely(va != NULL))
- return va;
-
- /* If not, Fallback to full memseg list searching */
- va = rte_mem_iova2virt(paddr);
+ if (likely(rte_eal_iova_mode() == RTE_IOVA_VA))
+ return (uint64_t)va;
- return va;
+ return rte_fslmc_mem_vaddr_to_iova(va);
}
-static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __rte_unused;
-
-static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
+static inline void *
+dpaa2_mem_iova_to_va(uint64_t iova)
{
- const struct rte_memseg *memseg;
-
- if (dpaa2_virt_mode)
- return vaddr;
+ if (likely(rte_eal_iova_mode() == RTE_IOVA_VA))
+ return (void *)(uintptr_t)iova;
- memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
- if (memseg)
- return memseg->iova + RTE_PTR_DIFF(vaddr, memseg->addr);
- return (size_t)NULL;
+ return rte_fslmc_mem_iova_to_vaddr(iova);
}
-/**
- * When we are using Physical addresses as IO Virtual Addresses,
- * Need to call conversion routines dpaa2_mem_vtop & dpaa2_mem_ptov
- * wherever required.
- * These routines are called with help of below MACRO's
- */
-
#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)
-
-/**
- * macro to convert Virtual address to IOVA
- */
-#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr))
-
-/**
- * macro to convert IOVA to Virtual address
- */
-#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova))
-
-/**
- * macro to convert modify the memory containing IOVA to Virtual address
- */
+#define DPAA2_VADDR_TO_IOVA(_vaddr) \
+ dpaa2_mem_va_to_iova((void *)(uintptr_t)_vaddr)
+#define DPAA2_IOVA_TO_VADDR(_iova) \
+ dpaa2_mem_iova_to_va((uint64_t)_iova)
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
- {_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); }
+ {_mem = (_type)DPAA2_IOVA_TO_VADDR(_mem); }
+
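+/* Mode-specific shortcuts for callers that have already resolved the IOVA
+ * mode and want to skip the per-call rte_eal_iova_mode() check.
+ */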
+#define DPAA2_VAMODE_VADDR_TO_IOVA(_vaddr) ((uint64_t)(_vaddr))
+#define DPAA2_VAMODE_IOVA_TO_VADDR(_iova) ((void *)(uintptr_t)(_iova))
+#define DPAA2_VAMODE_MODIFY_IOVA_TO_VADDR(_mem, _type) \
+ {_mem = (_type)(_mem); }
+
+#define DPAA2_PAMODE_VADDR_TO_IOVA(_vaddr) \
+ rte_fslmc_mem_vaddr_to_iova((void *)(_vaddr))
+#define DPAA2_PAMODE_IOVA_TO_VADDR(_iova) \
+ rte_fslmc_mem_iova_to_vaddr((uint64_t)(_iova))
+#define DPAA2_PAMODE_MODIFY_IOVA_TO_VADDR(_mem, _type) \
+ {_mem = (_type)rte_fslmc_mem_iova_to_vaddr(_mem); }
+
+static inline uint64_t
+dpaa2_mem_va_to_iova_check(void *va, uint64_t size)
+{
+ uint64_t iova = rte_fslmc_cold_mem_vaddr_to_iova(va, size);
-#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+ if (iova == RTE_BAD_IOVA)
+ return RTE_BAD_IOVA;
-#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
-#define DPAA2_VADDR_TO_IOVA(_vaddr) (phys_addr_t)(_vaddr)
-#define DPAA2_IOVA_TO_VADDR(_iova) (void *)(_iova)
-#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
+ /* Double-check that the IOVA is valid. */
+ if (iova != rte_mem_virt2iova(va))
+ return RTE_BAD_IOVA;
+
+ return iova;
+}
-#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
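+/* Checked variants for one-time setup paths: translations are bound-checked
+ * against the recorded segments; the VA->IOVA variant additionally
+ * cross-checks the result against rte_mem_virt2iova().
+ */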
+#define DPAA2_VADDR_TO_IOVA_AND_CHECK(_vaddr, size) \
+ dpaa2_mem_va_to_iova_check(_vaddr, size)
+#define DPAA2_IOVA_TO_VADDR_AND_CHECK(_iova, size) \
+ rte_fslmc_cold_mem_iova_to_vaddr(_iova, size)
static inline
int check_swp_active_dqs(uint16_t dpio_index)
@@ -24,7 +24,6 @@ INTERNAL {
dpaa2_seqn_dynfield_offset;
dpaa2_seqn;
dpaa2_svr_family;
- dpaa2_virt_mode;
dpbp_disable;
dpbp_enable;
dpbp_get_attributes;
@@ -119,6 +118,12 @@ INTERNAL {
rte_fslmc_object_register;
rte_global_active_dqs_list;
rte_fslmc_vfio_mem_dmaunmap;
+ rte_fslmc_cold_mem_vaddr_to_iova;
+ rte_fslmc_cold_mem_iova_to_vaddr;
+ rte_fslmc_mem_vaddr_to_iova;
+ rte_fslmc_mem_iova_to_vaddr;
+ rte_fslmc_io_vaddr_to_iova;
+ rte_fslmc_io_iova_to_vaddr;
local: *;
};
@@ -10,6 +10,7 @@
#include <mc/fsl_dpdmai.h>
+#include <dpaa2_hw_dpio.h>
#include "rte_pmd_dpaa2_qdma.h"
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"