[v5,2/5] bus/cdx: add DMA map and unmap support
Checks
Commit Message
AMD CDX bus can use VFIO interface for mapping and unmapping
of DMA addresses in the IOMMU. This change adds the callback
support for map and unmap APIs as well as fetching the IOMMU
class.
Signed-off-by: Nipun Gupta <nipun.gupta@amd.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
drivers/bus/cdx/cdx.c | 40 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
Comments
On Thu, May 25, 2023 at 12:09 PM Nipun Gupta <nipun.gupta@amd.com> wrote:
>
> AMD CDX bus can use VFIO interface for mapping and unmapping
> of DMA addresses in the IOMMU. This change adds the callback
> support for map and unmap APIs as well as fetching the IOMMU
> class.
>
> Signed-off-by: Nipun Gupta <nipun.gupta@amd.com>
> Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
> ---
> drivers/bus/cdx/cdx.c | 40 ++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 40 insertions(+)
>
> diff --git a/drivers/bus/cdx/cdx.c b/drivers/bus/cdx/cdx.c
> index 1ddb5a92f7..64ea879f3b 100644
> --- a/drivers/bus/cdx/cdx.c
> +++ b/drivers/bus/cdx/cdx.c
> @@ -502,12 +502,52 @@ cdx_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
> return NULL;
> }
>
> +static int
> +cdx_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
> +{
> + struct rte_cdx_device *cdx_dev = RTE_DEV_TO_CDX_DEV(dev);
> +
> + if (!cdx_dev) {
> + rte_errno = EINVAL;
> + return -1;
> + }
RTE_DEV_TO_CDX_DEV() applies an offset to dev, so the result is non-NULL
even when dev itself is NULL — this check can never trigger.
Checking dev != NULL directly would be a better check.
But on the other hand, this dma_map op is only ever invoked through
dev->bus->dma_map, which means dev is necessarily valid at this point.
So checking dev is useless too.
> +
> + return rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
> + (uintptr_t)addr, iova, len);
> +}
> +
> +static int
> +cdx_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
> +{
> + struct rte_cdx_device *cdx_dev = RTE_DEV_TO_CDX_DEV(dev);
> +
> + if (!cdx_dev) {
> + rte_errno = EINVAL;
> + return -1;
> + }
Same comment as for cdx_dma_map above: the NULL check on the offset
result is ineffective, and dev is guaranteed valid by the caller anyway.
> +
> + return rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
> + (uintptr_t)addr, iova, len);
> +}
> +
> +static enum rte_iova_mode
> +cdx_get_iommu_class(void)
> +{
> + if (TAILQ_EMPTY(&rte_cdx_bus.device_list))
> + return RTE_IOVA_DC;
> +
> + return RTE_IOVA_VA;
> +}
> +
> struct rte_cdx_bus rte_cdx_bus = {
> .bus = {
> .scan = cdx_scan,
> .probe = cdx_probe,
> .find_device = cdx_find_device,
> .parse = cdx_parse,
> + .dma_map = cdx_dma_map,
> + .dma_unmap = cdx_dma_unmap,
> + .get_iommu_class = cdx_get_iommu_class,
> },
> .device_list = TAILQ_HEAD_INITIALIZER(rte_cdx_bus.device_list),
> .driver_list = TAILQ_HEAD_INITIALIZER(rte_cdx_bus.driver_list),
> --
> 2.17.1
>
@@ -502,12 +502,52 @@ cdx_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
return NULL;
}
+static int
+cdx_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
+{
+ struct rte_cdx_device *cdx_dev = RTE_DEV_TO_CDX_DEV(dev);
+
+ if (!cdx_dev) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ return rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
+ (uintptr_t)addr, iova, len);
+}
+
+static int
+cdx_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
+{
+ struct rte_cdx_device *cdx_dev = RTE_DEV_TO_CDX_DEV(dev);
+
+ if (!cdx_dev) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ return rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
+ (uintptr_t)addr, iova, len);
+}
+
+static enum rte_iova_mode
+cdx_get_iommu_class(void)
+{
+ if (TAILQ_EMPTY(&rte_cdx_bus.device_list))
+ return RTE_IOVA_DC;
+
+ return RTE_IOVA_VA;
+}
+
struct rte_cdx_bus rte_cdx_bus = {
.bus = {
.scan = cdx_scan,
.probe = cdx_probe,
.find_device = cdx_find_device,
.parse = cdx_parse,
+ .dma_map = cdx_dma_map,
+ .dma_unmap = cdx_dma_unmap,
+ .get_iommu_class = cdx_get_iommu_class,
},
.device_list = TAILQ_HEAD_INITIALIZER(rte_cdx_bus.device_list),
.driver_list = TAILQ_HEAD_INITIALIZER(rte_cdx_bus.driver_list),