[v10,04/16] dma/idxd: create dmadev instances on bus probe
Checks
Commit Message
When a suitable device is found during the bus scan/probe, create a dmadev
instance for each HW queue. Internal structures required for device
creation are also added.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
drivers/dma/idxd/idxd_bus.c | 19 ++++++++++
drivers/dma/idxd/idxd_common.c | 61 ++++++++++++++++++++++++++++++++
drivers/dma/idxd/idxd_hw_defs.h | 27 ++++++++++++++
drivers/dma/idxd/idxd_internal.h | 7 ++++
4 files changed, 114 insertions(+)
create mode 100644 drivers/dma/idxd/idxd_hw_defs.h
Comments
On 2021/10/19 22:10, Kevin Laatz wrote:
> When a suitable device is found during the bus scan/probe, create a dmadev
> instance for each HW queue. Internal structures required for device
> creation are also added.
>
[snip]
> static void *
> idxd_bus_mmap_wq(struct rte_dsa_device *dev)
> {
> @@ -206,6 +218,7 @@ idxd_probe_dsa(struct rte_dsa_device *dev)
> return -1;
> idxd.max_batch_size = ret;
> idxd.qid = dev->addr.wq_id;
> + idxd.u.bus.dsa_id = dev->addr.device_id;
> idxd.sva_support = 1;
>
> idxd.portal = idxd_bus_mmap_wq(dev);
> @@ -214,6 +227,12 @@ idxd_probe_dsa(struct rte_dsa_device *dev)
> return -ENOENT;
> }
>
> + ret = idxd_dmadev_create(dev->wq_name, &dev->device, &idxd, &idxd_bus_ops);
> + if (ret) {
> + IDXD_PMD_ERR("Failed to create rawdev %s", dev->wq_name);
rawdev -> dmadev
> + return ret;
> + }
> +
> return 0;
> }
>
> diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
> index e00ddbe5ef..5abff34292 100644
> --- a/drivers/dma/idxd/idxd_common.c
> +++ b/drivers/dma/idxd/idxd_common.c
> @@ -2,10 +2,71 @@
> * Copyright 2021 Intel Corporation
> */
>
> +#include <rte_malloc.h>
> +#include <rte_common.h>
> #include <rte_log.h>
>
> #include "idxd_internal.h"
>
> +#define IDXD_PMD_NAME_STR "dmadev_idxd"
> +
> +int
> +idxd_dmadev_create(const char *name, struct rte_device *dev,
> + const struct idxd_dmadev *base_idxd,
> + const struct rte_dma_dev_ops *ops)
> +{
> + struct idxd_dmadev *idxd = NULL;
> + struct rte_dma_dev *dmadev = NULL;
> + int ret = 0;
> +
> + if (!name) {
> + IDXD_PMD_ERR("Invalid name of the device!");
> + ret = -EINVAL;
> + goto cleanup;
> + }
> +
> + /* Allocate device structure */
> + dmadev = rte_dma_pmd_allocate(name, dev->numa_node, sizeof(struct idxd_dmadev));
> + if (dmadev == NULL) {
> + IDXD_PMD_ERR("Unable to allocate raw device");
raw -> dma
It would also be worth checking for other occurrences of the 'raw' keyword throughout the patch set.
> + ret = -ENOMEM;
> + goto cleanup;
> + }
> + dmadev->dev_ops = ops;
> + dmadev->device = dev;
> +
> + idxd = dmadev->data->dev_private;
> + *idxd = *base_idxd; /* copy over the main fields already passed in */
> + idxd->dmadev = dmadev;
> +
> + /* allocate batch index ring and completion ring.
> + * The +1 is because we can never fully use
> + * the ring, otherwise read == write means both full and empty.
> + */
> + idxd->batch_comp_ring = rte_zmalloc(NULL, (sizeof(idxd->batch_idx_ring[0]) +
> + sizeof(idxd->batch_comp_ring[0])) * (idxd->max_batches + 1),
> + sizeof(idxd->batch_comp_ring[0]));
I infer that batch_comp_ring will be accessed by hardware, so it may be better to use rte_zmalloc_socket() here:
rte_zmalloc() allocates on the socket returned by rte_socket_id(), which may be a different socket from the device's at the time of the call.
> + if (idxd->batch_comp_ring == NULL) {
> + IDXD_PMD_ERR("Unable to reserve memory for batch data\n");
> + ret = -ENOMEM;
>
[snip]
@@ -85,6 +85,18 @@ dsa_get_sysfs_path(void)
return path ? path : DSA_SYSFS_PATH;
}
+static int
+idxd_dev_close(struct rte_dma_dev *dev)
+{
+ struct idxd_dmadev *idxd = dev->data->dev_private;
+ munmap(idxd->portal, 0x1000);
+ return 0;
+}
+
+static const struct rte_dma_dev_ops idxd_bus_ops = {
+ .dev_close = idxd_dev_close,
+};
+
static void *
idxd_bus_mmap_wq(struct rte_dsa_device *dev)
{
@@ -206,6 +218,7 @@ idxd_probe_dsa(struct rte_dsa_device *dev)
return -1;
idxd.max_batch_size = ret;
idxd.qid = dev->addr.wq_id;
+ idxd.u.bus.dsa_id = dev->addr.device_id;
idxd.sva_support = 1;
idxd.portal = idxd_bus_mmap_wq(dev);
@@ -214,6 +227,12 @@ idxd_probe_dsa(struct rte_dsa_device *dev)
return -ENOENT;
}
+ ret = idxd_dmadev_create(dev->wq_name, &dev->device, &idxd, &idxd_bus_ops);
+ if (ret) {
+ IDXD_PMD_ERR("Failed to create rawdev %s", dev->wq_name);
+ return ret;
+ }
+
return 0;
}
@@ -2,10 +2,71 @@
* Copyright 2021 Intel Corporation
*/
+#include <rte_malloc.h>
+#include <rte_common.h>
#include <rte_log.h>
#include "idxd_internal.h"
+#define IDXD_PMD_NAME_STR "dmadev_idxd"
+
+int
+idxd_dmadev_create(const char *name, struct rte_device *dev,
+ const struct idxd_dmadev *base_idxd,
+ const struct rte_dma_dev_ops *ops)
+{
+ struct idxd_dmadev *idxd = NULL;
+ struct rte_dma_dev *dmadev = NULL;
+ int ret = 0;
+
+ if (!name) {
+ IDXD_PMD_ERR("Invalid name of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Allocate device structure */
+ dmadev = rte_dma_pmd_allocate(name, dev->numa_node, sizeof(struct idxd_dmadev));
+ if (dmadev == NULL) {
+ IDXD_PMD_ERR("Unable to allocate raw device");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ dmadev->dev_ops = ops;
+ dmadev->device = dev;
+
+ idxd = dmadev->data->dev_private;
+ *idxd = *base_idxd; /* copy over the main fields already passed in */
+ idxd->dmadev = dmadev;
+
+ /* allocate batch index ring and completion ring.
+ * The +1 is because we can never fully use
+ * the ring, otherwise read == write means both full and empty.
+ */
+ idxd->batch_comp_ring = rte_zmalloc(NULL, (sizeof(idxd->batch_idx_ring[0]) +
+ sizeof(idxd->batch_comp_ring[0])) * (idxd->max_batches + 1),
+ sizeof(idxd->batch_comp_ring[0]));
+ if (idxd->batch_comp_ring == NULL) {
+ IDXD_PMD_ERR("Unable to reserve memory for batch data\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ idxd->batch_idx_ring = (void *)&idxd->batch_comp_ring[idxd->max_batches+1];
+ idxd->batch_iova = rte_mem_virt2iova(idxd->batch_comp_ring);
+
+ dmadev->fp_obj->dev_private = idxd;
+
+ idxd->dmadev->state = RTE_DMA_DEV_READY;
+
+ return 0;
+
+cleanup:
+ if (dmadev)
+ rte_dma_pmd_release(name);
+
+ return ret;
+}
+
int idxd_pmd_logtype;
RTE_LOG_REGISTER_DEFAULT(idxd_pmd_logtype, WARNING);
new file mode 100644
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 Intel Corporation
+ */
+
+#ifndef _IDXD_HW_DEFS_H_
+#define _IDXD_HW_DEFS_H_
+
+#define IDXD_COMP_STATUS_INCOMPLETE 0
+#define IDXD_COMP_STATUS_SUCCESS 1
+#define IDXD_COMP_STATUS_INVALID_OPCODE 0x10
+#define IDXD_COMP_STATUS_INVALID_SIZE 0x13
+#define IDXD_COMP_STATUS_SKIPPED 0xFF /* not official IDXD error, needed as placeholder */
+
+/**
+ * Completion record structure written back by DSA
+ */
+struct idxd_completion {
+ uint8_t status;
+ uint8_t result;
+ /* 16-bits pad here */
+ uint32_t completed_size; /* data length, or descriptors for batch */
+
+ rte_iova_t fault_address;
+ uint32_t invalid_flags;
+} __rte_aligned(32);
+
+#endif
@@ -5,6 +5,10 @@
#ifndef _IDXD_INTERNAL_H_
#define _IDXD_INTERNAL_H_
+#include <rte_dmadev_pmd.h>
+
+#include "idxd_hw_defs.h"
+
/**
* @file idxd_internal.h
*
@@ -58,4 +62,7 @@ struct idxd_dmadev {
} u;
};
+int idxd_dmadev_create(const char *name, struct rte_device *dev,
+ const struct idxd_dmadev *base_idxd, const struct rte_dma_dev_ops *ops);
+
#endif /* _IDXD_INTERNAL_H_ */