[v10,08/16] dma/idxd: add start and stop functions for pci devices

Message ID: 20211019141041.1890983-9-kevin.laatz@intel.com (mailing list archive)
State: Superseded, archived
Delegated to: Thomas Monjalon
Series: add dmadev driver for idxd devices

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Kevin Laatz Oct. 19, 2021, 2:10 p.m. UTC
Add device start/stop functions for DSA devices bound to vfio. For devices
bound to the IDXD kernel driver, these are not required since the IDXD
kernel driver takes care of this.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/dmadevs/idxd.rst |  3 +++
 drivers/dma/idxd/idxd_pci.c | 51 +++++++++++++++++++++++++++++++++++++
 2 files changed, 54 insertions(+)
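
For context, the sketch below shows how an application would exercise these
new ops through the public dmadev API. The single-vchan and MEM_TO_MEM
constraints come from the idxd docs in this series; the device ID and
descriptor-ring size are illustrative assumptions, not values from the patch.

    #include <rte_dmadev.h>

    /* Sketch: configure a single-vchan idxd dmadev and start it.
     * rte_dma_start() dispatches to the .dev_start op added by this
     * patch (idxd_pci_dev_start) for vfio-bound devices. */
    static int
    setup_and_start(int16_t dev_id)
    {
        struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
        struct rte_dma_vchan_conf qconf = {
            .direction = RTE_DMA_DIR_MEM_TO_MEM, /* required by idxd */
            .nb_desc = 1024, /* assumed ring size, for illustration */
        };
        int ret;

        ret = rte_dma_configure(dev_id, &dev_conf);
        if (ret < 0)
            return ret;
        ret = rte_dma_vchan_setup(dev_id, 0, &qconf);
        if (ret < 0)
            return ret;
        return rte_dma_start(dev_id);
    }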

Comments

fengchengwen Oct. 20, 2021, 8:04 a.m. UTC | #1
On 2021/10/19 22:10, Kevin Laatz wrote:
> Add device start/stop functions for DSA devices bound to vfio. For devices
> bound to the IDXD kernel driver, these are not required since the IDXD
> kernel driver takes care of this.
> 
> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
> Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> ---

[snip]

>  
> +static int
> +idxd_pci_dev_stop(struct rte_dma_dev *dev)
> +{
> +	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
> +	uint8_t err_code;
> +
> +	if (!idxd_is_wq_enabled(idxd)) {
> +		IDXD_PMD_ERR("Work queue %d already disabled", idxd->qid);
> +		return -EALREADY;

Suggest returning 0 here instead of -EALREADY.
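
A sketch of the suggested change, assuming the log is also downgraded from
ERR to WARN to match how the start path treats an already-enabled queue:

    if (!idxd_is_wq_enabled(idxd)) {
        /* Stopping an already-stopped queue treated as success. */
        IDXD_PMD_WARN("Work queue %d already disabled", idxd->qid);
        return 0;
    }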

> +	}
> +
> +	err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
> +	if (err_code || idxd_is_wq_enabled(idxd)) {
> +		IDXD_PMD_ERR("Failed disabling work queue %d, error code: %#x",
> +				idxd->qid, err_code);
> +		return err_code == 0 ? -1 : -err_code;
> +	}
> +	IDXD_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
> +
> +	return 0;
> +}
> +
> +static int
> +idxd_pci_dev_start(struct rte_dma_dev *dev)
> +{
> +	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
> +	uint8_t err_code;
> +
> +	if (idxd_is_wq_enabled(idxd)) {
> +		IDXD_PMD_WARN("WQ %d already enabled", idxd->qid);
> +		return 0;
> +	}
> +
> +	if (idxd->desc_ring == NULL) {
> +		IDXD_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
> +		return -EINVAL;
> +	}
> +
> +	err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
> +	if (err_code || !idxd_is_wq_enabled(idxd)) {
> +		IDXD_PMD_ERR("Failed enabling work queue %d, error code: %#x",
> +				idxd->qid, err_code);
> +		return err_code == 0 ? -1 : -err_code;
> +	}
> +	IDXD_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);
> +
> +	return 0;
> +}
> +
>  static int
>  idxd_pci_dev_close(struct rte_dma_dev *dev)
>  {
> @@ -87,6 +136,8 @@ static const struct rte_dma_dev_ops idxd_pci_ops = {
>  	.dev_configure = idxd_configure,
>  	.vchan_setup = idxd_vchan_setup,
>  	.dev_info_get = idxd_info_get,
> +	.dev_start = idxd_pci_dev_start,
> +	.dev_stop = idxd_pci_dev_stop,
>  };
>  
>  /* each portal uses 4 x 4k pages */
>

Patch

diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst
index 62ffd39ee0..711890bd9e 100644
--- a/doc/guides/dmadevs/idxd.rst
+++ b/doc/guides/dmadevs/idxd.rst
@@ -135,3 +135,6 @@ IDXD configuration requirements:
 * Only one ``vchan`` is supported per device (work queue).
 * IDXD devices do not support silent mode.
 * The transfer direction must be set to ``RTE_DMA_DIR_MEM_TO_MEM`` to copy from memory to memory.
+
+Once configured, the device can then be made ready for use by calling the
+``rte_dma_start()`` API.
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index 681bb55efe..ed5bf99425 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -59,6 +59,55 @@ idxd_is_wq_enabled(struct idxd_dmadev *idxd)
 	return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
 }
 
+static int
+idxd_pci_dev_stop(struct rte_dma_dev *dev)
+{
+	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+	uint8_t err_code;
+
+	if (!idxd_is_wq_enabled(idxd)) {
+		IDXD_PMD_ERR("Work queue %d already disabled", idxd->qid);
+		return -EALREADY;
+	}
+
+	err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
+	if (err_code || idxd_is_wq_enabled(idxd)) {
+		IDXD_PMD_ERR("Failed disabling work queue %d, error code: %#x",
+				idxd->qid, err_code);
+		return err_code == 0 ? -1 : -err_code;
+	}
+	IDXD_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
+
+	return 0;
+}
+
+static int
+idxd_pci_dev_start(struct rte_dma_dev *dev)
+{
+	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+	uint8_t err_code;
+
+	if (idxd_is_wq_enabled(idxd)) {
+		IDXD_PMD_WARN("WQ %d already enabled", idxd->qid);
+		return 0;
+	}
+
+	if (idxd->desc_ring == NULL) {
+		IDXD_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
+		return -EINVAL;
+	}
+
+	err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
+	if (err_code || !idxd_is_wq_enabled(idxd)) {
+		IDXD_PMD_ERR("Failed enabling work queue %d, error code: %#x",
+				idxd->qid, err_code);
+		return err_code == 0 ? -1 : -err_code;
+	}
+	IDXD_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);
+
+	return 0;
+}
+
 static int
 idxd_pci_dev_close(struct rte_dma_dev *dev)
 {
@@ -87,6 +136,8 @@ static const struct rte_dma_dev_ops idxd_pci_ops = {
 	.dev_configure = idxd_configure,
 	.vchan_setup = idxd_vchan_setup,
 	.dev_info_get = idxd_info_get,
+	.dev_start = idxd_pci_dev_start,
+	.dev_stop = idxd_pci_dev_stop,
 };
 
 /* each portal uses 4 x 4k pages */
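
For completeness, a matching teardown sketch from the application side; the
device ID is again an assumption for illustration:

    #include <rte_dmadev.h>

    /* Sketch: stop and release an idxd dmadev. rte_dma_stop() dispatches
     * to the .dev_stop op added by this patch (idxd_pci_dev_stop) for
     * vfio-bound devices; with the -EALREADY behaviour discussed in the
     * review, a second stop call would return an error. */
    static int
    teardown(int16_t dev_id)
    {
        int ret = rte_dma_stop(dev_id);

        if (ret < 0)
            return ret;
        return rte_dma_close(dev_id);
    }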