[v10,12/16] dma/idxd: add vchan status function

Message ID 20211019141041.1890983-13-kevin.laatz@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: add dmadev driver for idxd devices

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Kevin Laatz Oct. 19, 2021, 2:10 p.m. UTC
  When testing dmadev drivers, it is useful to have the HW device in a known
state. This patch adds the implementation of the function which will wait
for the device to be idle (all jobs completed) before proceeding.

Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 drivers/dma/idxd/idxd_bus.c      |  1 +
 drivers/dma/idxd/idxd_common.c   | 17 +++++++++++++++++
 drivers/dma/idxd/idxd_internal.h |  2 ++
 drivers/dma/idxd/idxd_pci.c      |  1 +
 4 files changed, 21 insertions(+)
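
As context, a minimal sketch (not part of the patch) of how a test might use this through the public dmadev API to get the device into a known, idle state; dev_id and vchan are placeholder values:

#include <rte_dmadev.h>

/* Illustrative helper, not from the patch: poll the vchan status until the
 * hardware reports that all submitted jobs have completed.
 */
static int
wait_for_idle(int16_t dev_id, uint16_t vchan)
{
	enum rte_dma_vchan_status st;

	do {
		if (rte_dma_vchan_status(dev_id, vchan, &st) < 0)
			return -1; /* op not supported by the driver, or bad args */
	} while (st == RTE_DMA_VCHAN_ACTIVE);

	return 0; /* idle (or halted, for drivers that can report errors) */
}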
  

Comments

Chengwen Feng Oct. 20, 2021, 9:30 a.m. UTC | #1
On 2021/10/19 22:10, Kevin Laatz wrote:
> When testing dmadev drivers, it is useful to have the HW device in a known
> state. This patch adds the implementation of the function which will wait
> for the device to be idle (all jobs completed) before proceeding.
> 
> Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> ---
>  drivers/dma/idxd/idxd_bus.c      |  1 +
>  drivers/dma/idxd/idxd_common.c   | 17 +++++++++++++++++
>  drivers/dma/idxd/idxd_internal.h |  2 ++
>  drivers/dma/idxd/idxd_pci.c      |  1 +
>  4 files changed, 21 insertions(+)
> 
> diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
> index b52ea02854..e6caa048a9 100644
> --- a/drivers/dma/idxd/idxd_bus.c
> +++ b/drivers/dma/idxd/idxd_bus.c
> @@ -101,6 +101,7 @@ static const struct rte_dma_dev_ops idxd_bus_ops = {
>  		.dev_info_get = idxd_info_get,
>  		.stats_get = idxd_stats_get,
>  		.stats_reset = idxd_stats_reset,
> +		.vchan_status = idxd_vchan_status,
>  };
>  
>  static void *
> diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
> index fd81418b7c..3c8cff15c0 100644
> --- a/drivers/dma/idxd/idxd_common.c
> +++ b/drivers/dma/idxd/idxd_common.c
> @@ -163,6 +163,23 @@ get_comp_status(struct idxd_completion *c)
>  	}
>  }
>  
> +int
> +idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
> +		enum rte_dma_vchan_status *status)
> +{
> +	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
> +	uint16_t last_batch_write = idxd->batch_idx_write == 0 ? idxd->max_batches :
> +			idxd->batch_idx_write - 1;
> +	uint8_t bstatus = (idxd->batch_comp_ring[last_batch_write].status != 0);
> +
> +	/* An IDXD device will always be either active or idle.
> +	 * RTE_DMA_VCHAN_HALTED_ERROR is therefore not supported by IDXD.
> +	 */
> +	*status = bstatus ? RTE_DMA_VCHAN_IDLE : RTE_DMA_VCHAN_ACTIVE;

Why not use stats.submitted and stats.completed? Or am I missing something about this API?

Does this API have to be called after rte_dma_submit()? If not, the following sequence will fail:
  enqueue multiple copy requests
  submit to hardware
  enqueue multiple copy requests
  invoke rte_dma_vchan_status to query status  -- because the second set of copy requests has not been submitted, the last completion record will be non-zero, so it will return IDLE.
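
Expressed with the dmadev API, that sequence looks roughly like this (illustrative only; dev_id, vchan and the buffer addresses/length are placeholders):

enum rte_dma_vchan_status st;

rte_dma_copy(dev_id, vchan, src1, dst1, len, 0);   /* enqueue */
rte_dma_submit(dev_id, vchan);                     /* submit to hardware */

rte_dma_copy(dev_id, vchan, src2, dst2, len, 0);   /* enqueued, not yet submitted */

/* The second copy is still sitting in the software batch, so the last batch
 * the HW knows about may already be complete and the query can report IDLE.
 */
rte_dma_vchan_status(dev_id, vchan, &st);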

> +
> +	return 0;
> +}
> +
>  static __rte_always_inline int
>  batch_ok(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *status)
>  {
> diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
> index a85a1fb79e..50acb82d3d 100644
> --- a/drivers/dma/idxd/idxd_internal.h
> +++ b/drivers/dma/idxd/idxd_internal.h
> @@ -102,5 +102,7 @@ uint16_t idxd_completed_status(void *dev_private, uint16_t qid __rte_unused,
>  int idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
>  		struct rte_dma_stats *stats, uint32_t stats_sz);
>  int idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan);
> +int idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
> +		enum rte_dma_vchan_status *status);
>  
>  #endif /* _IDXD_INTERNAL_H_ */
> diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
> index 9d7f0531d5..23c10c0fb0 100644
> --- a/drivers/dma/idxd/idxd_pci.c
> +++ b/drivers/dma/idxd/idxd_pci.c
> @@ -140,6 +140,7 @@ static const struct rte_dma_dev_ops idxd_pci_ops = {
>  	.stats_reset = idxd_stats_reset,
>  	.dev_start = idxd_pci_dev_start,
>  	.dev_stop = idxd_pci_dev_stop,
> +	.vchan_status = idxd_vchan_status,
>  };
>  
>  /* each portal uses 4 x 4k pages */
>
  
Bruce Richardson Oct. 20, 2021, 9:52 a.m. UTC | #2
On Wed, Oct 20, 2021 at 05:30:13PM +0800, fengchengwen wrote:
> On 2021/10/19 22:10, Kevin Laatz wrote:
> > When testing dmadev drivers, it is useful to have the HW device in a known
> > state. This patch adds the implementation of the function which will wait
> > for the device to be idle (all jobs completed) before proceeding.
> > 
> > Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
> > Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> > ---
> >  drivers/dma/idxd/idxd_bus.c      |  1 +
> >  drivers/dma/idxd/idxd_common.c   | 17 +++++++++++++++++
> >  drivers/dma/idxd/idxd_internal.h |  2 ++
> >  drivers/dma/idxd/idxd_pci.c      |  1 +
> >  4 files changed, 21 insertions(+)
> > 
> > diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
> > index b52ea02854..e6caa048a9 100644
> > --- a/drivers/dma/idxd/idxd_bus.c
> > +++ b/drivers/dma/idxd/idxd_bus.c
> > @@ -101,6 +101,7 @@ static const struct rte_dma_dev_ops idxd_bus_ops = {
> >  		.dev_info_get = idxd_info_get,
> >  		.stats_get = idxd_stats_get,
> >  		.stats_reset = idxd_stats_reset,
> > +		.vchan_status = idxd_vchan_status,
> >  };
> >  
> >  static void *
> > diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
> > index fd81418b7c..3c8cff15c0 100644
> > --- a/drivers/dma/idxd/idxd_common.c
> > +++ b/drivers/dma/idxd/idxd_common.c
> > @@ -163,6 +163,23 @@ get_comp_status(struct idxd_completion *c)
> >  	}
> >  }
> >  
> > +int
> > +idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
> > +		enum rte_dma_vchan_status *status)
> > +{
> > +	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
> > +	uint16_t last_batch_write = idxd->batch_idx_write == 0 ? idxd->max_batches :
> > +			idxd->batch_idx_write - 1;
> > +	uint8_t bstatus = (idxd->batch_comp_ring[last_batch_write].status != 0);
> > +
> > +	/* An IDXD device will always be either active or idle.
> > +	 * RTE_DMA_VCHAN_HALTED_ERROR is therefore not supported by IDXD.
> > +	 */
> > +	*status = bstatus ? RTE_DMA_VCHAN_IDLE : RTE_DMA_VCHAN_ACTIVE;
> 
> Why not use stats.submitted and stats.completed? Or am I missing something about this API?
> 
> Does this API have to be called after rte_dma_submit()? If not, the following sequence will fail:
>   enqueue multiple copy requests
>   submit to hardware
>   enqueue multiple copy requests
>   invoke rte_dma_vchan_status to query status  -- because the second set of copy requests has not been submitted, the last completion record will be non-zero, so it will return IDLE.
>
That is correct. Until the jobs are submitted, the device HW is idle as it
is not processing any jobs. This API is intended to return the HW state, because
that is the key concern here: whether the HW is in the process of doing DMA or
not, since that is what can cause race conditions. The timing of sending
jobs down to the device is under app control.
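
In code terms, the expected usage is to flush any enqueued work first and only then query, roughly as follows (illustrative; dev_id/vchan are placeholders):

/* Make sure everything enqueued so far has been handed to the HW, then the
 * status query reflects the real hardware state: ACTIVE while batches are in
 * flight, IDLE once they have all completed.
 */
rte_dma_submit(dev_id, vchan);

enum rte_dma_vchan_status st;
rte_dma_vchan_status(dev_id, vchan, &st);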
  

Patch

diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
index b52ea02854..e6caa048a9 100644
--- a/drivers/dma/idxd/idxd_bus.c
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -101,6 +101,7 @@  static const struct rte_dma_dev_ops idxd_bus_ops = {
 		.dev_info_get = idxd_info_get,
 		.stats_get = idxd_stats_get,
 		.stats_reset = idxd_stats_reset,
+		.vchan_status = idxd_vchan_status,
 };
 
 static void *
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index fd81418b7c..3c8cff15c0 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -163,6 +163,23 @@  get_comp_status(struct idxd_completion *c)
 	}
 }
 
+int
+idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
+		enum rte_dma_vchan_status *status)
+{
+	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+	uint16_t last_batch_write = idxd->batch_idx_write == 0 ? idxd->max_batches :
+			idxd->batch_idx_write - 1;
+	uint8_t bstatus = (idxd->batch_comp_ring[last_batch_write].status != 0);
+
+	/* An IDXD device will always be either active or idle.
+	 * RTE_DMA_VCHAN_HALTED_ERROR is therefore not supported by IDXD.
+	 */
+	*status = bstatus ? RTE_DMA_VCHAN_IDLE : RTE_DMA_VCHAN_ACTIVE;
+
+	return 0;
+}
+
 static __rte_always_inline int
 batch_ok(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *status)
 {
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index a85a1fb79e..50acb82d3d 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -102,5 +102,7 @@  uint16_t idxd_completed_status(void *dev_private, uint16_t qid __rte_unused,
 int idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
 		struct rte_dma_stats *stats, uint32_t stats_sz);
 int idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan);
+int idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
+		enum rte_dma_vchan_status *status);
 
 #endif /* _IDXD_INTERNAL_H_ */
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index 9d7f0531d5..23c10c0fb0 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -140,6 +140,7 @@  static const struct rte_dma_dev_ops idxd_pci_ops = {
 	.stats_reset = idxd_stats_reset,
 	.dev_start = idxd_pci_dev_start,
 	.dev_stop = idxd_pci_dev_stop,
+	.vchan_status = idxd_vchan_status,
 };
 
 /* each portal uses 4 x 4k pages */