[v2,17/18] raw/ioat: add xstats tracking for idxd devices

Message ID 20200821162944.29840-18-bruce.richardson@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Headers
Series raw/ioat: enhancements and new hardware support |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Bruce Richardson Aug. 21, 2020, 4:29 p.m. UTC
  Add updates of the relevant stats for the data path functions, and point the
overall device struct xstats function pointers to the existing ioat
functions.

At this point, all necessary hooks for supporting the existing unit tests
are in place, so call them for each device.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/raw/ioat/idxd_pci.c            |  3 +++
 drivers/raw/ioat/idxd_vdev.c           |  3 +++
 drivers/raw/ioat/ioat_rawdev_test.c    |  2 +-
 drivers/raw/ioat/rte_ioat_rawdev_fns.h | 30 +++++++++++++++-----------
 4 files changed, 25 insertions(+), 13 deletions(-)
  

Comments

Kevin Laatz Aug. 24, 2020, 9:56 a.m. UTC | #1
On 21/08/2020 17:29, Bruce Richardson wrote:
> Add update of the relevant stats for the data path functions and point the
> overall device struct xstats function pointers to the existing ioat
> functions.
>
> At this point, all necessary hooks for supporting the existing unit tests
> are in place so call them for each device.
>
> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
> ---
>   drivers/raw/ioat/idxd_pci.c            |  3 +++
>   drivers/raw/ioat/idxd_vdev.c           |  3 +++
>   drivers/raw/ioat/ioat_rawdev_test.c    |  2 +-
>   drivers/raw/ioat/rte_ioat_rawdev_fns.h | 30 +++++++++++++++-----------
>   4 files changed, 25 insertions(+), 13 deletions(-)
<snip>
> diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
> index 66e3f1a836..db8608fa6b 100644
> --- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h
> +++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
> @@ -182,6 +182,8 @@ struct rte_idxd_user_hdl {
>    */
>   struct rte_idxd_rawdev {
>   	enum rte_ioat_dev_type type;
> +	struct rte_ioat_xstats xstats;
> +
>   	void *portal; /* address to write the batch descriptor */
>   
>   	/* counters to track the batches and the individual op handles */
> @@ -330,19 +332,15 @@ __idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
>   			IDXD_FLAG_CACHE_CONTROL;
>   
>   	/* check for room in the handle ring */
> -	if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl) {
> -		rte_errno = ENOSPC;
> -		return 0;
> -	}
> -	if (b->op_count >= BATCH_SIZE) {
> -		rte_errno = ENOSPC;
> -		return 0;
> -	}
> +	if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)
> +		goto failed;
> +
> +	if (b->op_count >= BATCH_SIZE)
> +		goto failed;
> +
>   	/* check that we can actually use the current batch */
> -	if (b->submitted) {
> -		rte_errno = ENOSPC;
> -		return 0;
> -	}
> +	if (b->submitted)
> +		goto failed;

This 'cleanup' can be done when initially adding the function in patch 
"raw/ioat: add data path for idxd devices", allowing for this patch to 
be more concise.

/Kevin
  

Patch

diff --git a/drivers/raw/ioat/idxd_pci.c b/drivers/raw/ioat/idxd_pci.c
index 1ae20bc04f..4b97b5b5fd 100644
--- a/drivers/raw/ioat/idxd_pci.c
+++ b/drivers/raw/ioat/idxd_pci.c
@@ -106,6 +106,9 @@  static const struct rte_rawdev_ops idxd_pci_ops = {
 		.dev_start = idxd_pci_dev_start,
 		.dev_stop = idxd_pci_dev_stop,
 		.dev_info_get = idxd_dev_info_get,
+		.xstats_get = ioat_xstats_get,
+		.xstats_get_names = ioat_xstats_get_names,
+		.xstats_reset = ioat_xstats_reset,
 };
 
 /* each portal uses 4 x 4k pages */
diff --git a/drivers/raw/ioat/idxd_vdev.c b/drivers/raw/ioat/idxd_vdev.c
index 3d6aa31f48..febc5919f4 100644
--- a/drivers/raw/ioat/idxd_vdev.c
+++ b/drivers/raw/ioat/idxd_vdev.c
@@ -35,6 +35,9 @@  static const struct rte_rawdev_ops idxd_vdev_ops = {
 		.dump = idxd_dev_dump,
 		.dev_configure = idxd_dev_configure,
 		.dev_info_get = idxd_dev_info_get,
+		.xstats_get = ioat_xstats_get,
+		.xstats_get_names = ioat_xstats_get_names,
+		.xstats_reset = ioat_xstats_reset,
 };
 
 static void *
diff --git a/drivers/raw/ioat/ioat_rawdev_test.c b/drivers/raw/ioat/ioat_rawdev_test.c
index 082b3091c4..db10178871 100644
--- a/drivers/raw/ioat/ioat_rawdev_test.c
+++ b/drivers/raw/ioat/ioat_rawdev_test.c
@@ -274,5 +274,5 @@  int
 idxd_rawdev_test(uint16_t dev_id)
 {
 	rte_rawdev_dump(dev_id, stdout);
-	return 0;
+	return ioat_rawdev_test(dev_id);
 }
diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
index 66e3f1a836..db8608fa6b 100644
--- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h
+++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
@@ -182,6 +182,8 @@  struct rte_idxd_user_hdl {
  */
 struct rte_idxd_rawdev {
 	enum rte_ioat_dev_type type;
+	struct rte_ioat_xstats xstats;
+
 	void *portal; /* address to write the batch descriptor */
 
 	/* counters to track the batches and the individual op handles */
@@ -330,19 +332,15 @@  __idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
 			IDXD_FLAG_CACHE_CONTROL;
 
 	/* check for room in the handle ring */
-	if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl) {
-		rte_errno = ENOSPC;
-		return 0;
-	}
-	if (b->op_count >= BATCH_SIZE) {
-		rte_errno = ENOSPC;
-		return 0;
-	}
+	if (((idxd->next_free_hdl + 1) & (idxd->hdl_ring_sz - 1)) == idxd->next_ret_hdl)
+		goto failed;
+
+	if (b->op_count >= BATCH_SIZE)
+		goto failed;
+
 	/* check that we can actually use the current batch */
-	if (b->submitted) {
-		rte_errno = ENOSPC;
-		return 0;
-	}
+	if (b->submitted)
+		goto failed;
 
 	/* write the descriptor */
 	b->ops[b->op_count++] = (struct rte_idxd_hw_desc){
@@ -361,7 +359,13 @@  __idxd_enqueue_copy(int dev_id, rte_iova_t src, rte_iova_t dst,
 	if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
 		idxd->next_free_hdl = 0;
 
+	idxd->xstats.enqueued++;
 	return 1;
+
+failed:
+	idxd->xstats.enqueue_failed++;
+	rte_errno = ENOSPC;
+	return 0;
 }
 
 static __rte_always_inline void
@@ -388,6 +392,7 @@  __idxd_perform_ops(int dev_id)
 
 	if (++idxd->next_batch == idxd->batch_ring_sz)
 		idxd->next_batch = 0;
+	idxd->xstats.started = idxd->xstats.enqueued;
 }
 
 static __rte_always_inline int
@@ -424,6 +429,7 @@  __idxd_completed_ops(int dev_id, uint8_t max_ops,
 
 	idxd->next_ret_hdl = h_idx;
 
+	idxd->xstats.completed += n;
 	return n;
 }