[v9,6/8] net/ring: use generic SW stats

Message ID 20240521201801.126886-7-stephen@networkplumber.org (mailing list archive)
State Changes Requested
Delegated to: Thomas Monjalon
Headers
Series Common statistics for SW PMDs

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Stephen Hemminger May 21, 2024, 8:16 p.m. UTC
  Use the generic per-queue software statistics infrastructure
  (struct rte_eth_counters and the rte_eth_counters_* helpers)
  instead of the driver-local per-queue packet counters.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/ring/rte_eth_ring.c | 63 +++++++++++----------------------
 1 file changed, 20 insertions(+), 43 deletions(-)
  

Patch

diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index b16f5d55f2..36053e4038 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -7,6 +7,7 @@ 
 #include "rte_eth_ring.h"
 #include <rte_mbuf.h>
 #include <ethdev_driver.h>
+#include <ethdev_swstats.h>
 #include <rte_malloc.h>
 #include <rte_memcpy.h>
 #include <rte_os_shim.h>
@@ -44,8 +45,8 @@  enum dev_action {
 
 struct ring_queue {
 	struct rte_ring *rng;
-	RTE_ATOMIC(uint64_t) rx_pkts;
-	RTE_ATOMIC(uint64_t) tx_pkts;
+
+	struct rte_eth_counters stats;
 };
 
 struct pmd_internals {
@@ -77,12 +78,12 @@  eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
 	void **ptrs = (void *)&bufs[0];
 	struct ring_queue *r = q;
-	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
-			ptrs, nb_bufs, NULL);
-	if (r->rng->flags & RING_F_SC_DEQ)
-		r->rx_pkts += nb_rx;
-	else
-		rte_atomic_fetch_add_explicit(&r->rx_pkts, nb_rx, rte_memory_order_relaxed);
+	uint16_t nb_rx;
+
+	nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng, ptrs, nb_bufs, NULL);
+
+	rte_counter64_add(&r->stats.packets, nb_rx);
+
 	return nb_rx;
 }
 
@@ -91,12 +92,12 @@  eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
 	void **ptrs = (void *)&bufs[0];
 	struct ring_queue *r = q;
-	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
-			ptrs, nb_bufs, NULL);
-	if (r->rng->flags & RING_F_SP_ENQ)
-		r->tx_pkts += nb_tx;
-	else
-		rte_atomic_fetch_add_explicit(&r->tx_pkts, nb_tx, rte_memory_order_relaxed);
+	uint16_t nb_tx;
+
+	nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng, ptrs, nb_bufs, NULL);
+
+	rte_counter64_add(&r->stats.packets, nb_tx);
+
 	return nb_tx;
 }
 
@@ -193,40 +194,16 @@  eth_dev_info(struct rte_eth_dev *dev,
 static int
 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-	unsigned int i;
-	unsigned long rx_total = 0, tx_total = 0;
-	const struct pmd_internals *internal = dev->data->dev_private;
-
-	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
-			i < dev->data->nb_rx_queues; i++) {
-		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts;
-		rx_total += stats->q_ipackets[i];
-	}
-
-	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
-			i < dev->data->nb_tx_queues; i++) {
-		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts;
-		tx_total += stats->q_opackets[i];
-	}
-
-	stats->ipackets = rx_total;
-	stats->opackets = tx_total;
-
-	return 0;
+	return rte_eth_counters_stats_get(dev, offsetof(struct ring_queue, stats),
+					  offsetof(struct ring_queue, stats),
+					  stats);
 }
 
 static int
 eth_stats_reset(struct rte_eth_dev *dev)
 {
-	unsigned int i;
-	struct pmd_internals *internal = dev->data->dev_private;
-
-	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		internal->rx_ring_queues[i].rx_pkts = 0;
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		internal->tx_ring_queues[i].tx_pkts = 0;
-
-	return 0;
+	return rte_eth_counters_reset(dev, offsetof(struct ring_queue, stats),
+				      offsetof(struct ring_queue, stats));
 }
 
 static void