[14/19] net/cpfl: add stats ops for representor

Message ID 20230809155134.539287-15-beilei.xing@intel.com (mailing list archive)
State: Superseded, archived
Delegated to: Qi Zhang
Series: net/cpfl: support port representor

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Xing, Beilei Aug. 9, 2023, 3:51 p.m. UTC
  From: Beilei Xing <beilei.xing@intel.com>

Support the stats_get and stats_reset ops for port representors.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h      |  8 +++++
 drivers/net/cpfl/cpfl_representor.c | 54 +++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)
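
For reference, these ops are reached through the standard ethdev stats API, so an application reads and clears a representor's counters like any other port's. A minimal usage sketch follows; the dump_repr_stats() helper is illustrative and not part of this patch. Note that rte_eth_stats_get() zeroes the struct before invoking the PMD's stats_get op, which is why the op below only accumulates.

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Illustrative helper: dump, then clear, one representor port's counters. */
static int
dump_repr_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;
	int ret;

	/* Ends up in the stats_get op added by this patch. */
	ret = rte_eth_stats_get(port_id, &stats);
	if (ret != 0)
		return ret;

	printf("port %u: ipackets=%" PRIu64 " opackets=%" PRIu64
	       " ierrors=%" PRIu64 " rx_nombuf=%" PRIu64 "\n",
	       port_id, stats.ipackets, stats.opackets,
	       stats.ierrors, stats.rx_nombuf);

	/* Ends up in the stats_reset op; the ethdev layer additionally
	 * clears rx_mbuf_alloc_failed after the op returns. */
	return rte_eth_stats_reset(port_id);
}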
  

Patch

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7813b9173e..33e810408b 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -171,15 +171,23 @@  struct cpfl_repr {
 	bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_repr_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
+};
+
 struct cpfl_repr_rx_queue {
 	struct cpfl_repr *repr;
 	struct rte_mempool *mb_pool;
 	struct rte_ring *rx_ring;
+	struct cpfl_repr_stats stats; /* Statistics */
 };
 
 struct cpfl_repr_tx_queue {
 	struct cpfl_repr *repr;
 	struct cpfl_tx_queue *txq;
+	struct cpfl_repr_stats stats; /* Statistics */
 };
 
 struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 862464602f..79cb7f76d4 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -425,6 +425,58 @@  cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+idpf_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct cpfl_repr_tx_queue *txq;
+	struct cpfl_repr_rx_queue *rxq;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq)
+			continue;
+		stats->opackets += __atomic_load_n(&txq->stats.packets, __ATOMIC_RELAXED);
+		stats->obytes += __atomic_load_n(&txq->stats.bytes, __ATOMIC_RELAXED);
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq)
+			continue;
+		stats->ipackets += __atomic_load_n(&rxq->stats.packets, __ATOMIC_RELAXED);
+		stats->ibytes += __atomic_load_n(&rxq->stats.bytes, __ATOMIC_RELAXED);
+		stats->ierrors += __atomic_load_n(&rxq->stats.errors, __ATOMIC_RELAXED);
+	}
+	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+	return 0;
+}
+
+static int
+idpf_repr_stats_reset(struct rte_eth_dev *dev)
+{
+	struct cpfl_repr_tx_queue *txq;
+	struct cpfl_repr_rx_queue *rxq;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq)
+			continue;
+		__atomic_store_n(&txq->stats.packets, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&txq->stats.bytes, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&txq->stats.errors, 0, __ATOMIC_RELAXED);
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq)
+			continue;
+		__atomic_store_n(&rxq->stats.packets, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&rxq->stats.bytes, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&rxq->stats.errors, 0, __ATOMIC_RELAXED);
+	}
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -435,6 +487,8 @@  static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
 	.link_update		= cpfl_repr_link_update,
+	.stats_get		= idpf_repr_stats_get,
+	.stats_reset		= idpf_repr_stats_reset,
 };
 
 static int
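
The get/reset ops above access the counters only with relaxed atomics; the matching increments belong in the representor burst functions added elsewhere in this series. Below is a minimal sketch of how an Rx path could feed these counters; the function name cpfl_repr_rx_burst() and the assumption that the per-queue rte_ring carries mbuf pointers are illustrative, not taken from this patch.

#include <rte_mbuf.h>
#include <rte_ring.h>

#include "cpfl_ethdev.h" /* struct cpfl_repr_rx_queue from this patch */

static uint16_t
cpfl_repr_rx_burst(void *rxq_handle, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	struct cpfl_repr_rx_queue *rxq = rxq_handle;
	uint64_t bytes = 0;
	uint16_t nb_rx, i;

	/* Assumed design: another datapath enqueues mbufs destined for
	 * this representor into rxq->rx_ring. */
	nb_rx = (uint16_t)rte_ring_dequeue_burst(rxq->rx_ring,
						 (void **)pkts, nb_pkts, NULL);

	for (i = 0; i < nb_rx; i++)
		bytes += rte_pktmbuf_pkt_len(pkts[i]);

	/* Relaxed increments pair with the relaxed loads in
	 * idpf_repr_stats_get(): each counter only needs to be
	 * individually atomic, not ordered against the others. */
	__atomic_fetch_add(&rxq->stats.packets, nb_rx, __ATOMIC_RELAXED);
	__atomic_fetch_add(&rxq->stats.bytes, bytes, __ATOMIC_RELAXED);

	return nb_rx;
}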