From: Beilei Xing <beilei.xing@intel.com>
Support stats_get and stats_reset ops for the port representor.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.h | 8 +++++
drivers/net/cpfl/cpfl_representor.c | 54 +++++++++++++++++++++++++++++
2 files changed, 62 insertions(+)
@@ -171,15 +171,23 @@ struct cpfl_repr {
bool func_up; /* If the represented function is up */
};
+struct cpfl_repr_stats {
+ uint64_t packets; /* packet counter; accessed via __atomic ops (see stats get/reset) */
+ uint64_t bytes; /* byte counter; accessed via __atomic ops */
+ uint64_t errors; /* error counter; accessed via __atomic ops */
+};
+
struct cpfl_repr_rx_queue {
struct cpfl_repr *repr;
struct rte_mempool *mb_pool;
struct rte_ring *rx_ring;
+ struct cpfl_repr_stats stats; /* Statistics */
};
struct cpfl_repr_tx_queue {
struct cpfl_repr *repr;
struct cpfl_tx_queue *txq;
+ struct cpfl_repr_stats stats; /* Statistics */
};
struct cpfl_adapter_ext {
@@ -425,6 +425,60 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
return 0;
}
+/* Aggregate per-queue SW counters into rte_eth_stats; counters are read atomically. */
+static int
+idpf_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct cpfl_repr_tx_queue *txq;
+ struct cpfl_repr_rx_queue *rxq;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+ stats->opackets += __atomic_load_n(&txq->stats.packets, __ATOMIC_RELAXED);
+ stats->obytes += __atomic_load_n(&txq->stats.bytes, __ATOMIC_RELAXED);
+ stats->oerrors += __atomic_load_n(&txq->stats.errors, __ATOMIC_RELAXED);
+ }
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq)
+ continue;
+ stats->ipackets += __atomic_load_n(&rxq->stats.packets, __ATOMIC_RELAXED);
+ stats->ibytes += __atomic_load_n(&rxq->stats.bytes, __ATOMIC_RELAXED);
+ stats->ierrors += __atomic_load_n(&rxq->stats.errors, __ATOMIC_RELAXED);
+ }
+ stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+ return 0;
+}
+
+static int
+idpf_repr_stats_reset(struct rte_eth_dev *dev)
+{ /* Zero all per-queue SW counters; atomic stores avoid torn writes vs the data path */
+ struct cpfl_repr_tx_queue *txq;
+ struct cpfl_repr_rx_queue *rxq;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+ __atomic_store_n(&txq->stats.packets, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&txq->stats.bytes, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&txq->stats.errors, 0, __ATOMIC_RELAXED);
+ }
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq)
+ continue;
+ __atomic_store_n(&rxq->stats.packets, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&rxq->stats.bytes, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&rxq->stats.errors, 0, __ATOMIC_RELAXED);
+ }
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_repr_dev_ops = {
.dev_start = cpfl_repr_dev_start,
.dev_stop = cpfl_repr_dev_stop,
@@ -435,6 +487,8 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
.rx_queue_setup = cpfl_repr_rx_queue_setup,
.tx_queue_setup = cpfl_repr_tx_queue_setup,
.link_update = cpfl_repr_link_update,
+ .stats_get = idpf_repr_stats_get, /* SW per-queue stats; NOTE(review): cpfl_ prefix would match file convention */
+ .stats_reset = idpf_repr_stats_reset,
};
static int