@@ -455,7 +455,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* Packet couldn't fit in the provided mbuf */
if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
- rxq->stats.ierrors++;
+ rte_eth_count_error(&rxq->stats);
continue;
}
@@ -467,7 +467,9 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);
if (unlikely(!buf)) {
- rxq->stats.rx_nombuf++;
+ struct rte_eth_dev *dev = &rte_eth_devices[rxq->in_port];
+ ++dev->data->rx_mbuf_alloc_failed;
+
/* No new buf has been allocated: do nothing */
if (!new_tail || !seg)
goto end;
@@ -512,8 +514,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
num_rx_bytes += mbuf->pkt_len;
}
end:
- rxq->stats.ipackets += num_rx;
- rxq->stats.ibytes += num_rx_bytes;
+ rte_eth_count_packets(&rxq->stats, num_rx, num_rx_bytes);
if (trigger && num_rx < nb_pkts)
rxq->trigger_seen = trigger;
@@ -693,7 +694,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
tso_segsz = mbuf_in->tso_segsz + hdrs_len;
if (unlikely(tso_segsz == hdrs_len) ||
tso_segsz > *txq->mtu) {
- txq->stats.errs++;
+ rte_eth_count_error(&txq->stats);
break;
}
gso_ctx->gso_size = tso_segsz;
@@ -731,7 +732,8 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
ret = tap_write_mbufs(txq, num_mbufs, mbuf,
&num_packets, &num_tx_bytes);
if (ret == -1) {
- txq->stats.errs++;
+ rte_eth_count_error(&txq->stats);
+
/* free tso mbufs */
if (num_tso_mbufs > 0)
rte_pktmbuf_free_bulk(mbuf, num_tso_mbufs);
@@ -749,9 +751,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
}
- txq->stats.opackets += num_packets;
- txq->stats.errs += nb_pkts - num_tx;
- txq->stats.obytes += num_tx_bytes;
+ rte_eth_count_packets(&txq->stats, num_packets, num_tx_bytes);
return num_tx;
}
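
For reference, the rte_eth_count_packets() and rte_eth_count_error() helpers used in the hunks above are not part of this diff. Below is a minimal sketch of what such per-queue software counters could look like, with the structure layout and field names assumed purely from their usage in this patch (the real definitions belong to ethdev_swstats.h and may differ, for example by using atomic adds):

#include <stdint.h>

/*
 * Sketch only: per-queue SW counters as inferred from this patch.
 * Field names and the plain (non-atomic) increments are assumptions.
 */
struct rte_eth_counters {
	uint64_t packets;	/* successfully handled packets */
	uint64_t bytes;		/* bytes carried by those packets */
	uint64_t errors;	/* error events on this queue */
};

/* Credit one burst worth of packets and bytes to a queue. */
static inline void
rte_eth_count_packets(struct rte_eth_counters *counters,
		      uint16_t packets, uint32_t bytes)
{
	counters->packets += packets;
	counters->bytes += bytes;
}

/* Record a single error event on this queue. */
static inline void
rte_eth_count_error(struct rte_eth_counters *counters)
{
	counters->errors++;
}
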
@@ -1055,64 +1055,15 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
{
- unsigned int i, imax;
- unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
- unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
- unsigned long rx_nombuf = 0, ierrors = 0;
- const struct pmd_internals *pmd = dev->data->dev_private;
-
- /* rx queue statistics */
- imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
- dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
- for (i = 0; i < imax; i++) {
- tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
- tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
- rx_total += tap_stats->q_ipackets[i];
- rx_bytes_total += tap_stats->q_ibytes[i];
- rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
- ierrors += pmd->rxq[i].stats.ierrors;
- }
-
- /* tx queue statistics */
- imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
- dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
-
- for (i = 0; i < imax; i++) {
- tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
- tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
- tx_total += tap_stats->q_opackets[i];
- tx_err_total += pmd->txq[i].stats.errs;
- tx_bytes_total += tap_stats->q_obytes[i];
- }
-
- tap_stats->ipackets = rx_total;
- tap_stats->ibytes = rx_bytes_total;
- tap_stats->ierrors = ierrors;
- tap_stats->rx_nombuf = rx_nombuf;
- tap_stats->opackets = tx_total;
- tap_stats->oerrors = tx_err_total;
- tap_stats->obytes = tx_bytes_total;
- return 0;
+ return rte_eth_counters_stats_get(dev, offsetof(struct tx_queue, stats),
+ offsetof(struct rx_queue, stats), tap_stats);
}
static int
tap_stats_reset(struct rte_eth_dev *dev)
{
- int i;
- struct pmd_internals *pmd = dev->data->dev_private;
-
- for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
- pmd->rxq[i].stats.ipackets = 0;
- pmd->rxq[i].stats.ibytes = 0;
- pmd->rxq[i].stats.ierrors = 0;
- pmd->rxq[i].stats.rx_nombuf = 0;
-
- pmd->txq[i].stats.opackets = 0;
- pmd->txq[i].stats.errs = 0;
- pmd->txq[i].stats.obytes = 0;
- }
-
- return 0;
+ return rte_eth_counters_reset(dev, offsetof(struct tx_queue, stats),
+ offsetof(struct rx_queue, stats));
}
static int
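
The offsetof() values passed to rte_eth_counters_stats_get() and rte_eth_counters_reset() tell the generic code where the rte_eth_counters block sits inside each driver-private queue structure, so it can walk dev->data->rx_queues[] and tx_queues[] without knowing the tap-specific types. A rough sketch of how such an aggregation could work, reusing the assumed rte_eth_counters layout sketched earlier (the loop and field handling are illustrative, not the actual ethdev_swstats implementation):

#include <stddef.h>
#include <ethdev_driver.h>

/* Illustrative aggregation over per-queue counters located via offsets. */
static int
sw_stats_get_sketch(struct rte_eth_dev *dev, size_t tx_offset,
		    size_t rx_offset, struct rte_eth_stats *stats)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct rte_eth_counters *rx;

		rx = (const struct rte_eth_counters *)
			((const char *)dev->data->rx_queues[i] + rx_offset);
		stats->ipackets += rx->packets;
		stats->ibytes += rx->bytes;
		stats->ierrors += rx->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct rte_eth_counters *tx;

		tx = (const struct rte_eth_counters *)
			((const char *)dev->data->tx_queues[i] + tx_offset);
		stats->opackets += tx->packets;
		stats->obytes += tx->bytes;
		stats->oerrors += tx->errors;
	}

	/* mbuf allocation failures are kept at device level, see the RX hunk above */
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return 0;
}
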
@@ -14,6 +14,7 @@
#include <linux/if_tun.h>
#include <ethdev_driver.h>
+#include <ethdev_swstats.h>
#include <rte_ether.h>
#include <rte_gso.h>
#include "tap_log.h"
@@ -32,23 +33,13 @@ enum rte_tuntap_type {
ETH_TUNTAP_TYPE_MAX,
};
-struct pkt_stats {
- uint64_t opackets; /* Number of output packets */
- uint64_t ipackets; /* Number of input packets */
- uint64_t obytes; /* Number of bytes on output */
- uint64_t ibytes; /* Number of bytes on input */
- uint64_t errs; /* Number of TX error packets */
- uint64_t ierrors; /* Number of RX error packets */
- uint64_t rx_nombuf; /* Nb of RX mbuf alloc failures */
-};
-
struct rx_queue {
struct rte_mempool *mp; /* Mempool for RX packets */
uint32_t trigger_seen; /* Last seen Rx trigger value */
uint16_t in_port; /* Port ID */
uint16_t queue_id; /* queue ID*/
- struct pkt_stats stats; /* Stats for this RX queue */
uint16_t nb_rx_desc; /* max number of mbufs available */
+ struct rte_eth_counters stats; /* Stats for this RX queue */
struct rte_eth_rxmode *rxmode; /* RX features */
struct rte_mbuf *pool; /* mbufs pool for this queue */
struct iovec (*iovecs)[]; /* descriptors for this queue */
@@ -59,7 +50,7 @@ struct tx_queue {
int type; /* Type field - TUN|TAP */
uint16_t *mtu; /* Pointer to MTU from dev_data */
uint16_t csum:1; /* Enable checksum offloading */
- struct pkt_stats stats; /* Stats for this TX queue */
+ struct rte_eth_counters stats; /* Stats for this TX queue */
struct rte_gso_ctx gso_ctx; /* GSO context */
uint16_t out_port; /* Port ID */
uint16_t queue_id; /* queue ID*/