[1/2] net/bnxt: use compiler atomics for stats
Checks
Commit Message
Convert rte_atomic usages to compiler atomic built-ins, removing the dependency on the deprecated rte_atomic64 API for the rx_mbuf_alloc_fail counter.
Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Kathleen Capella <kathleen.capella@arm.com>
---
drivers/net/bnxt/bnxt_rxq.c | 2 +-
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 9 +++++----
drivers/net/bnxt/bnxt_stats.c | 4 ++--
4 files changed, 9 insertions(+), 8 deletions(-)
@@ -378,7 +378,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
"ring_dma_zone_reserve for rx_ring failed!\n");
goto err;
}
- rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
+ rxq->rx_mbuf_alloc_fail = 0;
/* rxq 0 must not be stopped when used as async CPR */
if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
@@ -40,7 +40,7 @@ struct bnxt_rx_queue {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
struct rte_mbuf fake_mbuf;
- rte_atomic64_t rx_mbuf_alloc_fail;
+ uint64_t rx_mbuf_alloc_fail;
const struct rte_memzone *mz;
};
@@ -49,7 +49,7 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
rx_buf = &rxr->rx_buf_ring[prod];
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
return -ENOMEM;
}
@@ -84,7 +84,7 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
return -ENOMEM;
}
@@ -459,7 +459,7 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data) {
- rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
return NULL;
}
tpa_info->mbuf = new_data;
@@ -1369,7 +1369,8 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf) {
- rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+ __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
+ __ATOMIC_RELAXED);
return -ENOMEM;
}
}
@@ -578,7 +578,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- rte_atomic64_read(&rxq->rx_mbuf_alloc_fail);
+ __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
@@ -632,7 +632,7 @@ int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
- rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail);
+ rxq->rx_mbuf_alloc_fail = 0;
}
bnxt_clear_prev_stat(bp);