@@ -107,7 +107,7 @@ struct bnxt_cp_ring_info {
/**
* Check validity of a completion ring entry. If the entry is valid, include a
- * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
+ * C11-style rte_memory_order_acquire fence to ensure that subsequent loads of fields in the
* completion are not hoisted by the compiler or by the CPU to come before the
* loading of the "valid" field.
*
@@ -130,7 +130,7 @@ struct bnxt_cp_ring_info {
expected = !(raw_cons & ring_size);
valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
if (valid == expected) {
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
return true;
}
return false;
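
For readers less familiar with this idiom, here is a minimal standalone sketch of the same check-valid-then-acquire pattern; struct demo_cmpl, DEMO_CMPL_V and demo_cmpl_valid() are invented stand-ins for the driver's completion types and bnxt_cpr_cmp_valid(), not driver code.

/* Illustrative sketch only: demo_cmpl, DEMO_CMPL_V and demo_cmpl_valid() are
 * stand-ins for the driver's completion types and bnxt_cpr_cmp_valid().
 */
#include <stdbool.h>
#include <stdint.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_stdatomic.h>

struct demo_cmpl {
	rte_le32_t info3_v;	/* valid bit toggles on every ring wrap */
	rte_le32_t payload;	/* only meaningful once the entry is valid */
};

#define DEMO_CMPL_V	0x1u

static inline bool
demo_cmpl_valid(const volatile struct demo_cmpl *c, uint32_t raw_cons,
		uint32_t ring_size)
{
	bool expected = !(raw_cons & ring_size);
	bool valid = !!(rte_le_to_cpu_32(c->info3_v) & DEMO_CMPL_V);

	if (valid == expected) {
		/*
		 * Without this acquire fence the caller's later load of
		 * c->payload could be reordered before the load of info3_v
		 * above and observe stale data from the previous ring pass.
		 */
		rte_atomic_thread_fence(rte_memory_order_acquire);
		return true;
	}
	return false;
}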
@@ -40,7 +40,7 @@ struct bnxt_rx_queue {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
struct rte_mbuf fake_mbuf;
- uint64_t rx_mbuf_alloc_fail;
+ RTE_ATOMIC(uint64_t) rx_mbuf_alloc_fail;
uint8_t need_realloc;
const struct rte_memzone *mz;
};
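
The RTE_ATOMIC() marker is what lets this field be passed to the rte_atomic_*_explicit() accessors in the hunks below under either build mode. A small declaration sketch, using a hypothetical demo_rxq rather than the real struct bnxt_rx_queue:

/* Declaration sketch only; demo_rxq and demo_reset_counter() are
 * hypothetical and simply mirror the rx_mbuf_alloc_fail field above.
 */
#include <stdint.h>

#include <rte_stdatomic.h>

struct demo_rxq {
	/*
	 * Expands to "_Atomic uint64_t" when DPDK is built with
	 * RTE_ENABLE_STDATOMIC, and to plain "uint64_t" otherwise, so the
	 * rte_atomic_*_explicit() accessors can take its address without
	 * casts in either mode.
	 */
	RTE_ATOMIC(uint64_t) alloc_fail;
};

static inline void
demo_reset_counter(struct demo_rxq *q)
{
	/* Statistics-only counter, so a relaxed store is sufficient. */
	rte_atomic_store_explicit(&q->alloc_fail, 0, rte_memory_order_relaxed);
}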
@@ -49,7 +49,8 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
rx_buf = &rxr->rx_buf_ring[prod];
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -86,7 +87,8 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -465,7 +467,8 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return NULL;
}
tpa_info->mbuf = new_data;
@@ -1677,8 +1680,8 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return -ENOMEM;
}
}
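
Every hunk above replaces the same construct: a relaxed fetch-add on the allocation-failure counter when mbuf allocation fails. A self-contained sketch of that producer side, with demo_rx_queue and demo_alloc() standing in for struct bnxt_rx_queue and __bnxt_alloc_rx_data():

/* Producer-side sketch; demo_rx_queue and demo_alloc() are stand-ins,
 * not driver code.
 */
#include <errno.h>
#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_stdatomic.h>

struct demo_rx_queue {
	struct rte_mempool *mb_pool;
	RTE_ATOMIC(uint64_t) alloc_fail;
};

static inline int
demo_alloc(struct demo_rx_queue *q, struct rte_mbuf **mbuf)
{
	*mbuf = rte_pktmbuf_alloc(q->mb_pool);
	if (*mbuf == NULL) {
		/*
		 * rte_memory_order_relaxed is enough: only the final count
		 * matters and the update orders no other memory accesses.
		 */
		rte_atomic_fetch_add_explicit(&q->alloc_fail, 1,
					      rte_memory_order_relaxed);
		return -ENOMEM;
	}
	return 0;
}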
@@ -240,7 +240,7 @@
rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
/* Use acquire fence to order loads of descriptor words. */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Reload lower 64b of descriptors to make it ordered after info3_v. */
rxcmp1[3] = vreinterpretq_u32_u64(vld1q_lane_u64
((void *)&cpr->cp_desc_ring[cons + 7],
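
The vector hunk adds a detail beyond the scalar path: descriptor words loaded before the valid bit was confirmed must be re-read after the fence. A scalar sketch of that reload-after-fence idea follows; demo_desc, DEMO_V and demo_read_desc() are invented, and the real code does this with NEON vector loads.

/* Scalar illustration of the reload-after-fence step in the NEON path;
 * demo_desc, DEMO_V and demo_read_desc() are invented for the sketch.
 */
#include <stdbool.h>
#include <stdint.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_stdatomic.h>

struct demo_desc {
	rte_le32_t lo;		/* payload word, written by HW before info3_v */
	rte_le32_t info3_v;	/* valid bit, written by HW last */
};

#define DEMO_V	0x1u

static inline bool
demo_read_desc(const volatile struct demo_desc *d, bool expected,
	       uint32_t *lo_out)
{
	/* Speculative load, not yet ordered against the valid bit. */
	uint32_t lo = rte_le_to_cpu_32(d->lo);
	bool valid = !!(rte_le_to_cpu_32(d->info3_v) & DEMO_V);

	if (valid != expected)
		return false;

	/* Order the reload below after the load of info3_v above. */
	rte_atomic_thread_fence(rte_memory_order_acquire);

	/*
	 * The speculative load may hold stale data from the previous ring
	 * pass; reload it after the fence, just as the vector code reloads
	 * the lower 64 bits of the descriptor.
	 */
	lo = rte_le_to_cpu_32(d->lo);
	*lo_out = lo;
	return true;
}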
@@ -663,7 +663,7 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
@@ -724,7 +724,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
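
Both stats hunks read the per-queue counter with a relaxed load and accumulate it into rx_nombuf. A sketch of that consumer side, where demo_rxq and demo_sum_alloc_failures() are hypothetical stand-ins:

/* Consumer-side sketch; demo_rxq and demo_sum_alloc_failures() are
 * stand-ins, not driver code.
 */
#include <stdint.h>

#include <rte_stdatomic.h>

struct demo_rxq {
	RTE_ATOMIC(uint64_t) alloc_fail;
};

static inline uint64_t
demo_sum_alloc_failures(struct demo_rxq *const *rxqs, uint16_t nb_rxq)
{
	uint64_t total = 0;
	uint16_t i;

	/*
	 * Relaxed loads suffice: each counter is updated independently on
	 * the datapath and the snapshot need not be atomic across queues.
	 */
	for (i = 0; i < nb_rxq; i++)
		total += rte_atomic_load_explicit(&rxqs[i]->alloc_fail,
						  rte_memory_order_relaxed);
	return total;
}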