@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -189,7 +189,7 @@
evt_err("failed to allocate t->expected_flow_seq memory");
goto exp_nomem;
}
- __atomic_store_n(&t->outstand_pkts, opt->nb_pkts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&t->outstand_pkts, opt->nb_pkts, rte_memory_order_relaxed);
t->err = false;
t->nb_pkts = opt->nb_pkts;
t->nb_flows = opt->nb_flows;
@@ -296,7 +296,8 @@
while (t->err == false) {
uint64_t new_cycles = rte_get_timer_cycles();
- int64_t remaining = __atomic_load_n(&t->outstand_pkts, __ATOMIC_RELAXED);
+ int64_t remaining = rte_atomic_load_explicit(&t->outstand_pkts,
+ rte_memory_order_relaxed);
if (remaining <= 0) {
t->result = EVT_TEST_SUCCESS;
@@ -48,7 +48,7 @@ struct __rte_cache_aligned test_order {
* Atomic operations are expensive, but since this is a functional test
* we use them to reduce code complexity.
*/
- uint64_t outstand_pkts;
+ RTE_ATOMIC(uint64_t) outstand_pkts;
enum evt_test_result result;
uint32_t nb_flows;
uint64_t nb_pkts;
@@ -95,7 +95,7 @@ struct __rte_cache_aligned test_order {
order_process_stage_1(struct test_order *const t,
struct rte_event *const ev, const uint32_t nb_flows,
uint32_t *const expected_flow_seq,
- uint64_t *const outstand_pkts)
+ RTE_ATOMIC(uint64_t) *const outstand_pkts)
{
const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
/* compare the seqn against expected value */
@@ -113,7 +113,7 @@ struct __rte_cache_aligned test_order {
*/
expected_flow_seq[flow]++;
rte_pktmbuf_free(ev->mbuf);
- __atomic_fetch_sub(outstand_pkts, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(outstand_pkts, 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -132,7 +132,7 @@ struct __rte_cache_aligned test_order {
const uint8_t port = w->port_id;\
const uint32_t nb_flows = t->nb_flows;\
uint32_t *expected_flow_seq = t->expected_flow_seq;\
- uint64_t *outstand_pkts = &t->outstand_pkts;\
+ RTE_ATOMIC(uint64_t) *outstand_pkts = &t->outstand_pkts;\
if (opt->verbose_level > 1)\
printf("%s(): lcore %d dev_id %d port=%d\n",\
__func__, rte_lcore_id(), dev_id, port)
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
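Taken together, the order-test hunks above follow one pattern: the shared outstand_pkts counter is declared with RTE_ATOMIC() and every access goes through the rte_atomic_*_explicit() wrappers with relaxed ordering. Below is a minimal standalone sketch of that pattern, not code from the patch; the counter and function names are invented, and it assumes a DPDK release where <rte_stdatomic.h> provides these wrappers.

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_pause.h>

/* Stand-in for t->outstand_pkts in the test. */
static RTE_ATOMIC(uint64_t) pending_pkts;

static void
counter_init(uint64_t nb_pkts)
{
	/* Replaces __atomic_store_n(&cnt, nb_pkts, __ATOMIC_RELAXED). */
	rte_atomic_store_explicit(&pending_pkts, nb_pkts, rte_memory_order_relaxed);
}

static void
counter_consume_one(void)
{
	/* Replaces __atomic_fetch_sub(&cnt, 1, __ATOMIC_RELAXED). */
	rte_atomic_fetch_sub_explicit(&pending_pkts, 1, rte_memory_order_relaxed);
}

static bool
counter_drained(void)
{
	/* Replaces __atomic_load_n(&cnt, __ATOMIC_RELAXED). */
	if (rte_atomic_load_explicit(&pending_pkts, rte_memory_order_relaxed) == 0)
		return true;
	rte_pause();	/* back off before polling again */
	return false;
}
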
@@ -225,7 +225,7 @@ struct __rte_cache_aligned perf_elt {
* stored before updating the number of
* processed packets for worker lcores
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -270,7 +270,7 @@ struct __rte_cache_aligned perf_elt {
/* Release fence here ensures event_ptr is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -325,7 +325,7 @@ struct __rte_cache_aligned perf_elt {
/* Release fence here ensures event_ptr is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts += vec->nb_elem;
if (enable_fwd_latency) {
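
The perf-test hunks only change the token passed to rte_atomic_thread_fence(); the release semantics stay the same. As a rough sketch of why the fence precedes the counter update (invented names, not the test's actual structures; the test itself bumps a plain per-worker processed_pkts field): the worker publishes its per-event data, issues the release fence, and only then advances the counter, so a reader that pairs a relaxed load of the counter with an acquire fence observes the data written before the matching increment.

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

/* Hypothetical per-event record; the test stores a perf_elt instead. */
static uint64_t last_timestamp;
static RTE_ATOMIC(uint64_t) processed;

static void
worker_publish(uint64_t now)
{
	last_timestamp = now;
	/* Make the record visible before the counter moves, as in the patch. */
	rte_atomic_thread_fence(rte_memory_order_release);
	rte_atomic_fetch_add_explicit(&processed, 1, rte_memory_order_relaxed);
}

static uint64_t
monitor_snapshot(uint64_t *count)
{
	*count = rte_atomic_load_explicit(&processed, rte_memory_order_relaxed);
	/* Acquire fence pairs with the worker's release fence above. */
	rte_atomic_thread_fence(rte_memory_order_acquire);
	return last_timestamp;
}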