@@ -554,7 +554,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
ret = validate_event(&ev);
RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
rte_pktmbuf_free(ev.mbuf);
- __atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
}

return 0;
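
Every call site touched in this series discards the builtin's return value, so swapping __atomic_sub_fetch (which returns the new value) for __atomic_fetch_sub (which returns the old value) is behavior-preserving; the two differ only in which value they hand back. A minimal sketch of the pattern, using a hypothetical counter rather than the test's own:

    #include <stdint.h>

    static uint32_t total_events;

    static void
    drop_event(void)
    {
        /* Result discarded: the old-value and new-value forms have
         * identical effects on the counter. */
        __atomic_fetch_sub(&total_events, 1, __ATOMIC_RELAXED);
    }
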
@@ -916,7 +916,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
if (seqn_list_update(seqn) == 0) {
rte_pktmbuf_free(ev.mbuf);
- __atomic_sub_fetch(total_events, 1,
+ __atomic_fetch_sub(total_events, 1,
__ATOMIC_RELAXED);
} else {
plt_err("Failed to update seqn_list");
@@ -1072,7 +1072,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
if (seqn_list_update(seqn) == 0) {
rte_pktmbuf_free(ev.mbuf);
- __atomic_sub_fetch(total_events, 1,
+ __atomic_fetch_sub(total_events, 1,
__ATOMIC_RELAXED);
} else {
plt_err("Failed to update seqn_list");
@@ -1217,7 +1217,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
if (ev.sub_event_type == MAX_STAGES) { /* last stage */
rte_pktmbuf_free(ev.mbuf);
- __atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
} else {
ev.event_type = RTE_EVENT_TYPE_CPU;
ev.sub_event_type++;
@@ -1293,7 +1293,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
if (ev.queue_id == nr_queues - 1) { /* last stage */
rte_pktmbuf_free(ev.mbuf);
- __atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
} else {
ev.event_type = RTE_EVENT_TYPE_CPU;
ev.queue_id++;
@@ -1338,7 +1338,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
if (ev.queue_id == nr_queues - 1) { /* Last stage */
rte_pktmbuf_free(ev.mbuf);
- __atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
} else {
ev.event_type = RTE_EVENT_TYPE_CPU;
ev.queue_id++;
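
The pipeline hunks above all share one shutdown pattern: whichever worker handles the final stage frees the mbuf and decrements the shared total_events counter, and the launcher spins until the counter drains to zero. An illustrative, not verbatim, wait loop for that scheme:

    /* Illustrative launcher-side drain loop (not the test's exact helper). */
    static void
    wait_for_drain(uint32_t *total_events)
    {
        while (__atomic_load_n(total_events, __ATOMIC_RELAXED) != 0)
            rte_pause(); /* from <rte_pause.h> */
    }
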
@@ -102,19 +102,19 @@
static inline void
cnxk_tim_bkt_inc_nent(struct cnxk_tim_bkt *bktp)
{
- __atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
}

static inline void
cnxk_tim_bkt_add_nent_relaxed(struct cnxk_tim_bkt *bktp, uint32_t v)
{
- __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
}

static inline void
cnxk_tim_bkt_add_nent(struct cnxk_tim_bkt *bktp, uint32_t v)
{
- __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELEASE);
+ __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELEASE);
}

static inline uint64_t
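
Worth noting: the conversion preserves each helper's memory ordering, including the one deliberate asymmetry — cnxk_tim_bkt_add_nent stays __ATOMIC_RELEASE so that writes made before the count bump remain visible to a reader pairing the counter with an acquire load. A sketch of that pairing under a single-writer assumption (names hypothetical):

    #include <stdint.h>

    struct bkt { uint32_t nb_entry; uint64_t entries[64]; };

    /* Writer: fill the slot first, then publish it with a release add. */
    static void
    publish(struct bkt *b, uint64_t ent)
    {
        b->entries[b->nb_entry] = ent;
        __atomic_fetch_add(&b->nb_entry, 1, __ATOMIC_RELEASE);
    }

    /* Reader: the acquire load pairs with the release add above, so the
     * first n slots are fully written before they are observed. */
    static uint32_t
    snapshot(const struct bkt *b)
    {
        return __atomic_load_n(&b->nb_entry, __ATOMIC_ACQUIRE);
    }
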
@@ -50,7 +50,7 @@
if (unlikely(new_total_on_loan > dsw->max_inflight)) {
/* Some other port took the last credits */
- __atomic_sub_fetch(&dsw->credits_on_loan, acquired_credits,
+ __atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
__ATOMIC_RELAXED);
return false;
}
@@ -77,7 +77,7 @@
port->inflight_credits = leave_credits;
- __atomic_sub_fetch(&dsw->credits_on_loan, return_credits,
+ __atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
__ATOMIC_RELAXED);
DSW_LOG_DP_PORT(DEBUG, port->id,
@@ -527,7 +527,7 @@ struct dsw_queue_flow_burst {
target_qfs[*targets_len] = *candidate_qf;
(*targets_len)++;
- __atomic_add_fetch(&dsw->ports[candidate_port_id].immigration_load,
+ __atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
candidate_flow_load, __ATOMIC_RELAXED);
return true;
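
The two dsw hunks are halves of one optimistic scheme: a port speculatively adds its request to the global credits_on_loan, checks the cap, and hands the credits straight back with the matching fetch_sub when it overshoots. Reduced to its skeleton (names shortened from the driver):

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    try_acquire(uint32_t *on_loan, uint32_t want, uint32_t max_inflight)
    {
        uint32_t before = __atomic_fetch_add(on_loan, want, __ATOMIC_RELAXED);

        if (before + want > max_inflight) {
            /* Overshot the cap: undo the speculative add. */
            __atomic_fetch_sub(on_loan, want, __ATOMIC_RELAXED);
            return false;
        }
        return true;
    }
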
@@ -108,7 +108,7 @@
static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
- __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+ __atomic_fetch_add(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
}

static inline uint32_t
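
The 0xff in timr_bkt_dec_lock is not a typo: lock appears to be an 8-bit field in tim_mem_bucket, so atomically adding 0xff wraps modulo 256 and nets out to a decrement of 1, letting the driver release the lock with a single fetch-add. A standalone demonstration of the wrap (assuming the 8-bit width):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t lock = 3;

        /* 3 + 0xff == 0x102, truncated to 8 bits == 2: a net -1. */
        __atomic_fetch_add(&lock, 0xff, __ATOMIC_ACQ_REL);
        assert(lock == 2);
        return 0;
    }
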
@@ -121,13 +121,13 @@
static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
- __atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
}

static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
- __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
}

static inline uint64_t
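
A final note on direction: <stdatomic.h> defines only the fetch_<op> shape (atomic_fetch_add, atomic_fetch_sub, ...), with no <op>_fetch counterpart, so call sites converted in this series translate mechanically if the tree later moves to standard atomics. A sketch of that eventual spelling, not part of this patch:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t nb_entry;

    static inline void
    inc_nent_c11(void)
    {
        /* C11 spelling of __atomic_fetch_add(..., __ATOMIC_RELAXED). */
        atomic_fetch_add_explicit(&nb_entry, 1, memory_order_relaxed);
    }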