[v2,25/45] event/dsw: use rte stdatomic API
Checks
Commit Message
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/event/dsw/dsw_evdev.h | 6 +++---
drivers/event/dsw/dsw_event.c | 34 +++++++++++++++++-----------------
drivers/event/dsw/dsw_xstats.c | 4 ++--
3 files changed, 22 insertions(+), 22 deletions(-)
Comments
On 2024-03-21 20:17, Tyler Retzlaff wrote:
> Replace the use of gcc builtin __atomic_xxx intrinsics with
> corresponding rte_atomic_xxx optional rte stdatomic API.
>
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> ---
> drivers/event/dsw/dsw_evdev.h | 6 +++---
> drivers/event/dsw/dsw_event.c | 34 +++++++++++++++++-----------------
> drivers/event/dsw/dsw_xstats.c | 4 ++--
> 3 files changed, 22 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
> index d745c89..20431d2 100644
> --- a/drivers/event/dsw/dsw_evdev.h
> +++ b/drivers/event/dsw/dsw_evdev.h
> @@ -227,9 +227,9 @@ struct dsw_port {
> struct rte_ring *ctl_in_ring __rte_cache_aligned;
>
> /* Estimate of current port load. */
> - int16_t load __rte_cache_aligned;
> + RTE_ATOMIC(int16_t) load __rte_cache_aligned;
> /* Estimate of flows currently migrating to this port. */
> - int32_t immigration_load __rte_cache_aligned;
> + RTE_ATOMIC(int32_t) immigration_load __rte_cache_aligned;
> } __rte_cache_aligned;
>
> struct dsw_queue {
> @@ -252,7 +252,7 @@ struct dsw_evdev {
> uint8_t num_queues;
> int32_t max_inflight;
>
> - int32_t credits_on_loan __rte_cache_aligned;
> + RTE_ATOMIC(int32_t) credits_on_loan __rte_cache_aligned;
> };
>
> #define DSW_CTL_PAUS_REQ (0)
> diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
> index 23488d9..6c17b44 100644
> --- a/drivers/event/dsw/dsw_event.c
> +++ b/drivers/event/dsw/dsw_event.c
> @@ -33,7 +33,7 @@
> }
>
> total_on_loan =
> - __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
> + rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
Limit lines to 80 characters, like in the rest of this file.
> available = dsw->max_inflight - total_on_loan;
> acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
>
> @@ -45,13 +45,13 @@
> * allocation.
> */
> new_total_on_loan =
> - __atomic_fetch_add(&dsw->credits_on_loan, acquired_credits,
> - __ATOMIC_RELAXED) + acquired_credits;
> + rte_atomic_fetch_add_explicit(&dsw->credits_on_loan, acquired_credits,
> + rte_memory_order_relaxed) + acquired_credits;
Format the continuation (left-over) arguments in the same way as is done
in the rest of this file.
Several other changes below suffer from the above two issues.
Provided the formatting is fixed,
Reviewed-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>
> if (unlikely(new_total_on_loan > dsw->max_inflight)) {
> /* Some other port took the last credits */
> - __atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
> - __ATOMIC_RELAXED);
> + rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, acquired_credits,
> + rte_memory_order_relaxed);
> return false;
> }
>
> @@ -77,8 +77,8 @@
>
> port->inflight_credits = leave_credits;
>
> - __atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
> - __ATOMIC_RELAXED);
> + rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, return_credits,
> + rte_memory_order_relaxed);
>
> DSW_LOG_DP_PORT(DEBUG, port->id,
> "Returned %d tokens to pool.\n",
> @@ -156,19 +156,19 @@
> int16_t period_load;
> int16_t new_load;
>
> - old_load = __atomic_load_n(&port->load, __ATOMIC_RELAXED);
> + old_load = rte_atomic_load_explicit(&port->load, rte_memory_order_relaxed);
>
> period_load = dsw_port_load_close_period(port, now);
>
> new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
> (DSW_OLD_LOAD_WEIGHT+1);
>
> - __atomic_store_n(&port->load, new_load, __ATOMIC_RELAXED);
> + rte_atomic_store_explicit(&port->load, new_load, rte_memory_order_relaxed);
>
> /* The load of the recently immigrated flows should hopefully
> * be reflected the load estimate by now.
> */
> - __atomic_store_n(&port->immigration_load, 0, __ATOMIC_RELAXED);
> + rte_atomic_store_explicit(&port->immigration_load, 0, rte_memory_order_relaxed);
> }
>
> static void
> @@ -390,10 +390,10 @@ struct dsw_queue_flow_burst {
>
> for (i = 0; i < dsw->num_ports; i++) {
> int16_t measured_load =
> - __atomic_load_n(&dsw->ports[i].load, __ATOMIC_RELAXED);
> + rte_atomic_load_explicit(&dsw->ports[i].load, rte_memory_order_relaxed);
> int32_t immigration_load =
> - __atomic_load_n(&dsw->ports[i].immigration_load,
> - __ATOMIC_RELAXED);
> + rte_atomic_load_explicit(&dsw->ports[i].immigration_load,
> + rte_memory_order_relaxed);
> int32_t load = measured_load + immigration_load;
>
> load = RTE_MIN(load, DSW_MAX_LOAD);
> @@ -523,8 +523,8 @@ struct dsw_queue_flow_burst {
> target_qfs[*targets_len] = *candidate_qf;
> (*targets_len)++;
>
> - __atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
> - candidate_flow_load, __ATOMIC_RELAXED);
> + rte_atomic_fetch_add_explicit(&dsw->ports[candidate_port_id].immigration_load,
> + candidate_flow_load, rte_memory_order_relaxed);
>
> return true;
> }
> @@ -882,7 +882,7 @@ struct dsw_queue_flow_burst {
> }
>
> source_port_load =
> - __atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
> + rte_atomic_load_explicit(&source_port->load, rte_memory_order_relaxed);
> if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
> DSW_LOG_DP_PORT(DEBUG, source_port->id,
> "Load %d is below threshold level %d.\n",
> @@ -1301,7 +1301,7 @@ struct dsw_queue_flow_burst {
> * above the water mark.
> */
> if (unlikely(num_new > 0 &&
> - __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED) >
> + rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed) >
> source_port->new_event_threshold))
> return 0;
>
> diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
> index 2a83a28..f61dfd8 100644
> --- a/drivers/event/dsw/dsw_xstats.c
> +++ b/drivers/event/dsw/dsw_xstats.c
> @@ -48,7 +48,7 @@ struct dsw_xstats_port {
> static uint64_t
> dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
> {
> - return __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
> + return rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
> }
>
> static struct dsw_xstat_dev dsw_dev_xstats[] = {
> @@ -126,7 +126,7 @@ struct dsw_xstats_port {
> {
> int16_t load;
>
> - load = __atomic_load_n(&dsw->ports[port_id].load, __ATOMIC_RELAXED);
> + load = rte_atomic_load_explicit(&dsw->ports[port_id].load, rte_memory_order_relaxed);
>
> return DSW_LOAD_TO_PERCENT(load);
> }
@@ -227,9 +227,9 @@ struct dsw_port {
struct rte_ring *ctl_in_ring __rte_cache_aligned;
/* Estimate of current port load. */
- int16_t load __rte_cache_aligned;
+ RTE_ATOMIC(int16_t) load __rte_cache_aligned;
/* Estimate of flows currently migrating to this port. */
- int32_t immigration_load __rte_cache_aligned;
+ RTE_ATOMIC(int32_t) immigration_load __rte_cache_aligned;
} __rte_cache_aligned;
struct dsw_queue {
@@ -252,7 +252,7 @@ struct dsw_evdev {
uint8_t num_queues;
int32_t max_inflight;
- int32_t credits_on_loan __rte_cache_aligned;
+ RTE_ATOMIC(int32_t) credits_on_loan __rte_cache_aligned;
};
#define DSW_CTL_PAUS_REQ (0)
@@ -33,7 +33,7 @@
}
total_on_loan =
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
available = dsw->max_inflight - total_on_loan;
acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
@@ -45,13 +45,13 @@
* allocation.
*/
new_total_on_loan =
- __atomic_fetch_add(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED) + acquired_credits;
+ rte_atomic_fetch_add_explicit(&dsw->credits_on_loan, acquired_credits,
+ rte_memory_order_relaxed) + acquired_credits;
if (unlikely(new_total_on_loan > dsw->max_inflight)) {
/* Some other port took the last credits */
- __atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, acquired_credits,
+ rte_memory_order_relaxed);
return false;
}
@@ -77,8 +77,8 @@
port->inflight_credits = leave_credits;
- __atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, return_credits,
+ rte_memory_order_relaxed);
DSW_LOG_DP_PORT(DEBUG, port->id,
"Returned %d tokens to pool.\n",
@@ -156,19 +156,19 @@
int16_t period_load;
int16_t new_load;
- old_load = __atomic_load_n(&port->load, __ATOMIC_RELAXED);
+ old_load = rte_atomic_load_explicit(&port->load, rte_memory_order_relaxed);
period_load = dsw_port_load_close_period(port, now);
new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
(DSW_OLD_LOAD_WEIGHT+1);
- __atomic_store_n(&port->load, new_load, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->load, new_load, rte_memory_order_relaxed);
/* The load of the recently immigrated flows should hopefully
* be reflected the load estimate by now.
*/
- __atomic_store_n(&port->immigration_load, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->immigration_load, 0, rte_memory_order_relaxed);
}
static void
@@ -390,10 +390,10 @@ struct dsw_queue_flow_burst {
for (i = 0; i < dsw->num_ports; i++) {
int16_t measured_load =
- __atomic_load_n(&dsw->ports[i].load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].load, rte_memory_order_relaxed);
int32_t immigration_load =
- __atomic_load_n(&dsw->ports[i].immigration_load,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].immigration_load,
+ rte_memory_order_relaxed);
int32_t load = measured_load + immigration_load;
load = RTE_MIN(load, DSW_MAX_LOAD);
@@ -523,8 +523,8 @@ struct dsw_queue_flow_burst {
target_qfs[*targets_len] = *candidate_qf;
(*targets_len)++;
- __atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
- candidate_flow_load, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&dsw->ports[candidate_port_id].immigration_load,
+ candidate_flow_load, rte_memory_order_relaxed);
return true;
}
@@ -882,7 +882,7 @@ struct dsw_queue_flow_burst {
}
source_port_load =
- __atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&source_port->load, rte_memory_order_relaxed);
if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
DSW_LOG_DP_PORT(DEBUG, source_port->id,
"Load %d is below threshold level %d.\n",
@@ -1301,7 +1301,7 @@ struct dsw_queue_flow_burst {
* above the water mark.
*/
if (unlikely(num_new > 0 &&
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED) >
+ rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed) >
source_port->new_event_threshold))
return 0;
@@ -48,7 +48,7 @@ struct dsw_xstats_port {
static uint64_t
dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
{
- return __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
}
static struct dsw_xstat_dev dsw_dev_xstats[] = {
@@ -126,7 +126,7 @@ struct dsw_xstats_port {
{
int16_t load;
- load = __atomic_load_n(&dsw->ports[port_id].load, __ATOMIC_RELAXED);
+ load = rte_atomic_load_explicit(&dsw->ports[port_id].load, rte_memory_order_relaxed);
return DSW_LOAD_TO_PERCENT(load);
}