drivers: use stdatomic API

Message ID 1699515243-29413-1-git-send-email-roretzla@linux.microsoft.com (mailing list archive)
State Rejected, archived
Delegated to: Raslan Darawsheh
Series: drivers: use stdatomic API

Checks

Context                       Check    Description
ci/checkpatch                 warning  coding style issues
ci/loongarch-compilation      success  Compilation OK
ci/loongarch-unit-testing     success  Unit Testing PASS
ci/github-robot: build        success  github build: passed
ci/iol-intel-Functional       success  Functional Testing PASS
ci/iol-mellanox-Performance   success  Performance Testing PASS
ci/iol-intel-Performance      success  Performance Testing PASS
ci/iol-compile-amd64-testing  success  Testing PASS
ci/iol-unit-amd64-testing     success  Testing PASS
ci/iol-unit-arm64-testing     success  Testing PASS
ci/iol-sample-apps-testing    success  Testing PASS
ci/iol-compile-arm64-testing  success  Testing PASS
ci/iol-broadcom-Performance   success  Performance Testing PASS
ci/iol-broadcom-Functional    success  Functional Testing PASS
ci/Intel-compilation          warning  apply issues

Commit Message

Tyler Retzlaff Nov. 9, 2023, 7:34 a.m. UTC
  Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 drivers/common/mlx5/linux/mlx5_nl.c     |   5 +-
 drivers/common/mlx5/mlx5_common.h       |   2 +-
 drivers/common/mlx5/mlx5_common_mr.c    |  16 ++--
 drivers/common/mlx5/mlx5_common_mr.h    |   2 +-
 drivers/common/mlx5/mlx5_common_utils.c |  32 ++++----
 drivers/common/mlx5/mlx5_common_utils.h |   6 +-
 drivers/common/mlx5/mlx5_malloc.c       |  58 +++++++--------
 drivers/net/mlx5/linux/mlx5_ethdev_os.c |   6 +-
 drivers/net/mlx5/linux/mlx5_verbs.c     |   9 ++-
 drivers/net/mlx5/mlx5.c                 |   9 ++-
 drivers/net/mlx5/mlx5.h                 |  66 ++++++++---------
 drivers/net/mlx5/mlx5_flow.c            |  37 +++++-----
 drivers/net/mlx5/mlx5_flow.h            |  12 +--
 drivers/net/mlx5/mlx5_flow_aso.c        |  35 +++++----
 drivers/net/mlx5/mlx5_flow_dv.c         | 126 ++++++++++++++++----------------
 drivers/net/mlx5/mlx5_flow_flex.c       |  14 ++--
 drivers/net/mlx5/mlx5_flow_hw.c         |  73 +++++++++---------
 drivers/net/mlx5/mlx5_flow_meter.c      |  30 ++++----
 drivers/net/mlx5/mlx5_flow_quota.c      |  32 ++++----
 drivers/net/mlx5/mlx5_hws_cnt.c         |  71 +++++++++---------
 drivers/net/mlx5/mlx5_hws_cnt.h         |  10 +--
 drivers/net/mlx5/mlx5_rx.h              |  14 ++--
 drivers/net/mlx5/mlx5_rxq.c             |  30 ++++----
 drivers/net/mlx5/mlx5_trigger.c         |   2 +-
 drivers/net/mlx5/mlx5_tx.h              |  18 ++---
 drivers/net/mlx5/mlx5_txpp.c            |  84 ++++++++++-----------
 drivers/net/mlx5/mlx5_txq.c             |  12 +--
 drivers/net/mlx5/mlx5_utils.c           |  10 +--
 drivers/net/mlx5/mlx5_utils.h           |   4 +-
 drivers/vdpa/mlx5/mlx5_vdpa.c           |  24 +++---
 drivers/vdpa/mlx5/mlx5_vdpa.h           |  14 ++--
 drivers/vdpa/mlx5/mlx5_vdpa_cthread.c   |  46 ++++++------
 drivers/vdpa/mlx5/mlx5_vdpa_lm.c        |   4 +-
 drivers/vdpa/mlx5/mlx5_vdpa_mem.c       |   4 +-
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c     |   4 +-
 35 files changed, 468 insertions(+), 453 deletions(-)
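
For illustration, the sketch below shows the conversion pattern the series applies, using a hypothetical example_obj structure rather than any real mlx5 type. The RTE_ATOMIC() specifier, the rte_atomic_*_explicit() calls and the rte_memory_order_* constants are the optional stdatomic API referenced in the diff; the relaxed ordering mirrors the original __ATOMIC_RELAXED arguments, and the standalone weak/strong flag of __atomic_compare_exchange_n is folded into the _strong/_weak suffix of the wrapper name.

#include <stdbool.h>
#include <stdint.h>

#include <rte_stdatomic.h>

/* Hypothetical object, not part of the patch; fields mirror the converted code. */
struct example_obj {
	RTE_ATOMIC(uint32_t) refcnt; /* was: uint32_t refcnt; */
	RTE_ATOMIC(void *) ctx;      /* was: void *ctx; */
};

static inline uint32_t
example_obj_ref(struct example_obj *obj)
{
	/* was: __atomic_fetch_add(&obj->refcnt, 1, __ATOMIC_RELAXED) + 1 */
	return rte_atomic_fetch_add_explicit(&obj->refcnt, 1,
					     rte_memory_order_relaxed) + 1;
}

static inline bool
example_obj_unref(struct example_obj *obj)
{
	/* was: __atomic_fetch_sub(&obj->refcnt, 1, __ATOMIC_RELAXED) - 1 == 0 */
	return rte_atomic_fetch_sub_explicit(&obj->refcnt, 1,
					     rte_memory_order_relaxed) - 1 == 0;
}

static inline bool
example_obj_set_ctx_once(struct example_obj *obj, void *ctx)
{
	void *expected = NULL;

	/*
	 * was: __atomic_compare_exchange_n(&obj->ctx, &expected, ctx, false,
	 *                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED)
	 */
	return rte_atomic_compare_exchange_strong_explicit(&obj->ctx, &expected,
			ctx, rte_memory_order_relaxed, rte_memory_order_relaxed);
}

Depending on the build configuration, these wrappers are expected to map either to the compiler's __atomic builtins or to C11 stdatomic, which is why the API is described as optional; no functional change is intended by the conversion.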
  

Comments

Tyler Retzlaff March 6, 2024, 7:48 p.m. UTC | #1
Withdrawing this series.

I will submit a new series that tries to address the atomics conversion in
all drivers.

On Wed, Nov 08, 2023 at 11:34:03PM -0800, Tyler Retzlaff wrote:
> Replace the use of the gcc builtin __atomic_xxx intrinsics with the
> corresponding rte_atomic_xxx optional stdatomic API.
> 
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> ---
>  drivers/common/mlx5/linux/mlx5_nl.c     |   5 +-
>  drivers/common/mlx5/mlx5_common.h       |   2 +-
>  drivers/common/mlx5/mlx5_common_mr.c    |  16 ++--
>  drivers/common/mlx5/mlx5_common_mr.h    |   2 +-
>  drivers/common/mlx5/mlx5_common_utils.c |  32 ++++----
>  drivers/common/mlx5/mlx5_common_utils.h |   6 +-
>  drivers/common/mlx5/mlx5_malloc.c       |  58 +++++++--------
>  drivers/net/mlx5/linux/mlx5_ethdev_os.c |   6 +-
>  drivers/net/mlx5/linux/mlx5_verbs.c     |   9 ++-
>  drivers/net/mlx5/mlx5.c                 |   9 ++-
>  drivers/net/mlx5/mlx5.h                 |  66 ++++++++---------
>  drivers/net/mlx5/mlx5_flow.c            |  37 +++++-----
>  drivers/net/mlx5/mlx5_flow.h            |  12 +--
>  drivers/net/mlx5/mlx5_flow_aso.c        |  35 +++++----
>  drivers/net/mlx5/mlx5_flow_dv.c         | 126 ++++++++++++++++----------------
>  drivers/net/mlx5/mlx5_flow_flex.c       |  14 ++--
>  drivers/net/mlx5/mlx5_flow_hw.c         |  73 +++++++++---------
>  drivers/net/mlx5/mlx5_flow_meter.c      |  30 ++++----
>  drivers/net/mlx5/mlx5_flow_quota.c      |  32 ++++----
>  drivers/net/mlx5/mlx5_hws_cnt.c         |  71 +++++++++---------
>  drivers/net/mlx5/mlx5_hws_cnt.h         |  10 +--
>  drivers/net/mlx5/mlx5_rx.h              |  14 ++--
>  drivers/net/mlx5/mlx5_rxq.c             |  30 ++++----
>  drivers/net/mlx5/mlx5_trigger.c         |   2 +-
>  drivers/net/mlx5/mlx5_tx.h              |  18 ++---
>  drivers/net/mlx5/mlx5_txpp.c            |  84 ++++++++++-----------
>  drivers/net/mlx5/mlx5_txq.c             |  12 +--
>  drivers/net/mlx5/mlx5_utils.c           |  10 +--
>  drivers/net/mlx5/mlx5_utils.h           |   4 +-
>  drivers/vdpa/mlx5/mlx5_vdpa.c           |  24 +++---
>  drivers/vdpa/mlx5/mlx5_vdpa.h           |  14 ++--
>  drivers/vdpa/mlx5/mlx5_vdpa_cthread.c   |  46 ++++++------
>  drivers/vdpa/mlx5/mlx5_vdpa_lm.c        |   4 +-
>  drivers/vdpa/mlx5/mlx5_vdpa_mem.c       |   4 +-
>  drivers/vdpa/mlx5/mlx5_vdpa_virtq.c     |   4 +-
>  35 files changed, 468 insertions(+), 453 deletions(-)
> 
> diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
> index 28a1f56..2fcd859 100644
> --- a/drivers/common/mlx5/linux/mlx5_nl.c
> +++ b/drivers/common/mlx5/linux/mlx5_nl.c
> @@ -175,10 +175,11 @@ struct mlx5_nl_port_info {
>  	uint16_t state; /**< IB device port state (out). */
>  };
>  
> -uint32_t atomic_sn;
> +RTE_ATOMIC(uint32_t) atomic_sn;
>  
>  /* Generate Netlink sequence number. */
> -#define MLX5_NL_SN_GENERATE (__atomic_fetch_add(&atomic_sn, 1, __ATOMIC_RELAXED) + 1)
> +#define MLX5_NL_SN_GENERATE \
> +	(rte_atomic_fetch_add_explicit(&atomic_sn, 1, rte_memory_order_relaxed) + 1)
>  
>  /**
>   * Opens a Netlink socket.
> diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
> index 9c80277..14c70ed 100644
> --- a/drivers/common/mlx5/mlx5_common.h
> +++ b/drivers/common/mlx5/mlx5_common.h
> @@ -195,7 +195,7 @@ enum mlx5_cqe_status {
>  	/* Prevent speculative reading of other fields in CQE until
>  	 * CQE is valid.
>  	 */
> -	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
> +	rte_atomic_thread_fence(rte_memory_order_acquire);
>  
>  	if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
>  		     op_code == MLX5_CQE_REQ_ERR))
> diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
> index 40ff915..72e36ed 100644
> --- a/drivers/common/mlx5/mlx5_common_mr.c
> +++ b/drivers/common/mlx5/mlx5_common_mr.c
> @@ -35,7 +35,7 @@ struct mlx5_range {
>  /** Memory region for a mempool. */
>  struct mlx5_mempool_mr {
>  	struct mlx5_pmd_mr pmd_mr;
> -	uint32_t refcnt; /**< Number of mempools sharing this MR. */
> +	RTE_ATOMIC(uint32_t) refcnt; /**< Number of mempools sharing this MR. */
>  };
>  
>  /* Mempool registration. */
> @@ -56,11 +56,11 @@ struct mlx5_mempool_reg {
>  {
>  	struct mlx5_mprq_buf *buf = opaque;
>  
> -	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
> +	if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) == 1) {
>  		rte_mempool_put(buf->mp, buf);
> -	} else if (unlikely(__atomic_fetch_sub(&buf->refcnt, 1,
> -					       __ATOMIC_RELAXED) - 1 == 0)) {
> -		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
> +	} else if (unlikely(rte_atomic_fetch_sub_explicit(&buf->refcnt, 1,
> +					       rte_memory_order_relaxed) - 1 == 0)) {
> +		rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
>  		rte_mempool_put(buf->mp, buf);
>  	}
>  }
> @@ -1650,7 +1650,7 @@ struct mlx5_mempool_get_extmem_data {
>  	unsigned int i;
>  
>  	for (i = 0; i < mpr->mrs_n; i++)
> -		__atomic_fetch_add(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&mpr->mrs[i].refcnt, 1, rte_memory_order_relaxed);
>  }
>  
>  /**
> @@ -1665,8 +1665,8 @@ struct mlx5_mempool_get_extmem_data {
>  	bool ret = false;
>  
>  	for (i = 0; i < mpr->mrs_n; i++)
> -		ret |= __atomic_fetch_sub(&mpr->mrs[i].refcnt, 1,
> -					  __ATOMIC_RELAXED) - 1 == 0;
> +		ret |= rte_atomic_fetch_sub_explicit(&mpr->mrs[i].refcnt, 1,
> +					  rte_memory_order_relaxed) - 1 == 0;
>  	return ret;
>  }
>  
> diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
> index 8789d40..5bdf48a 100644
> --- a/drivers/common/mlx5/mlx5_common_mr.h
> +++ b/drivers/common/mlx5/mlx5_common_mr.h
> @@ -93,7 +93,7 @@ struct mlx5_mr_share_cache {
>  /* Multi-Packet RQ buffer header. */
>  struct mlx5_mprq_buf {
>  	struct rte_mempool *mp;
> -	uint16_t refcnt; /* Atomically accessed refcnt. */
> +	RTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. */
>  	struct rte_mbuf_ext_shared_info shinfos[];
>  	/*
>  	 * Shared information per stride.
> diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c
> index e69d068..4b95d35 100644
> --- a/drivers/common/mlx5/mlx5_common_utils.c
> +++ b/drivers/common/mlx5/mlx5_common_utils.c
> @@ -81,14 +81,14 @@ struct mlx5_list *
>  	while (entry != NULL) {
>  		if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
>  			if (reuse) {
> -				ret = __atomic_fetch_add(&entry->ref_cnt, 1,
> -							 __ATOMIC_RELAXED);
> +				ret = rte_atomic_fetch_add_explicit(&entry->ref_cnt, 1,
> +							 rte_memory_order_relaxed);
>  				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
>  					l_const->name, (void *)entry,
>  					entry->ref_cnt);
>  			} else if (lcore_index < MLX5_LIST_GLOBAL) {
> -				ret = __atomic_load_n(&entry->ref_cnt,
> -						      __ATOMIC_RELAXED);
> +				ret = rte_atomic_load_explicit(&entry->ref_cnt,
> +						      rte_memory_order_relaxed);
>  			}
>  			if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
>  				return entry;
> @@ -151,13 +151,13 @@ struct mlx5_list_entry *
>  {
>  	struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
>  	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
> -	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
> -					       __ATOMIC_RELAXED);
> +	uint32_t inv_cnt = rte_atomic_exchange_explicit(&c->inv_cnt, 0,
> +					       rte_memory_order_relaxed);
>  
>  	while (inv_cnt != 0 && entry != NULL) {
>  		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
>  
> -		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
> +		if (rte_atomic_load_explicit(&entry->ref_cnt, rte_memory_order_relaxed) == 0) {
>  			LIST_REMOVE(entry, next);
>  			if (l_const->lcores_share)
>  				l_const->cb_clone_free(l_const->ctx, entry);
> @@ -217,7 +217,7 @@ struct mlx5_list_entry *
>  		entry->lcore_idx = (uint32_t)lcore_index;
>  		LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
>  				 entry, next);
> -		__atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
>  		DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
>  			l_const->name, lcore_index,
>  			(void *)entry, entry->ref_cnt);
> @@ -254,7 +254,7 @@ struct mlx5_list_entry *
>  	l_inconst->gen_cnt++;
>  	rte_rwlock_write_unlock(&l_inconst->lock);
>  	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
> -	__atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
>  	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
>  		(void *)entry, entry->ref_cnt);
>  	return local_entry;
> @@ -285,7 +285,7 @@ struct mlx5_list_entry *
>  {
>  	struct mlx5_list_entry *gentry = entry->gentry;
>  
> -	if (__atomic_fetch_sub(&entry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
> +	if (rte_atomic_fetch_sub_explicit(&entry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
>  		return 1;
>  	if (entry->lcore_idx == (uint32_t)lcore_idx) {
>  		LIST_REMOVE(entry, next);
> @@ -294,23 +294,23 @@ struct mlx5_list_entry *
>  		else
>  			l_const->cb_remove(l_const->ctx, entry);
>  	} else {
> -		__atomic_fetch_add(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
> -				   1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
> +				   1, rte_memory_order_relaxed);
>  	}
>  	if (!l_const->lcores_share) {
> -		__atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
>  		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
>  			l_const->name, (void *)entry);
>  		return 0;
>  	}
> -	if (__atomic_fetch_sub(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
> +	if (rte_atomic_fetch_sub_explicit(&gentry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
>  		return 1;
>  	rte_rwlock_write_lock(&l_inconst->lock);
>  	if (likely(gentry->ref_cnt == 0)) {
>  		LIST_REMOVE(gentry, next);
>  		rte_rwlock_write_unlock(&l_inconst->lock);
>  		l_const->cb_remove(l_const->ctx, gentry);
> -		__atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
>  		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
>  			l_const->name, (void *)gentry);
>  		return 0;
> @@ -377,7 +377,7 @@ struct mlx5_list_entry *
>  mlx5_list_get_entry_num(struct mlx5_list *list)
>  {
>  	MLX5_ASSERT(list);
> -	return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
> +	return rte_atomic_load_explicit(&list->l_inconst.count, rte_memory_order_relaxed);
>  }
>  
>  /********************* Hash List **********************/
> diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
> index ae15119..cb4d104 100644
> --- a/drivers/common/mlx5/mlx5_common_utils.h
> +++ b/drivers/common/mlx5/mlx5_common_utils.h
> @@ -29,7 +29,7 @@
>   */
>  struct mlx5_list_entry {
>  	LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
> -	uint32_t ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
> +	RTE_ATOMIC(uint32_t) ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
>  	uint32_t lcore_idx;
>  	union {
>  		struct mlx5_list_entry *gentry;
> @@ -39,7 +39,7 @@ struct mlx5_list_entry {
>  
>  struct mlx5_list_cache {
>  	LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
> -	uint32_t inv_cnt; /* Invalid entries counter. */
> +	RTE_ATOMIC(uint32_t) inv_cnt; /* Invalid entries counter. */
>  } __rte_cache_aligned;
>  
>  /**
> @@ -111,7 +111,7 @@ struct mlx5_list_const {
>  struct mlx5_list_inconst {
>  	rte_rwlock_t lock; /* read/write lock. */
>  	volatile uint32_t gen_cnt; /* List modification may update it. */
> -	volatile uint32_t count; /* number of entries in list. */
> +	volatile RTE_ATOMIC(uint32_t) count; /* number of entries in list. */
>  	struct mlx5_list_cache *cache[MLX5_LIST_MAX];
>  	/* Lcore cache, last index is the global cache. */
>  };
> diff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c
> index c58c41d..ef6dabe 100644
> --- a/drivers/common/mlx5/mlx5_malloc.c
> +++ b/drivers/common/mlx5/mlx5_malloc.c
> @@ -16,7 +16,7 @@ struct mlx5_sys_mem {
>  	uint32_t init:1; /* Memory allocator initialized. */
>  	uint32_t enable:1; /* System memory select. */
>  	uint32_t reserve:30; /* Reserve. */
> -	struct rte_memseg_list *last_msl;
> +	RTE_ATOMIC(struct rte_memseg_list *) last_msl;
>  	/* last allocated rte memory memseg list. */
>  #ifdef RTE_LIBRTE_MLX5_DEBUG
>  	uint64_t malloc_sys;
> @@ -93,14 +93,14 @@ struct mlx5_sys_mem {
>  	 * different with the cached msl.
>  	 */
>  	if (addr && !mlx5_mem_check_msl(addr,
> -	    (struct rte_memseg_list *)__atomic_load_n
> -	    (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
> -		__atomic_store_n(&mlx5_sys_mem.last_msl,
> +	    (struct rte_memseg_list *)rte_atomic_load_explicit
> +	    (&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
> +		rte_atomic_store_explicit(&mlx5_sys_mem.last_msl,
>  			rte_mem_virt2memseg_list(addr),
> -			__ATOMIC_RELAXED);
> +			rte_memory_order_relaxed);
>  #ifdef RTE_LIBRTE_MLX5_DEBUG
> -		__atomic_fetch_add(&mlx5_sys_mem.msl_update, 1,
> -				   __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_update, 1,
> +				   rte_memory_order_relaxed);
>  #endif
>  	}
>  }
> @@ -122,11 +122,11 @@ struct mlx5_sys_mem {
>  	 * to check if the memory belongs to rte memory.
>  	 */
>  	if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
> -	    __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
> +	    rte_atomic_load_explicit(&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
>  		if (!rte_mem_virt2memseg_list(addr))
>  			return false;
>  #ifdef RTE_LIBRTE_MLX5_DEBUG
> -		__atomic_fetch_add(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_miss, 1, rte_memory_order_relaxed);
>  #endif
>  	}
>  	return true;
> @@ -185,8 +185,8 @@ struct mlx5_sys_mem {
>  		mlx5_mem_update_msl(addr);
>  #ifdef RTE_LIBRTE_MLX5_DEBUG
>  		if (addr)
> -			__atomic_fetch_add(&mlx5_sys_mem.malloc_rte, 1,
> -					   __ATOMIC_RELAXED);
> +			rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_rte, 1,
> +					   rte_memory_order_relaxed);
>  #endif
>  		return addr;
>  	}
> @@ -199,8 +199,8 @@ struct mlx5_sys_mem {
>  		addr = malloc(size);
>  #ifdef RTE_LIBRTE_MLX5_DEBUG
>  	if (addr)
> -		__atomic_fetch_add(&mlx5_sys_mem.malloc_sys, 1,
> -				   __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_sys, 1,
> +				   rte_memory_order_relaxed);
>  #endif
>  	return addr;
>  }
> @@ -233,8 +233,8 @@ struct mlx5_sys_mem {
>  		mlx5_mem_update_msl(new_addr);
>  #ifdef RTE_LIBRTE_MLX5_DEBUG
>  		if (new_addr)
> -			__atomic_fetch_add(&mlx5_sys_mem.realloc_rte, 1,
> -					   __ATOMIC_RELAXED);
> +			rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_rte, 1,
> +					   rte_memory_order_relaxed);
>  #endif
>  		return new_addr;
>  	}
> @@ -246,8 +246,8 @@ struct mlx5_sys_mem {
>  	new_addr = realloc(addr, size);
>  #ifdef RTE_LIBRTE_MLX5_DEBUG
>  	if (new_addr)
> -		__atomic_fetch_add(&mlx5_sys_mem.realloc_sys, 1,
> -				   __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_sys, 1,
> +				   rte_memory_order_relaxed);
>  #endif
>  	return new_addr;
>  }
> @@ -259,14 +259,14 @@ struct mlx5_sys_mem {
>  		return;
>  	if (!mlx5_mem_is_rte(addr)) {
>  #ifdef RTE_LIBRTE_MLX5_DEBUG
> -		__atomic_fetch_add(&mlx5_sys_mem.free_sys, 1,
> -				   __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_sys, 1,
> +				   rte_memory_order_relaxed);
>  #endif
>  		mlx5_os_free(addr);
>  	} else {
>  #ifdef RTE_LIBRTE_MLX5_DEBUG
> -		__atomic_fetch_add(&mlx5_sys_mem.free_rte, 1,
> -				   __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_rte, 1,
> +				   rte_memory_order_relaxed);
>  #endif
>  		rte_free(addr);
>  	}
> @@ -280,14 +280,14 @@ struct mlx5_sys_mem {
>  		" free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
>  		" realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
>  		" update:%"PRIi64"",
> -		__atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),
> -		__atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),
> -		__atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),
> -		__atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),
> -		__atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),
> -		__atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),
> -		__atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),
> -		__atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));
> +		rte_atomic_load_explicit(&mlx5_sys_mem.malloc_sys, rte_memory_order_relaxed),
> +		rte_atomic_load_explicit(&mlx5_sys_mem.realloc_sys, rte_memory_order_relaxed),
> +		rte_atomic_load_explicit(&mlx5_sys_mem.free_sys, rte_memory_order_relaxed),
> +		rte_atomic_load_explicit(&mlx5_sys_mem.malloc_rte, rte_memory_order_relaxed),
> +		rte_atomic_load_explicit(&mlx5_sys_mem.realloc_rte, rte_memory_order_relaxed),
> +		rte_atomic_load_explicit(&mlx5_sys_mem.free_rte, rte_memory_order_relaxed),
> +		rte_atomic_load_explicit(&mlx5_sys_mem.msl_miss, rte_memory_order_relaxed),
> +		rte_atomic_load_explicit(&mlx5_sys_mem.msl_update, rte_memory_order_relaxed));
>  #endif
>  }
>  
> diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
> index dd5a0c5..d35cf82 100644
> --- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
> +++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
> @@ -1867,9 +1867,9 @@ int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
>  		return -ENOTSUP;
>  	}
>  	/* Check there is no concurrent mapping in other thread. */
> -	if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected,
> -					 base, false,
> -					 __ATOMIC_RELAXED, __ATOMIC_RELAXED))
> +	if (!rte_atomic_compare_exchange_strong_explicit(&ppriv->hca_bar, &expected,
> +					 base,
> +					 rte_memory_order_relaxed, rte_memory_order_relaxed))
>  		rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));
>  	return 0;
>  }
> diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
> index b54f3cc..63da8f4 100644
> --- a/drivers/net/mlx5/linux/mlx5_verbs.c
> +++ b/drivers/net/mlx5/linux/mlx5_verbs.c
> @@ -1117,7 +1117,7 @@
>  		return 0;
>  	}
>  	/* Only need to check refcnt, 0 after "sh" is allocated. */
> -	if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
> +	if (!!(rte_atomic_fetch_add_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed))) {
>  		MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
>  		priv->lb_used = 1;
>  		return 0;
> @@ -1163,7 +1163,7 @@
>  		claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
>  		sh->self_lb.ibv_cq = NULL;
>  	}
> -	__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed);
>  	return -rte_errno;
>  #else
>  	RTE_SET_USED(dev);
> @@ -1186,8 +1186,9 @@
>  
>  	if (!priv->lb_used)
>  		return;
> -	MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
> -	if (!(__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) - 1)) {
> +	MLX5_ASSERT(rte_atomic_load_explicit(&sh->self_lb.refcnt, rte_memory_order_relaxed));
> +	if (!(rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1,
> +	    rte_memory_order_relaxed) - 1)) {
>  		if (sh->self_lb.qp) {
>  			claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
>  			sh->self_lb.qp = NULL;
> diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
> index 2cf21a1..03944e3 100644
> --- a/drivers/net/mlx5/mlx5.c
> +++ b/drivers/net/mlx5/mlx5.c
> @@ -854,8 +854,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
>  		ct_pool = mng->pools[idx];
>  		for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
>  			ct = &ct_pool->actions[i];
> -			val = __atomic_fetch_sub(&ct->refcnt, 1,
> -						 __ATOMIC_RELAXED);
> +			val = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1,
> +						 rte_memory_order_relaxed);
>  			MLX5_ASSERT(val == 1);
>  			if (val > 1)
>  				cnt++;
> @@ -1081,7 +1081,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
>  		DRV_LOG(ERR, "Dynamic flex parser is not supported on HWS");
>  		return -ENOTSUP;
>  	}
> -	if (__atomic_fetch_add(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) + 1 > 1)
> +	if (rte_atomic_fetch_add_explicit(&priv->sh->srh_flex_parser.refcnt, 1,
> +	    rte_memory_order_relaxed) + 1 > 1)
>  		return 0;
>  	priv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,
>  			sizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);
> @@ -1172,7 +1173,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
>  	struct mlx5_priv *priv = dev->data->dev_private;
>  	struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;
>  
> -	if (__atomic_fetch_sub(&fp->refcnt, 1, __ATOMIC_RELAXED) - 1)
> +	if (rte_atomic_fetch_sub_explicit(&fp->refcnt, 1, rte_memory_order_relaxed) - 1)
>  		return;
>  	mlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);
>  	mlx5_free(fp->flex.devx_fp);
> diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
> index ee13ad6..d76e4b3 100644
> --- a/drivers/net/mlx5/mlx5.h
> +++ b/drivers/net/mlx5/mlx5.h
> @@ -370,7 +370,7 @@ struct mlx5_drop {
>  struct mlx5_lb_ctx {
>  	struct ibv_qp *qp; /* QP object. */
>  	void *ibv_cq; /* Completion queue. */
> -	uint16_t refcnt; /* Reference count for representors. */
> +	RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
>  };
>  
>  /* HW steering queue job descriptor type. */
> @@ -473,10 +473,10 @@ enum mlx5_counter_type {
>  
>  /* Counter age parameter. */
>  struct mlx5_age_param {
> -	uint16_t state; /**< Age state (atomically accessed). */
> +	RTE_ATOMIC(uint16_t) state; /**< Age state (atomically accessed). */
>  	uint16_t port_id; /**< Port id of the counter. */
>  	uint32_t timeout:24; /**< Aging timeout in seconds. */
> -	uint32_t sec_since_last_hit;
> +	RTE_ATOMIC(uint32_t) sec_since_last_hit;
>  	/**< Time in seconds since last hit (atomically accessed). */
>  	void *context; /**< Flow counter age context. */
>  };
> @@ -489,7 +489,7 @@ struct flow_counter_stats {
>  /* Shared counters information for counters. */
>  struct mlx5_flow_counter_shared {
>  	union {
> -		uint32_t refcnt; /* Only for shared action management. */
> +		RTE_ATOMIC(uint32_t) refcnt; /* Only for shared action management. */
>  		uint32_t id; /* User counter ID for legacy sharing. */
>  	};
>  };
> @@ -580,7 +580,7 @@ struct mlx5_counter_stats_raw {
>  
>  /* Counter global management structure. */
>  struct mlx5_flow_counter_mng {
> -	volatile uint16_t n_valid; /* Number of valid pools. */
> +	volatile RTE_ATOMIC(uint16_t) n_valid; /* Number of valid pools. */
>  	uint16_t last_pool_idx; /* Last used pool index */
>  	int min_id; /* The minimum counter ID in the pools. */
>  	int max_id; /* The maximum counter ID in the pools. */
> @@ -646,7 +646,7 @@ struct mlx5_aso_sq {
>  struct mlx5_aso_age_action {
>  	LIST_ENTRY(mlx5_aso_age_action) next;
>  	void *dr_action;
> -	uint32_t refcnt;
> +	RTE_ATOMIC(uint32_t) refcnt;
>  	/* Following fields relevant only when action is active. */
>  	uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
>  	struct mlx5_age_param age_params;
> @@ -680,7 +680,7 @@ struct mlx5_geneve_tlv_option_resource {
>  	rte_be16_t option_class; /* geneve tlv opt class.*/
>  	uint8_t option_type; /* geneve tlv opt type.*/
>  	uint8_t length; /* geneve tlv opt length. */
> -	uint32_t refcnt; /* geneve tlv object reference counter */
> +	RTE_ATOMIC(uint32_t) refcnt; /* geneve tlv object reference counter */
>  };
>  
>  
> @@ -895,7 +895,7 @@ struct mlx5_flow_meter_policy {
>  	uint16_t group;
>  	/* The group. */
>  	rte_spinlock_t sl;
> -	uint32_t ref_cnt;
> +	RTE_ATOMIC(uint32_t) ref_cnt;
>  	/* Use count. */
>  	struct rte_flow_pattern_template *hws_item_templ;
>  	/* Hardware steering item templates. */
> @@ -1030,7 +1030,7 @@ struct mlx5_flow_meter_profile {
>  		struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
>  		/**< srtcm_rfc2697 struct. */
>  	};
> -	uint32_t ref_cnt; /**< Use count. */
> +	RTE_ATOMIC(uint32_t) ref_cnt; /**< Use count. */
>  	uint32_t g_support:1; /**< If G color will be generated. */
>  	uint32_t y_support:1; /**< If Y color will be generated. */
>  	uint32_t initialized:1; /**< Initialized. */
> @@ -1070,7 +1070,7 @@ struct mlx5_aso_mtr {
>  	enum mlx5_aso_mtr_type type;
>  	struct mlx5_flow_meter_info fm;
>  	/**< Pointer to the next aso flow meter structure. */
> -	uint8_t state; /**< ASO flow meter state. */
> +	RTE_ATOMIC(uint8_t) state; /**< ASO flow meter state. */
>  	uint32_t offset;
>  	enum rte_color init_color;
>  };
> @@ -1116,7 +1116,7 @@ struct mlx5_flow_mtr_mng {
>  	/* Default policy table. */
>  	uint32_t def_policy_id;
>  	/* Default policy id. */
> -	uint32_t def_policy_ref_cnt;
> +	RTE_ATOMIC(uint32_t) def_policy_ref_cnt;
>  	/** def_policy meter use count. */
>  	struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
>  	/* Meter drop table. */
> @@ -1189,8 +1189,8 @@ struct mlx5_txpp_wq {
>  
>  /* Tx packet pacing internal timestamp. */
>  struct mlx5_txpp_ts {
> -	uint64_t ci_ts;
> -	uint64_t ts;
> +	RTE_ATOMIC(uint64_t) ci_ts;
> +	RTE_ATOMIC(uint64_t) ts;
>  };
>  
>  /* Tx packet pacing structure. */
> @@ -1213,12 +1213,12 @@ struct mlx5_dev_txpp {
>  	struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
>  	uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
>  	/* Statistics counters. */
> -	uint64_t err_miss_int; /* Missed service interrupt. */
> -	uint64_t err_rearm_queue; /* Rearm Queue errors. */
> -	uint64_t err_clock_queue; /* Clock Queue errors. */
> -	uint64_t err_ts_past; /* Timestamp in the past. */
> -	uint64_t err_ts_future; /* Timestamp in the distant future. */
> -	uint64_t err_ts_order; /* Timestamp not in ascending order. */
> +	RTE_ATOMIC(uint64_t) err_miss_int; /* Missed service interrupt. */
> +	RTE_ATOMIC(uint64_t) err_rearm_queue; /* Rearm Queue errors. */
> +	RTE_ATOMIC(uint64_t) err_clock_queue; /* Clock Queue errors. */
> +	RTE_ATOMIC(uint64_t) err_ts_past; /* Timestamp in the past. */
> +	RTE_ATOMIC(uint64_t) err_ts_future; /* Timestamp in the distant future. */
> +	RTE_ATOMIC(uint64_t) err_ts_order; /* Timestamp not in ascending order. */
>  };
>  
>  /* Sample ID information of eCPRI flex parser structure. */
> @@ -1279,16 +1279,16 @@ struct mlx5_aso_ct_action {
>  	void *dr_action_orig;
>  	/* General action object for reply dir. */
>  	void *dr_action_rply;
> -	uint32_t refcnt; /* Action used count in device flows. */
> +	RTE_ATOMIC(uint32_t) refcnt; /* Action used count in device flows. */
>  	uint16_t offset; /* Offset of ASO CT in DevX objects bulk. */
>  	uint16_t peer; /* The only peer port index could also use this CT. */
> -	enum mlx5_aso_ct_state state; /* ASO CT state. */
> +	RTE_ATOMIC(enum mlx5_aso_ct_state) state; /* ASO CT state. */
>  	bool is_original; /* The direction of the DR action to be used. */
>  };
>  
>  /* CT action object state update. */
>  #define MLX5_ASO_CT_UPDATE_STATE(c, s) \
> -	__atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)
> +	rte_atomic_store_explicit(&((c)->state), (s), rte_memory_order_relaxed)
>  
>  #ifdef PEDANTIC
>  #pragma GCC diagnostic ignored "-Wpedantic"
> @@ -1362,7 +1362,7 @@ struct mlx5_flex_pattern_field {
>  /* Port flex item context. */
>  struct mlx5_flex_item {
>  	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
> -	uint32_t refcnt; /* Atomically accessed refcnt by flows. */
> +	RTE_ATOMIC(uint32_t) refcnt; /* Atomically accessed refcnt by flows. */
>  	enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
>  	uint32_t mapnum; /* Number of pattern translation entries. */
>  	struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
> @@ -1375,7 +1375,7 @@ struct mlx5_flex_item {
>  #define MLX5_SRV6_SAMPLE_NUM 5
>  /* Mlx5 internal flex parser profile structure. */
>  struct mlx5_internal_flex_parser_profile {
> -	uint32_t refcnt;
> +	RTE_ATOMIC(uint32_t) refcnt;
>  	struct mlx5_flex_item flex; /* Hold map info for modify field. */
>  };
>  
> @@ -1474,9 +1474,9 @@ struct mlx5_dev_ctx_shared {
>  #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
>  	struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];
>  #endif
> -	struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
> -	struct mlx5_hlist *modify_cmds;
> -	struct mlx5_hlist *tag_table;
> +	RTE_ATOMIC(struct mlx5_hlist *) encaps_decaps; /* Encap/decap action hash list. */
> +	RTE_ATOMIC(struct mlx5_hlist *) modify_cmds;
> +	RTE_ATOMIC(struct mlx5_hlist *) tag_table;
>  	struct mlx5_list *port_id_action_list; /* Port ID action list. */
>  	struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
>  	struct mlx5_list *sample_action_list; /* List of sample actions. */
> @@ -1487,7 +1487,7 @@ struct mlx5_dev_ctx_shared {
>  	/* SW steering counters management structure. */
>  	void *default_miss_action; /* Default miss action. */
>  	struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
> -	struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
> +	RTE_ATOMIC(struct mlx5_indexed_pool *) mdh_ipools[MLX5_MAX_MODIFY_NUM];
>  	/* Shared interrupt handler section. */
>  	struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
>  	struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
> @@ -1530,7 +1530,7 @@ struct mlx5_dev_ctx_shared {
>   * Caution, secondary process may rebuild the struct during port start.
>   */
>  struct mlx5_proc_priv {
> -	void *hca_bar;
> +	RTE_ATOMIC(void *) hca_bar;
>  	/* Mapped HCA PCI BAR area. */
>  	size_t uar_table_sz;
>  	/* Size of UAR register table. */
> @@ -1595,7 +1595,7 @@ struct mlx5_rxq_obj {
>  /* Indirection table. */
>  struct mlx5_ind_table_obj {
>  	LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
> -	uint32_t refcnt; /* Reference counter. */
> +	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
>  	union {
>  		void *ind_table; /**< Indirection table. */
>  		struct mlx5_devx_obj *rqt; /* DevX RQT object. */
> @@ -1746,7 +1746,7 @@ enum mlx5_quota_state {
>  };
>  
>  struct mlx5_quota {
> -	uint8_t state; /* object state */
> +	RTE_ATOMIC(uint8_t) state; /* object state */
>  	uint8_t mode;  /* metering mode */
>  	/**
>  	 * Keep track of application update types.
> @@ -1877,7 +1877,7 @@ struct mlx5_priv {
>  	uint32_t flex_item_map; /* Map of allocated flex item elements. */
>  	uint32_t nb_queue; /* HW steering queue number. */
>  	struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
> -	uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
> +	RTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference counter. */
>  	struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
>  	struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
>  #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
> @@ -1917,7 +1917,7 @@ struct mlx5_priv {
>  	/**< HW steering templates used to create control flow rules. */
>  #endif
>  	struct rte_eth_dev *shared_host; /* Host device for HW steering. */
> -	uint16_t shared_refcnt; /* HW steering host reference counter. */
> +	RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
>  };
>  
>  #define PORT_ID(priv) ((priv)->dev_data->port_id)
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 85e8c77..08b595a 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -4716,8 +4716,8 @@ struct mlx5_translated_action_handle {
>  			shared_rss = mlx5_ipool_get
>  				(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
>  									   idx);
> -			__atomic_fetch_add(&shared_rss->refcnt, 1,
> -					   __ATOMIC_RELAXED);
> +			rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,
> +					   rte_memory_order_relaxed);
>  			return idx;
>  		default:
>  			break;
> @@ -7533,7 +7533,7 @@ struct mlx5_list_entry *
>  	if (tunnel) {
>  		flow->tunnel = 1;
>  		flow->tunnel_id = tunnel->tunnel_id;
> -		__atomic_fetch_add(&tunnel->refctn, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
>  		mlx5_free(default_miss_ctx.queue);
>  	}
>  	mlx5_flow_pop_thread_workspace();
> @@ -7544,10 +7544,10 @@ struct mlx5_list_entry *
>  	flow_mreg_del_copy_action(dev, flow);
>  	flow_drv_destroy(dev, flow);
>  	if (rss_desc->shared_rss)
> -		__atomic_fetch_sub(&((struct mlx5_shared_action_rss *)
> +		rte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)
>  			mlx5_ipool_get
>  			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
> -			rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
> +			rss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);
>  	mlx5_ipool_free(priv->flows[type], idx);
>  	rte_errno = ret; /* Restore rte_errno. */
>  	ret = rte_errno;
> @@ -8050,7 +8050,8 @@ struct rte_flow *
>  
>  		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
>  		RTE_VERIFY(tunnel);
> -		if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
> +		if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
> +		    rte_memory_order_relaxed) - 1))
>  			mlx5_flow_tunnel_free(dev, tunnel);
>  	}
>  	flow_mreg_del_copy_action(dev, flow);
> @@ -9948,7 +9949,7 @@ struct mlx5_flow_workspace*
>  {
>  	uint32_t pools_n, us;
>  
> -	pools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);
> +	pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed);
>  	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
>  	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
>  	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
> @@ -10050,17 +10051,17 @@ struct mlx5_flow_workspace*
>  	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
>  		cnt = MLX5_POOL_GET_CNT(pool, i);
>  		age_param = MLX5_CNT_TO_AGE(cnt);
> -		if (__atomic_load_n(&age_param->state,
> -				    __ATOMIC_RELAXED) != AGE_CANDIDATE)
> +		if (rte_atomic_load_explicit(&age_param->state,
> +				    rte_memory_order_relaxed) != AGE_CANDIDATE)
>  			continue;
>  		if (cur->data[i].hits != prev->data[i].hits) {
> -			__atomic_store_n(&age_param->sec_since_last_hit, 0,
> -					 __ATOMIC_RELAXED);
> +			rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,
> +					 rte_memory_order_relaxed);
>  			continue;
>  		}
> -		if (__atomic_fetch_add(&age_param->sec_since_last_hit,
> +		if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,
>  				       time_delta,
> -				       __ATOMIC_RELAXED) + time_delta <= age_param->timeout)
> +				       rte_memory_order_relaxed) + time_delta <= age_param->timeout)
>  			continue;
>  		/**
>  		 * Hold the lock first, or if between the
> @@ -10071,10 +10072,10 @@ struct mlx5_flow_workspace*
>  		priv = rte_eth_devices[age_param->port_id].data->dev_private;
>  		age_info = GET_PORT_AGE_INFO(priv);
>  		rte_spinlock_lock(&age_info->aged_sl);
> -		if (__atomic_compare_exchange_n(&age_param->state, &expected,
> -						AGE_TMOUT, false,
> -						__ATOMIC_RELAXED,
> -						__ATOMIC_RELAXED)) {
> +		if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
> +						AGE_TMOUT,
> +						rte_memory_order_relaxed,
> +						rte_memory_order_relaxed)) {
>  			TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
>  			MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
>  		}
> @@ -11904,7 +11905,7 @@ struct tunnel_db_element_release_ctx {
>  {
>  	struct tunnel_db_element_release_ctx *ctx = x;
>  	ctx->ret = 0;
> -	if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
> +	if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1))
>  		mlx5_flow_tunnel_free(dev, tunnel);
>  }
>  
> diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
> index 094be12..5541244 100644
> --- a/drivers/net/mlx5/mlx5_flow.h
> +++ b/drivers/net/mlx5/mlx5_flow.h
> @@ -1018,7 +1018,7 @@ struct mlx5_flow_tunnel {
>  	LIST_ENTRY(mlx5_flow_tunnel) chain;
>  	struct rte_flow_tunnel app_tunnel;	/** app tunnel copy */
>  	uint32_t tunnel_id;			/** unique tunnel ID */
> -	uint32_t refctn;
> +	RTE_ATOMIC(uint32_t) refctn;
>  	struct rte_flow_action action;
>  	struct rte_flow_item item;
>  	struct mlx5_hlist *groups;		/** tunnel groups */
> @@ -1338,7 +1338,7 @@ struct rte_flow_pattern_template {
>  	struct mlx5dr_match_template *mt; /* mlx5 match template. */
>  	uint64_t item_flags; /* Item layer flags. */
>  	uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
> -	uint32_t refcnt;  /* Reference counter. */
> +	RTE_ATOMIC(uint32_t) refcnt;  /* Reference counter. */
>  	/*
>  	 * If true, then rule pattern should be prepended with
>  	 * represented_port pattern item.
> @@ -1368,7 +1368,7 @@ struct rte_flow_actions_template {
>  	uint16_t reformat_off; /* Offset of DR reformat action. */
>  	uint16_t mhdr_off; /* Offset of DR modify header action. */
>  	uint16_t recom_off;  /* Offset of DR IPv6 routing push remove action. */
> -	uint32_t refcnt; /* Reference counter. */
> +	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
>  	uint8_t flex_item; /* flex item index. */
>  };
>  
> @@ -1388,7 +1388,7 @@ struct mlx5_hw_encap_decap_action {
>  	/* Is header_reformat action shared across flows in table. */
>  	uint32_t shared:1;
>  	uint32_t multi_pattern:1;
> -	volatile uint32_t *multi_pattern_refcnt;
> +	volatile RTE_ATOMIC(uint32_t) *multi_pattern_refcnt;
>  	size_t data_size; /* Action metadata size. */
>  	uint8_t data[]; /* Action data. */
>  };
> @@ -1411,7 +1411,7 @@ struct mlx5_hw_modify_header_action {
>  	/* Is MODIFY_HEADER action shared across flows in table. */
>  	uint32_t shared:1;
>  	uint32_t multi_pattern:1;
> -	volatile uint32_t *multi_pattern_refcnt;
> +	volatile RTE_ATOMIC(uint32_t) *multi_pattern_refcnt;
>  	/* Amount of modification commands stored in the precompiled buffer. */
>  	uint32_t mhdr_cmds_num;
>  	/* Precompiled modification commands. */
> @@ -1567,7 +1567,7 @@ struct rte_flow_template_table {
>  /* Shared RSS action structure */
>  struct mlx5_shared_action_rss {
>  	ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
> -	uint32_t refcnt; /**< Atomically accessed refcnt. */
> +	RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
>  	struct rte_flow_action_rss origin; /**< Original rte RSS action. */
>  	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
>  	struct mlx5_ind_table_obj *ind_tbl;
> diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
> index 3c08da0..d1f014e 100644
> --- a/drivers/net/mlx5/mlx5_flow_aso.c
> +++ b/drivers/net/mlx5/mlx5_flow_aso.c
> @@ -619,7 +619,7 @@
>  			uint8_t *u8addr;
>  			uint8_t hit;
>  
> -			if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
> +			if (rte_atomic_load_explicit(&ap->state, rte_memory_order_relaxed) !=
>  					    AGE_CANDIDATE)
>  				continue;
>  			byte = 63 - (j / 8);
> @@ -627,13 +627,13 @@
>  			u8addr = (uint8_t *)addr;
>  			hit = (u8addr[byte] >> offset) & 0x1;
>  			if (hit) {
> -				__atomic_store_n(&ap->sec_since_last_hit, 0,
> -						 __ATOMIC_RELAXED);
> +				rte_atomic_store_explicit(&ap->sec_since_last_hit, 0,
> +						 rte_memory_order_relaxed);
>  			} else {
>  				struct mlx5_priv *priv;
>  
> -				__atomic_fetch_add(&ap->sec_since_last_hit,
> -						   diff, __ATOMIC_RELAXED);
> +				rte_atomic_fetch_add_explicit(&ap->sec_since_last_hit,
> +						   diff, rte_memory_order_relaxed);
>  				/* If timeout passed add to aged-out list. */
>  				if (ap->sec_since_last_hit <= ap->timeout)
>  					continue;
> @@ -641,12 +641,11 @@
>  				rte_eth_devices[ap->port_id].data->dev_private;
>  				age_info = GET_PORT_AGE_INFO(priv);
>  				rte_spinlock_lock(&age_info->aged_sl);
> -				if (__atomic_compare_exchange_n(&ap->state,
> +				if (rte_atomic_compare_exchange_strong_explicit(&ap->state,
>  								&expected,
>  								AGE_TMOUT,
> -								false,
> -							       __ATOMIC_RELAXED,
> -							    __ATOMIC_RELAXED)) {
> +							       rte_memory_order_relaxed,
> +							    rte_memory_order_relaxed)) {
>  					LIST_INSERT_HEAD(&age_info->aged_aso,
>  							 act, next);
>  					MLX5_AGE_SET(age_info,
> @@ -909,9 +908,9 @@
>  	for (i = 0; i < aso_mtrs_nums; ++i) {
>  		aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
>  		MLX5_ASSERT(aso_mtr);
> -		(void)__atomic_compare_exchange_n(&aso_mtr->state,
> +		(void)rte_atomic_compare_exchange_strong_explicit(&aso_mtr->state,
>  				&exp_state, ASO_METER_READY,
> -				false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
> +				rte_memory_order_relaxed, rte_memory_order_relaxed);
>  	}
>  }
>  
> @@ -1056,12 +1055,12 @@
>  		sq = &sh->mtrmng->pools_mng.sq;
>  		need_lock = true;
>  	}
> -	state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);
> +	state = rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed);
>  	if (state == ASO_METER_READY || state == ASO_METER_WAIT_ASYNC)
>  		return 0;
>  	do {
>  		mlx5_aso_mtr_completion_handle(sq, need_lock);
> -		if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
> +		if (rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed) ==
>  					    ASO_METER_READY)
>  			return 0;
>  		/* Waiting for CQE ready. */
> @@ -1360,7 +1359,7 @@
>  	uint16_t wqe_idx;
>  	struct mlx5_aso_ct_pool *pool;
>  	enum mlx5_aso_ct_state state =
> -				__atomic_load_n(&ct->state, __ATOMIC_RELAXED);
> +				rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
>  
>  	if (state == ASO_CONNTRACK_FREE) {
>  		DRV_LOG(ERR, "Fail: No context to query");
> @@ -1569,12 +1568,12 @@
>  		sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
>  	else
>  		sq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);
> -	if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
> +	if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
>  	    ASO_CONNTRACK_READY)
>  		return 0;
>  	do {
>  		mlx5_aso_ct_completion_handle(sh, sq, need_lock);
> -		if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
> +		if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
>  		    ASO_CONNTRACK_READY)
>  			return 0;
>  		/* Waiting for CQE ready, consider should block or sleep. */
> @@ -1740,7 +1739,7 @@
>  	bool need_lock = !!(queue == MLX5_HW_INV_QUEUE);
>  	uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
>  	enum mlx5_aso_ct_state state =
> -				__atomic_load_n(&ct->state, __ATOMIC_RELAXED);
> +				rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
>  
>  	if (sh->config.dv_flow_en == 2)
>  		sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
> @@ -1756,7 +1755,7 @@
>  	}
>  	do {
>  		mlx5_aso_ct_completion_handle(sh, sq, need_lock);
> -		state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
> +		state = rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
>  		if (state == ASO_CONNTRACK_READY ||
>  		    state == ASO_CONNTRACK_QUERY)
>  			return 0;
> diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
> index 154e509..ca45cd8 100644
> --- a/drivers/net/mlx5/mlx5_flow_dv.c
> +++ b/drivers/net/mlx5/mlx5_flow_dv.c
> @@ -285,7 +285,7 @@ struct field_modify_info modify_tcp[] = {
>  }
>  
>  static inline struct mlx5_hlist *
> -flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
> +flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, RTE_ATOMIC(struct mlx5_hlist *) *phl,
>  		     const char *name, uint32_t size, bool direct_key,
>  		     bool lcores_share, void *ctx,
>  		     mlx5_list_create_cb cb_create,
> @@ -299,7 +299,7 @@ struct field_modify_info modify_tcp[] = {
>  	struct mlx5_hlist *expected = NULL;
>  	char s[MLX5_NAME_SIZE];
>  
> -	hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
> +	hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
>  	if (likely(hl))
>  		return hl;
>  	snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
> @@ -313,11 +313,11 @@ struct field_modify_info modify_tcp[] = {
>  				   "cannot allocate resource memory");
>  		return NULL;
>  	}
> -	if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
> -					 __ATOMIC_SEQ_CST,
> -					 __ATOMIC_SEQ_CST)) {
> +	if (!rte_atomic_compare_exchange_strong_explicit(phl, &expected, hl,
> +					 rte_memory_order_seq_cst,
> +					 rte_memory_order_seq_cst)) {
>  		mlx5_hlist_destroy(hl);
> -		hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
> +		hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
>  	}
>  	return hl;
>  }
> @@ -5882,8 +5882,8 @@ struct mlx5_list_entry *
>  static struct mlx5_indexed_pool *
>  flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
>  {
> -	struct mlx5_indexed_pool *ipool = __atomic_load_n
> -				     (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
> +	struct mlx5_indexed_pool *ipool = rte_atomic_load_explicit
> +				     (&sh->mdh_ipools[index], rte_memory_order_seq_cst);
>  
>  	if (!ipool) {
>  		struct mlx5_indexed_pool *expected = NULL;
> @@ -5908,13 +5908,13 @@ struct mlx5_list_entry *
>  		ipool = mlx5_ipool_create(&cfg);
>  		if (!ipool)
>  			return NULL;
> -		if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
> -						 &expected, ipool, false,
> -						 __ATOMIC_SEQ_CST,
> -						 __ATOMIC_SEQ_CST)) {
> +		if (!rte_atomic_compare_exchange_strong_explicit(&sh->mdh_ipools[index],
> +						 &expected, ipool,
> +						 rte_memory_order_seq_cst,
> +						 rte_memory_order_seq_cst)) {
>  			mlx5_ipool_destroy(ipool);
> -			ipool = __atomic_load_n(&sh->mdh_ipools[index],
> -						__ATOMIC_SEQ_CST);
> +			ipool = rte_atomic_load_explicit(&sh->mdh_ipools[index],
> +						rte_memory_order_seq_cst);
>  		}
>  	}
>  	return ipool;
> @@ -6735,9 +6735,9 @@ struct mlx5_list_entry *
>  
>  	age_info = GET_PORT_AGE_INFO(priv);
>  	age_param = flow_dv_counter_idx_get_age(dev, counter);
> -	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
> -					 AGE_FREE, false, __ATOMIC_RELAXED,
> -					 __ATOMIC_RELAXED)) {
> +	if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
> +					 AGE_FREE, rte_memory_order_relaxed,
> +					 rte_memory_order_relaxed)) {
>  		/**
>  		 * We need the lock even it is age timeout,
>  		 * since counter may still in process.
> @@ -6745,7 +6745,7 @@ struct mlx5_list_entry *
>  		rte_spinlock_lock(&age_info->aged_sl);
>  		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
>  		rte_spinlock_unlock(&age_info->aged_sl);
> -		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
> +		rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
>  	}
>  }
>  
> @@ -6781,8 +6781,8 @@ struct mlx5_list_entry *
>  		 * indirect action API, shared info is 1 before the reduction,
>  		 * so this condition is failed and function doesn't return here.
>  		 */
> -		if (__atomic_fetch_sub(&cnt->shared_info.refcnt, 1,
> -				       __ATOMIC_RELAXED) - 1)
> +		if (rte_atomic_fetch_sub_explicit(&cnt->shared_info.refcnt, 1,
> +				       rte_memory_order_relaxed) - 1)
>  			return;
>  	}
>  	cnt->pool = pool;
> @@ -9915,8 +9915,8 @@ struct mlx5_list_entry *
>  			 * Increasing refcnt only in SWS. HWS uses it as global.
>  			 */
>  			if (priv->sh->config.dv_flow_en == 1)
> -				__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
> -						   __ATOMIC_RELAXED);
> +				rte_atomic_fetch_add_explicit(&geneve_opt_resource->refcnt, 1,
> +						   rte_memory_order_relaxed);
>  		} else {
>  			ret = rte_flow_error_set(error, ENOMEM,
>  				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
> @@ -9951,8 +9951,8 @@ struct mlx5_list_entry *
>  		geneve_opt_resource->option_class = geneve_opt_v->option_class;
>  		geneve_opt_resource->option_type = geneve_opt_v->option_type;
>  		geneve_opt_resource->length = geneve_opt_v->option_len;
> -		__atomic_store_n(&geneve_opt_resource->refcnt, 1,
> -				__ATOMIC_RELAXED);
> +		rte_atomic_store_explicit(&geneve_opt_resource->refcnt, 1,
> +				rte_memory_order_relaxed);
>  	}
>  exit:
>  	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
> @@ -11919,8 +11919,8 @@ struct mlx5_list_entry *
>  		(void *)(uintptr_t)(dev_flow->flow_idx);
>  	age_param->timeout = age->timeout;
>  	age_param->port_id = dev->data->port_id;
> -	__atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
> -	__atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
> +	rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0, rte_memory_order_relaxed);
> +	rte_atomic_store_explicit(&age_param->state, AGE_CANDIDATE, rte_memory_order_relaxed);
>  	return counter;
>  }
>  
> @@ -12968,9 +12968,9 @@ struct mlx5_list_entry *
>  	uint16_t expected = AGE_CANDIDATE;
>  
>  	age_info = GET_PORT_AGE_INFO(priv);
> -	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
> -					 AGE_FREE, false, __ATOMIC_RELAXED,
> -					 __ATOMIC_RELAXED)) {
> +	if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
> +					 AGE_FREE, rte_memory_order_relaxed,
> +					 rte_memory_order_relaxed)) {
>  		/**
>  		 * We need the lock even it is age timeout,
>  		 * since age action may still in process.
> @@ -12978,7 +12978,7 @@ struct mlx5_list_entry *
>  		rte_spinlock_lock(&age_info->aged_sl);
>  		LIST_REMOVE(age, next);
>  		rte_spinlock_unlock(&age_info->aged_sl);
> -		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
> +		rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
>  	}
>  }
>  
> @@ -13002,7 +13002,7 @@ struct mlx5_list_entry *
>  	struct mlx5_priv *priv = dev->data->dev_private;
>  	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
>  	struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
> -	uint32_t ret = __atomic_fetch_sub(&age->refcnt, 1, __ATOMIC_RELAXED) - 1;
> +	uint32_t ret = rte_atomic_fetch_sub_explicit(&age->refcnt, 1, rte_memory_order_relaxed) - 1;
>  
>  	if (!ret) {
>  		flow_dv_aso_age_remove_from_age(dev, age);
> @@ -13178,7 +13178,7 @@ struct mlx5_list_entry *
>  			return 0; /* 0 is an error. */
>  		}
>  	}
> -	__atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_store_explicit(&age_free->refcnt, 1, rte_memory_order_relaxed);
>  	return pool->index | ((age_free->offset + 1) << 16);
>  }
>  
> @@ -13208,10 +13208,10 @@ struct mlx5_list_entry *
>  	aso_age->age_params.context = context;
>  	aso_age->age_params.timeout = timeout;
>  	aso_age->age_params.port_id = dev->data->port_id;
> -	__atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
> -			 __ATOMIC_RELAXED);
> -	__atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
> -			 __ATOMIC_RELAXED);
> +	rte_atomic_store_explicit(&aso_age->age_params.sec_since_last_hit, 0,
> +			 rte_memory_order_relaxed);
> +	rte_atomic_store_explicit(&aso_age->age_params.state, AGE_CANDIDATE,
> +			 rte_memory_order_relaxed);
>  }
>  
>  static void
> @@ -13393,12 +13393,12 @@ struct mlx5_list_entry *
>  	uint32_t ret;
>  	struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
>  	enum mlx5_aso_ct_state state =
> -			__atomic_load_n(&ct->state, __ATOMIC_RELAXED);
> +			rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
>  
>  	/* Cannot release when CT is in the ASO SQ. */
>  	if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
>  		return -1;
> -	ret = __atomic_fetch_sub(&ct->refcnt, 1, __ATOMIC_RELAXED) - 1;
> +	ret = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1, rte_memory_order_relaxed) - 1;
>  	if (!ret) {
>  		if (ct->dr_action_orig) {
>  #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
> @@ -13588,7 +13588,7 @@ struct mlx5_list_entry *
>  	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
>  	ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
>  	/* 0: inactive, 1: created, 2+: used by flows. */
> -	__atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_store_explicit(&ct->refcnt, 1, rte_memory_order_relaxed);
>  	reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
>  	if (!ct->dr_action_orig) {
>  #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
> @@ -14577,8 +14577,8 @@ struct mlx5_list_entry *
>  			age_act = flow_aso_age_get_by_idx(dev, owner_idx);
>  			if (flow->age == 0) {
>  				flow->age = owner_idx;
> -				__atomic_fetch_add(&age_act->refcnt, 1,
> -						   __ATOMIC_RELAXED);
> +				rte_atomic_fetch_add_explicit(&age_act->refcnt, 1,
> +						   rte_memory_order_relaxed);
>  			}
>  			age_act_pos = actions_n++;
>  			action_flags |= MLX5_FLOW_ACTION_AGE;
> @@ -14615,9 +14615,9 @@ struct mlx5_list_entry *
>  			} else {
>  				if (flow->counter == 0) {
>  					flow->counter = owner_idx;
> -					__atomic_fetch_add
> +					rte_atomic_fetch_add_explicit
>  						(&cnt_act->shared_info.refcnt,
> -						 1, __ATOMIC_RELAXED);
> +						 1, rte_memory_order_relaxed);
>  				}
>  				/* Save information first, will apply later. */
>  				action_flags |= MLX5_FLOW_ACTION_COUNT;
> @@ -14945,8 +14945,8 @@ struct mlx5_list_entry *
>  				flow->indirect_type =
>  						MLX5_INDIRECT_ACTION_TYPE_CT;
>  				flow->ct = owner_idx;
> -				__atomic_fetch_add(&ct->refcnt, 1,
> -						   __ATOMIC_RELAXED);
> +				rte_atomic_fetch_add_explicit(&ct->refcnt, 1,
> +						   rte_memory_order_relaxed);
>  			}
>  			actions_n++;
>  			action_flags |= MLX5_FLOW_ACTION_CT;
> @@ -15615,7 +15615,7 @@ struct mlx5_list_entry *
>  
>  	shared_rss = mlx5_ipool_get
>  			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
> -	__atomic_fetch_sub(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_sub_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
>  }
>  
>  void
> @@ -15798,8 +15798,8 @@ struct mlx5_list_entry *
>  				sh->geneve_tlv_option_resource;
>  	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
>  	if (geneve_opt_resource) {
> -		if (!(__atomic_fetch_sub(&geneve_opt_resource->refcnt, 1,
> -					 __ATOMIC_RELAXED) - 1)) {
> +		if (!(rte_atomic_fetch_sub_explicit(&geneve_opt_resource->refcnt, 1,
> +					 rte_memory_order_relaxed) - 1)) {
>  			claim_zero(mlx5_devx_cmd_destroy
>  					(geneve_opt_resource->obj));
>  			mlx5_free(sh->geneve_tlv_option_resource);
> @@ -16208,7 +16208,7 @@ struct mlx5_list_entry *
>  	/* Update queue with indirect table queue memoyr. */
>  	origin->queue = shared_rss->ind_tbl->queues;
>  	rte_spinlock_init(&shared_rss->action_rss_sl);
> -	__atomic_fetch_add(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
>  	rte_spinlock_lock(&priv->shared_act_sl);
>  	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
>  		     &priv->rss_shared_actions, idx, shared_rss, next);
> @@ -16254,9 +16254,9 @@ struct mlx5_list_entry *
>  		return rte_flow_error_set(error, EINVAL,
>  					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
>  					  "invalid shared action");
> -	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
> -					 0, 0, __ATOMIC_ACQUIRE,
> -					 __ATOMIC_RELAXED))
> +	if (!rte_atomic_compare_exchange_strong_explicit(&shared_rss->refcnt, &old_refcnt,
> +					 0, rte_memory_order_acquire,
> +					 rte_memory_order_relaxed))
>  		return rte_flow_error_set(error, EBUSY,
>  					  RTE_FLOW_ERROR_TYPE_ACTION,
>  					  NULL,
> @@ -16390,10 +16390,10 @@ struct rte_flow_action_handle *
>  		return __flow_dv_action_rss_release(dev, idx, error);
>  	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
>  		cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
> -		if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
> -						 &no_flow_refcnt, 1, false,
> -						 __ATOMIC_ACQUIRE,
> -						 __ATOMIC_RELAXED))
> +		if (!rte_atomic_compare_exchange_strong_explicit(&cnt->shared_info.refcnt,
> +						 &no_flow_refcnt, 1,
> +						 rte_memory_order_acquire,
> +						 rte_memory_order_relaxed))
>  			return rte_flow_error_set(error, EBUSY,
>  						  RTE_FLOW_ERROR_TYPE_ACTION,
>  						  NULL,
> @@ -17353,13 +17353,13 @@ struct rte_flow_action_handle *
>  	case MLX5_INDIRECT_ACTION_TYPE_AGE:
>  		age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
>  		resp = data;
> -		resp->aged = __atomic_load_n(&age_param->state,
> -					      __ATOMIC_RELAXED) == AGE_TMOUT ?
> +		resp->aged = rte_atomic_load_explicit(&age_param->state,
> +					      rte_memory_order_relaxed) == AGE_TMOUT ?
>  									  1 : 0;
>  		resp->sec_since_last_hit_valid = !resp->aged;
>  		if (resp->sec_since_last_hit_valid)
> -			resp->sec_since_last_hit = __atomic_load_n
> -			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
> +			resp->sec_since_last_hit = rte_atomic_load_explicit
> +			     (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
>  		return 0;
>  	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
>  		return flow_dv_query_count(dev, idx, data, error);
> @@ -17436,12 +17436,12 @@ struct rte_flow_action_handle *
>  					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
>  					  NULL, "age data not available");
>  	}
> -	resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
> +	resp->aged = rte_atomic_load_explicit(&age_param->state, rte_memory_order_relaxed) ==
>  				     AGE_TMOUT ? 1 : 0;
>  	resp->sec_since_last_hit_valid = !resp->aged;
>  	if (resp->sec_since_last_hit_valid)
> -		resp->sec_since_last_hit = __atomic_load_n
> -			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
> +		resp->sec_since_last_hit = rte_atomic_load_explicit
> +			     (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
>  	return 0;
>  }
>  
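
The compare-exchange conversions in the hunks above drop the builtin's fourth
argument: __atomic_compare_exchange_n() takes a bool selecting the weak or
strong variant, while the replacement used throughout is always the strong
form, so the literal 0/false simply disappears and both memory orders are kept
as-is. A minimal sketch of the mapping (the "refcnt" variable and the helper
name are placeholders, not driver fields):

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) refcnt;

/* Succeeds only when refcnt is exactly 1, i.e. no other user holds it. */
static inline bool
try_release_last_ref(void)
{
	uint32_t expected = 1;

	/*
	 * Old form: __atomic_compare_exchange_n(&refcnt, &expected, 0, false,
	 *                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
	 */
	return rte_atomic_compare_exchange_strong_explicit(&refcnt, &expected, 0,
			rte_memory_order_acquire, rte_memory_order_relaxed);
}
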
> diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
> index 4ae03a2..8a02247 100644
> --- a/drivers/net/mlx5/mlx5_flow_flex.c
> +++ b/drivers/net/mlx5/mlx5_flow_flex.c
> @@ -86,7 +86,7 @@
>  			MLX5_ASSERT(!item->refcnt);
>  			MLX5_ASSERT(!item->devx_fp);
>  			item->devx_fp = NULL;
> -			__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
> +			rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
>  			priv->flex_item_map |= 1u << idx;
>  		}
>  	}
> @@ -107,7 +107,7 @@
>  		MLX5_ASSERT(!item->refcnt);
>  		MLX5_ASSERT(!item->devx_fp);
>  		item->devx_fp = NULL;
> -		__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
> +		rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
>  		priv->flex_item_map &= ~(1u << idx);
>  		rte_spinlock_unlock(&priv->flex_item_sl);
>  	}
> @@ -379,7 +379,7 @@
>  		return ret;
>  	}
>  	if (acquire)
> -		__atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
> +		rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
>  	return ret;
>  }
>  
> @@ -414,7 +414,7 @@
>  		rte_errno = -EINVAL;
>  		return -EINVAL;
>  	}
> -	__atomic_fetch_sub(&flex->refcnt, 1, __ATOMIC_RELEASE);
> +	rte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);
>  	return 0;
>  }
>  
> @@ -1337,7 +1337,7 @@ struct rte_flow_item_flex_handle *
>  	}
>  	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
>  	/* Mark initialized flex item valid. */
> -	__atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
> +	rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
>  	return (struct rte_flow_item_flex_handle *)flex;
>  
>  error:
> @@ -1378,8 +1378,8 @@ struct rte_flow_item_flex_handle *
>  					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
>  					  "invalid flex item handle value");
>  	}
> -	if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
> -					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
> +	if (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,
> +					 rte_memory_order_acquire, rte_memory_order_relaxed)) {
>  		rte_spinlock_unlock(&priv->flex_item_sl);
>  		return rte_flow_error_set(error, EBUSY,
>  					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
> diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
> index 7376030..5dd2cdc 100644
> --- a/drivers/net/mlx5/mlx5_flow_hw.c
> +++ b/drivers/net/mlx5/mlx5_flow_hw.c
> @@ -555,8 +555,8 @@ static int flow_hw_translate_group(struct rte_eth_dev *dev,
>  flow_hw_template_destroy_reformat_action(struct mlx5_hw_encap_decap_action *encap_decap)
>  {
>  	if (encap_decap->multi_pattern) {
> -		uint32_t refcnt = __atomic_sub_fetch(encap_decap->multi_pattern_refcnt,
> -						     1, __ATOMIC_RELAXED);
> +		uint32_t refcnt = rte_atomic_fetch_sub_explicit(encap_decap->multi_pattern_refcnt,
> +						     1, rte_memory_order_relaxed) - 1;
>  		if (refcnt)
>  			return;
>  		mlx5_free((void *)(uintptr_t)encap_decap->multi_pattern_refcnt);
> @@ -569,8 +569,8 @@ static int flow_hw_translate_group(struct rte_eth_dev *dev,
>  flow_hw_template_destroy_mhdr_action(struct mlx5_hw_modify_header_action *mhdr)
>  {
>  	if (mhdr->multi_pattern) {
> -		uint32_t refcnt = __atomic_sub_fetch(mhdr->multi_pattern_refcnt,
> -						     1, __ATOMIC_RELAXED);
> +		uint32_t refcnt = rte_atomic_fetch_sub_explicit(mhdr->multi_pattern_refcnt,
> +						     1, rte_memory_order_relaxed) - 1;
>  		if (refcnt)
>  			return;
>  		mlx5_free((void *)(uintptr_t)mhdr->multi_pattern_refcnt);
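
The "- 1" added in the two hunks above is not cosmetic: __atomic_sub_fetch()
returns the value after the subtraction, whereas rte_atomic_fetch_sub_explicit()
follows C11 atomic_fetch_sub() and returns the value before it. The same
reasoning gives the "+ 1" used further down for the __atomic_add_fetch() call
sites. A minimal sketch (the "cnt" variable is a placeholder, not a driver
field):

#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) cnt;

/* Decrement and return the remaining count, matching the old sub_fetch. */
static inline uint32_t
dec_return_new(void)
{
	/* Old form: return __atomic_sub_fetch(&cnt, 1, __ATOMIC_RELAXED); */
	return rte_atomic_fetch_sub_explicit(&cnt, 1, rte_memory_order_relaxed) - 1;
}
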
> @@ -604,7 +604,8 @@ static int flow_hw_translate_group(struct rte_eth_dev *dev,
>  	}
>  
>  	if (acts->mark)
> -		if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
> +		if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
> +		    rte_memory_order_relaxed) - 1))
>  			flow_hw_rxq_flag_set(dev, false);
>  
>  	if (acts->jump) {
> @@ -2168,7 +2169,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  				goto err;
>  			acts->rule_acts[dr_pos].action =
>  				priv->hw_tag[!!attr->group];
> -			__atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
> +			rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
> +			    rte_memory_order_relaxed);
>  			flow_hw_rxq_flag_set(dev, true);
>  			break;
>  		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
> @@ -4065,7 +4067,7 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  
>  	for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
>  		uint32_t j;
> -		uint32_t *reformat_refcnt;
> +		RTE_ATOMIC(uint32_t) *reformat_refcnt;
>  		typeof(mpat->reformat[0]) *reformat = mpat->reformat + i;
>  		struct mlx5dr_action_reformat_header hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
>  		enum mlx5dr_action_type reformat_type =
> @@ -4102,7 +4104,7 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  	if (mpat->mh.elements_num) {
>  		typeof(mpat->mh) *mh = &mpat->mh;
>  		struct mlx5dr_action_mh_pattern pattern[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
> -		uint32_t *mh_refcnt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(uint32_t),
> +		RTE_ATOMIC(uint32_t) *mh_refcnt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(uint32_t),
>  						 0, rte_socket_id());
>  
>  		if (!mh_refcnt)
> @@ -4146,8 +4148,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  	struct mlx5_tbl_multi_pattern_ctx mpat = MLX5_EMPTY_MULTI_PATTERN_CTX;
>  
>  	for (i = 0; i < nb_action_templates; i++) {
> -		uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
> -						     __ATOMIC_RELAXED);
> +		uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
> +						     rte_memory_order_relaxed) + 1;
>  
>  		if (refcnt <= 1) {
>  			rte_flow_error_set(error, EINVAL,
> @@ -4179,8 +4181,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  at_error:
>  	while (i--) {
>  		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
> -		__atomic_sub_fetch(&action_templates[i]->refcnt,
> -				   1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
> +				   1, rte_memory_order_relaxed);
>  	}
>  	return rte_errno;
>  }
> @@ -4326,8 +4328,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  			rte_errno = EINVAL;
>  			goto it_error;
>  		}
> -		ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
> -					 __ATOMIC_RELAXED) + 1;
> +		ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
> +					 rte_memory_order_relaxed) + 1;
>  		if (ret <= 1) {
>  			rte_errno = EINVAL;
>  			goto it_error;
> @@ -4358,14 +4360,14 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  at_error:
>  	for (i = 0; i < nb_action_templates; i++) {
>  		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
> -		__atomic_fetch_sub(&action_templates[i]->refcnt,
> -				   1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
> +				   1, rte_memory_order_relaxed);
>  	}
>  	i = nb_item_templates;
>  it_error:
>  	while (i--)
> -		__atomic_fetch_sub(&item_templates[i]->refcnt,
> -				   1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
> +				   1, rte_memory_order_relaxed);
>  error:
>  	err = rte_errno;
>  	if (tbl) {
> @@ -4567,12 +4569,12 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  	}
>  	LIST_REMOVE(table, next);
>  	for (i = 0; i < table->nb_item_templates; i++)
> -		__atomic_fetch_sub(&table->its[i]->refcnt,
> -				   1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
> +				   1, rte_memory_order_relaxed);
>  	for (i = 0; i < table->nb_action_templates; i++) {
>  		__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
> -		__atomic_fetch_sub(&table->ats[i].action_template->refcnt,
> -				   1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
> +				   1, rte_memory_order_relaxed);
>  	}
>  	mlx5dr_matcher_destroy(table->matcher);
>  	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
> @@ -6445,7 +6447,7 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  	if (!at->tmpl)
>  		goto error;
>  	at->action_flags = action_flags;
> -	__atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
>  	LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
>  	return at;
>  error:
> @@ -6481,7 +6483,7 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  	uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
>  			MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
>  
> -	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
> +	if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
>  		DRV_LOG(WARNING, "Action template %p is still in use.",
>  			(void *)template);
>  		return rte_flow_error_set(error, EBUSY,
> @@ -6876,7 +6878,7 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  			}
>  		}
>  	}
> -	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
>  	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
>  	return it;
>  }
> @@ -6899,7 +6901,7 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
>  			      struct rte_flow_pattern_template *template,
>  			      struct rte_flow_error *error __rte_unused)
>  {
> -	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
> +	if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
>  		DRV_LOG(WARNING, "Item template %p is still in use.",
>  			(void *)template);
>  		return rte_flow_error_set(error, EBUSY,
> @@ -9179,7 +9181,8 @@ struct mlx5_list_entry *
>  		}
>  		dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
>  		priv->shared_host = host_dev;
> -		__atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
> +		    rte_memory_order_relaxed);
>  	}
>  	dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
>  	/* rte_errno has been updated by HWS layer. */
> @@ -9340,7 +9343,8 @@ struct mlx5_list_entry *
>  	if (_queue_attr)
>  		mlx5_free(_queue_attr);
>  	if (priv->shared_host) {
> -		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
> +		    rte_memory_order_relaxed);
>  		priv->shared_host = NULL;
>  	}
>  	mlx5_free(priv->hw_attr);
> @@ -9434,7 +9438,8 @@ struct mlx5_list_entry *
>  	claim_zero(mlx5dr_context_close(priv->dr_ctx));
>  	if (priv->shared_host) {
>  		struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
> -		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
> +		    rte_memory_order_relaxed);
>  		priv->shared_host = NULL;
>  	}
>  	priv->dr_ctx = NULL;
> @@ -9491,8 +9496,8 @@ struct mlx5_list_entry *
>  				NULL,
>  				"Invalid CT destruction index");
>  	}
> -	__atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
> -				 __ATOMIC_RELAXED);
> +	rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
> +				 rte_memory_order_relaxed);
>  	mlx5_ipool_free(pool->cts, ct_idx);
>  	return 0;
>  }
> @@ -10185,7 +10190,7 @@ struct mlx5_list_entry *
>  		return rte_flow_error_set(error, EINVAL,
>  					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
>  					  NULL, "age data not available");
> -	switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
> +	switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
>  	case HWS_AGE_AGED_OUT_REPORTED:
>  	case HWS_AGE_AGED_OUT_NOT_REPORTED:
>  		resp->aged = 1;
> @@ -10205,8 +10210,8 @@ struct mlx5_list_entry *
>  	}
>  	resp->sec_since_last_hit_valid = !resp->aged;
>  	if (resp->sec_since_last_hit_valid)
> -		resp->sec_since_last_hit = __atomic_load_n
> -				 (&param->sec_since_last_hit, __ATOMIC_RELAXED);
> +		resp->sec_since_last_hit = rte_atomic_load_explicit
> +				 (&param->sec_since_last_hit, rte_memory_order_relaxed);
>  	return 0;
>  }
>  
> diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
> index 7cbf772..9f345dd 100644
> --- a/drivers/net/mlx5/mlx5_flow_meter.c
> +++ b/drivers/net/mlx5/mlx5_flow_meter.c
> @@ -1766,9 +1766,9 @@ struct mlx5_flow_meter_policy *
>  			NULL, "Meter profile id not valid.");
>  	/* Meter policy must exist. */
>  	if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
> -		__atomic_fetch_add
> +		rte_atomic_fetch_add_explicit
>  			(&priv->sh->mtrmng->def_policy_ref_cnt,
> -			1, __ATOMIC_RELAXED);
> +			1, rte_memory_order_relaxed);
>  		domain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;
>  		if (!priv->sh->config.dv_esw_en)
>  			domain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
> @@ -1848,7 +1848,7 @@ struct mlx5_flow_meter_policy *
>  	fm->is_enable = params->meter_enable;
>  	fm->shared = !!shared;
>  	fm->color_aware = !!params->use_prev_mtr_color;
> -	__atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
>  	if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
>  		fm->def_policy = 1;
>  		fm->flow_ipool = mlx5_ipool_create(&flow_ipool_cfg);
> @@ -1877,7 +1877,7 @@ struct mlx5_flow_meter_policy *
>  	}
>  	fm->active_state = params->meter_enable;
>  	if (mtr_policy)
> -		__atomic_fetch_add(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&mtr_policy->ref_cnt, 1, rte_memory_order_relaxed);
>  	return 0;
>  error:
>  	mlx5_flow_destroy_mtr_tbls(dev, fm);
> @@ -1972,8 +1972,8 @@ struct mlx5_flow_meter_policy *
>  			RTE_MTR_ERROR_TYPE_UNSPECIFIED,
>  			NULL, "Failed to create devx meter.");
>  	fm->active_state = params->meter_enable;
> -	__atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
> -	__atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
> +	rte_atomic_fetch_add_explicit(&policy->ref_cnt, 1, rte_memory_order_relaxed);
>  	return 0;
>  }
>  
> @@ -1995,7 +1995,7 @@ struct mlx5_flow_meter_policy *
>  	if (fmp == NULL)
>  		return -1;
>  	/* Update dependencies. */
> -	__atomic_fetch_sub(&fmp->ref_cnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_sub_explicit(&fmp->ref_cnt, 1, rte_memory_order_relaxed);
>  	fm->profile = NULL;
>  	/* Remove from list. */
>  	if (!priv->sh->meter_aso_en) {
> @@ -2013,15 +2013,15 @@ struct mlx5_flow_meter_policy *
>  	}
>  	mlx5_flow_destroy_mtr_tbls(dev, fm);
>  	if (fm->def_policy)
> -		__atomic_fetch_sub(&priv->sh->mtrmng->def_policy_ref_cnt,
> -				1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_sub_explicit(&priv->sh->mtrmng->def_policy_ref_cnt,
> +				1, rte_memory_order_relaxed);
>  	if (priv->sh->meter_aso_en) {
>  		if (!fm->def_policy) {
>  			mtr_policy = mlx5_flow_meter_policy_find(dev,
>  						fm->policy_id, NULL);
>  			if (mtr_policy)
> -				__atomic_fetch_sub(&mtr_policy->ref_cnt,
> -						1, __ATOMIC_RELAXED);
> +				rte_atomic_fetch_sub_explicit(&mtr_policy->ref_cnt,
> +						1, rte_memory_order_relaxed);
>  			fm->policy_id = 0;
>  		}
>  		fm->def_policy = 0;
> @@ -2124,13 +2124,13 @@ struct mlx5_flow_meter_policy *
>  					  RTE_MTR_ERROR_TYPE_UNSPECIFIED,
>  					  NULL, "Meter object is being used.");
>  	/* Destroy the meter profile. */
> -	__atomic_fetch_sub(&fm->profile->ref_cnt,
> -						1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_sub_explicit(&fm->profile->ref_cnt,
> +						1, rte_memory_order_relaxed);
>  	/* Destroy the meter policy. */
>  	policy = mlx5_flow_meter_policy_find(dev,
>  			fm->policy_id, NULL);
> -	__atomic_fetch_sub(&policy->ref_cnt,
> -						1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_sub_explicit(&policy->ref_cnt,
> +						1, rte_memory_order_relaxed);
>  	memset(fm, 0, sizeof(struct mlx5_flow_meter_info));
>  	return 0;
>  }
> diff --git a/drivers/net/mlx5/mlx5_flow_quota.c b/drivers/net/mlx5/mlx5_flow_quota.c
> index 14a2a8b..6ad0e8a 100644
> --- a/drivers/net/mlx5/mlx5_flow_quota.c
> +++ b/drivers/net/mlx5/mlx5_flow_quota.c
> @@ -218,9 +218,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
>  		struct mlx5_quota *quota_obj =
>  			sq->elts[(sq->tail + i) & mask].quota_obj;
>  
> -		__atomic_compare_exchange_n(&quota_obj->state, &state,
> -					    MLX5_QUOTA_STATE_READY, false,
> -					    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
> +		rte_atomic_compare_exchange_strong_explicit(&quota_obj->state, &state,
> +					    MLX5_QUOTA_STATE_READY,
> +					    rte_memory_order_relaxed, rte_memory_order_relaxed);
>  	}
>  }
>  
> @@ -278,7 +278,7 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
>  		rte_spinlock_lock(&sq->sqsl);
>  		mlx5_quota_cmd_completion_handle(sq);
>  		rte_spinlock_unlock(&sq->sqsl);
> -		if (__atomic_load_n(&quota_obj->state, __ATOMIC_RELAXED) ==
> +		if (rte_atomic_load_explicit(&quota_obj->state, rte_memory_order_relaxed) ==
>  		    MLX5_QUOTA_STATE_READY)
>  			return 0;
>  	} while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
> @@ -470,9 +470,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
>  mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)
>  {
>  	uint8_t state = MLX5_QUOTA_STATE_READY;
> -	bool verdict = __atomic_compare_exchange_n
> -		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
> -		 __ATOMIC_RELAXED, __ATOMIC_RELAXED);
> +	bool verdict = rte_atomic_compare_exchange_strong_explicit
> +		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
> +		 rte_memory_order_relaxed, rte_memory_order_relaxed);
>  
>  	if (!verdict)
>  		return rte_flow_error_set(error, EBUSY,
> @@ -507,8 +507,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
>  	ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,
>  				 async_job ? async_job : &sync_job, push, NULL);
>  	if (ret) {
> -		__atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
> -				 __ATOMIC_RELAXED);
> +		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
> +				 rte_memory_order_relaxed);
>  		return rte_flow_error_set(error, EAGAIN,
>  					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
>  	}
> @@ -557,8 +557,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
>  				 async_job ? async_job : &sync_job, push,
>  				 (void *)(uintptr_t)update->conf);
>  	if (ret) {
> -		__atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
> -				 __ATOMIC_RELAXED);
> +		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
> +				 rte_memory_order_relaxed);
>  		return rte_flow_error_set(error, EAGAIN,
>  					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
>  	}
> @@ -593,9 +593,9 @@ struct rte_flow_action_handle *
>  				   NULL, "quota: failed to allocate quota object");
>  		return NULL;
>  	}
> -	verdict = __atomic_compare_exchange_n
> -		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
> -		 __ATOMIC_RELAXED, __ATOMIC_RELAXED);
> +	verdict = rte_atomic_compare_exchange_strong_explicit
> +		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
> +		 rte_memory_order_relaxed, rte_memory_order_relaxed);
>  	if (!verdict) {
>  		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
>  				   NULL, "quota: new quota object has invalid state");
> @@ -616,8 +616,8 @@ struct rte_flow_action_handle *
>  				 (void *)(uintptr_t)conf);
>  	if (ret) {
>  		mlx5_ipool_free(qctx->quota_ipool, id);
> -		__atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_FREE,
> -				 __ATOMIC_RELAXED);
> +		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,
> +				 rte_memory_order_relaxed);
>  		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
>  				   NULL, "quota: WR failure");
>  		return 0;
> diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
> index f556a9f..4409ae7 100644
> --- a/drivers/net/mlx5/mlx5_hws_cnt.c
> +++ b/drivers/net/mlx5/mlx5_hws_cnt.c
> @@ -149,7 +149,7 @@
>  		}
>  		if (param->timeout == 0)
>  			continue;
> -		switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
> +		switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
>  		case HWS_AGE_AGED_OUT_NOT_REPORTED:
>  		case HWS_AGE_AGED_OUT_REPORTED:
>  			/* Already aged-out, no action is needed. */
> @@ -171,8 +171,8 @@
>  		hits = rte_be_to_cpu_64(stats[i].hits);
>  		if (param->nb_cnts == 1) {
>  			if (hits != param->accumulator_last_hits) {
> -				__atomic_store_n(&param->sec_since_last_hit, 0,
> -						 __ATOMIC_RELAXED);
> +				rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
> +						 rte_memory_order_relaxed);
>  				param->accumulator_last_hits = hits;
>  				continue;
>  			}
> @@ -184,8 +184,8 @@
>  			param->accumulator_cnt = 0;
>  			if (param->accumulator_last_hits !=
>  						param->accumulator_hits) {
> -				__atomic_store_n(&param->sec_since_last_hit,
> -						 0, __ATOMIC_RELAXED);
> +				rte_atomic_store_explicit(&param->sec_since_last_hit,
> +						 0, rte_memory_order_relaxed);
>  				param->accumulator_last_hits =
>  							param->accumulator_hits;
>  				param->accumulator_hits = 0;
> @@ -193,9 +193,9 @@
>  			}
>  			param->accumulator_hits = 0;
>  		}
> -		if (__atomic_fetch_add(&param->sec_since_last_hit, time_delta,
> -				       __ATOMIC_RELAXED) + time_delta <=
> -		   __atomic_load_n(&param->timeout, __ATOMIC_RELAXED))
> +		if (rte_atomic_fetch_add_explicit(&param->sec_since_last_hit, time_delta,
> +				       rte_memory_order_relaxed) + time_delta <=
> +		   rte_atomic_load_explicit(&param->timeout, rte_memory_order_relaxed))
>  			continue;
>  		/* Prepare the relevant ring for this AGE parameter */
>  		if (priv->hws_strict_queue)
> @@ -203,10 +203,10 @@
>  		else
>  			r = age_info->hw_age.aged_list;
>  		/* Changing the state atomically and insert it into the ring. */
> -		if (__atomic_compare_exchange_n(&param->state, &expected1,
> +		if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected1,
>  						HWS_AGE_AGED_OUT_NOT_REPORTED,
> -						false, __ATOMIC_RELAXED,
> -						__ATOMIC_RELAXED)) {
> +						rte_memory_order_relaxed,
> +						rte_memory_order_relaxed)) {
>  			int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
>  							      sizeof(uint32_t),
>  							      1, NULL);
> @@ -221,11 +221,10 @@
>  			 */
>  			expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
>  			if (ret == 0 &&
> -			    !__atomic_compare_exchange_n(&param->state,
> +			    !rte_atomic_compare_exchange_strong_explicit(&param->state,
>  							 &expected2, expected1,
> -							 false,
> -							 __ATOMIC_RELAXED,
> -							 __ATOMIC_RELAXED) &&
> +							 rte_memory_order_relaxed,
> +							 rte_memory_order_relaxed) &&
>  			    expected2 == HWS_AGE_FREE)
>  				mlx5_hws_age_param_free(priv,
>  							param->own_cnt_index,
> @@ -235,10 +234,10 @@
>  			if (!priv->hws_strict_queue)
>  				MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
>  		} else {
> -			__atomic_compare_exchange_n(&param->state, &expected2,
> +			rte_atomic_compare_exchange_strong_explicit(&param->state, &expected2,
>  						  HWS_AGE_AGED_OUT_NOT_REPORTED,
> -						  false, __ATOMIC_RELAXED,
> -						  __ATOMIC_RELAXED);
> +						  rte_memory_order_relaxed,
> +						  rte_memory_order_relaxed);
>  		}
>  	}
>  	/* The event is irrelevant in strict queue mode. */
> @@ -752,8 +751,8 @@ struct mlx5_hws_cnt_pool *
>  		return rte_flow_error_set(error, EINVAL,
>  					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
>  					  "invalid AGE parameter index");
> -	switch (__atomic_exchange_n(&param->state, HWS_AGE_FREE,
> -				    __ATOMIC_RELAXED)) {
> +	switch (rte_atomic_exchange_explicit(&param->state, HWS_AGE_FREE,
> +				    rte_memory_order_relaxed)) {
>  	case HWS_AGE_CANDIDATE:
>  	case HWS_AGE_AGED_OUT_REPORTED:
>  		mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
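
Unlike the sub_fetch/add_fetch cases, the exchange conversion above needs no
arithmetic fix-up: both __atomic_exchange_n() and rte_atomic_exchange_explicit()
return the previous value. A minimal sketch (the "state" variable is a
placeholder, not the driver's AGE state field):

#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint16_t) state;

/* Atomically install a new state and report the one it replaced. */
static inline uint16_t
swap_state(uint16_t new_state)
{
	/* Old form: __atomic_exchange_n(&state, new_state, __ATOMIC_RELAXED); */
	return rte_atomic_exchange_explicit(&state, new_state,
			rte_memory_order_relaxed);
}
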
> @@ -818,8 +817,8 @@ struct mlx5_hws_cnt_pool *
>  				   "cannot allocate AGE parameter");
>  		return 0;
>  	}
> -	MLX5_ASSERT(__atomic_load_n(&param->state,
> -				    __ATOMIC_RELAXED) == HWS_AGE_FREE);
> +	MLX5_ASSERT(rte_atomic_load_explicit(&param->state,
> +				    rte_memory_order_relaxed) == HWS_AGE_FREE);
>  	if (shared) {
>  		param->nb_cnts = 0;
>  		param->accumulator_hits = 0;
> @@ -870,9 +869,9 @@ struct mlx5_hws_cnt_pool *
>  					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
>  					  "invalid AGE parameter index");
>  	if (update_ade->timeout_valid) {
> -		uint32_t old_timeout = __atomic_exchange_n(&param->timeout,
> +		uint32_t old_timeout = rte_atomic_exchange_explicit(&param->timeout,
>  							   update_ade->timeout,
> -							   __ATOMIC_RELAXED);
> +							   rte_memory_order_relaxed);
>  
>  		if (old_timeout == 0)
>  			sec_since_last_hit_reset = true;
> @@ -891,8 +890,8 @@ struct mlx5_hws_cnt_pool *
>  		state_update = true;
>  	}
>  	if (sec_since_last_hit_reset)
> -		__atomic_store_n(&param->sec_since_last_hit, 0,
> -				 __ATOMIC_RELAXED);
> +		rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
> +				 rte_memory_order_relaxed);
>  	if (state_update) {
>  		uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
>  
> @@ -901,13 +900,13 @@ struct mlx5_hws_cnt_pool *
>  		 *  - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
>  		 *  - AGED_OUT_REPORTED -> CANDIDATE
>  		 */
> -		if (!__atomic_compare_exchange_n(&param->state, &expected,
> +		if (!rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
>  						 HWS_AGE_CANDIDATE_INSIDE_RING,
> -						 false, __ATOMIC_RELAXED,
> -						 __ATOMIC_RELAXED) &&
> +						 rte_memory_order_relaxed,
> +						 rte_memory_order_relaxed) &&
>  		    expected == HWS_AGE_AGED_OUT_REPORTED)
> -			__atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
> -					 __ATOMIC_RELAXED);
> +			rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
> +					 rte_memory_order_relaxed);
>  	}
>  	return 0;
>  }
> @@ -932,9 +931,9 @@ struct mlx5_hws_cnt_pool *
>  	uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
>  
>  	MLX5_ASSERT(param != NULL);
> -	if (__atomic_compare_exchange_n(&param->state, &expected,
> -					HWS_AGE_AGED_OUT_REPORTED, false,
> -					__ATOMIC_RELAXED, __ATOMIC_RELAXED))
> +	if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
> +					HWS_AGE_AGED_OUT_REPORTED,
> +					rte_memory_order_relaxed, rte_memory_order_relaxed))
>  		return param->context;
>  	switch (expected) {
>  	case HWS_AGE_FREE:
> @@ -946,8 +945,8 @@ struct mlx5_hws_cnt_pool *
>  		mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
>  		break;
>  	case HWS_AGE_CANDIDATE_INSIDE_RING:
> -		__atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
> -				 __ATOMIC_RELAXED);
> +		rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
> +				 rte_memory_order_relaxed);
>  		break;
>  	case HWS_AGE_CANDIDATE:
>  		/*
> diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
> index dcd5cec..a18c9be 100644
> --- a/drivers/net/mlx5/mlx5_hws_cnt.h
> +++ b/drivers/net/mlx5/mlx5_hws_cnt.h
> @@ -100,7 +100,7 @@ struct mlx5_hws_cnt_pool_caches {
>  struct mlx5_hws_cnt_pool {
>  	struct mlx5_hws_cnt_pool_cfg cfg __rte_cache_aligned;
>  	struct mlx5_hws_cnt_dcs_mng dcs_mng __rte_cache_aligned;
> -	uint32_t query_gen __rte_cache_aligned;
> +	RTE_ATOMIC(uint32_t) query_gen __rte_cache_aligned;
>  	struct mlx5_hws_cnt *pool;
>  	struct mlx5_hws_cnt_raw_data_mng *raw_mng;
>  	struct rte_ring *reuse_list;
> @@ -132,10 +132,10 @@ enum {
>  
>  /* HWS counter age parameter. */
>  struct mlx5_hws_age_param {
> -	uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
> -	uint32_t sec_since_last_hit;
> +	RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
> +	RTE_ATOMIC(uint32_t) sec_since_last_hit;
>  	/* Time in seconds since last hit (atomically accessed). */
> -	uint16_t state; /* AGE state (atomically accessed). */
> +	RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
>  	uint64_t accumulator_last_hits;
>  	/* Last total value of hits for comparing. */
>  	uint64_t accumulator_hits;
> @@ -424,7 +424,7 @@ struct mlx5_hws_age_param {
>  	iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
>  	hpool->pool[iidx].in_used = false;
>  	hpool->pool[iidx].query_gen_when_free =
> -		__atomic_load_n(&hpool->query_gen, __ATOMIC_RELAXED);
> +		rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);
>  	if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
>  		qcache = hpool->cache->qcache[*queue];
>  	if (unlikely(qcache == NULL)) {
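
The header changes above show the other half of the conversion: every field
that is only touched through rte_atomic_*() gets the RTE_ATOMIC() specifier,
which expands to _Atomic when DPDK is built with standard C11 atomics and to
nothing otherwise, so the calls type-check in both build modes. A minimal
sketch of the pattern (struct and field names are placeholders, not the real
mlx5 layout):

#include <stdint.h>
#include <rte_stdatomic.h>

struct example_obj {
	RTE_ATOMIC(uint32_t) refcnt; /* accessed only via rte_atomic_*() */
	uint32_t plain;              /* ordinary field, no annotation */
};

/* Take a reference and return the new count. */
static inline uint32_t
example_obj_ref(struct example_obj *obj)
{
	return rte_atomic_fetch_add_explicit(&obj->refcnt, 1,
			rte_memory_order_relaxed) + 1;
}
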
> diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
> index 2fce908..c627113 100644
> --- a/drivers/net/mlx5/mlx5_rx.h
> +++ b/drivers/net/mlx5/mlx5_rx.h
> @@ -173,7 +173,7 @@ struct mlx5_rxq_ctrl {
>  /* RX queue private data. */
>  struct mlx5_rxq_priv {
>  	uint16_t idx; /* Queue index. */
> -	uint32_t refcnt; /* Reference counter. */
> +	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
>  	struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
>  	LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
>  	struct mlx5_priv *priv; /* Back pointer to private data. */
> @@ -188,7 +188,7 @@ struct mlx5_rxq_priv {
>  /* External RX queue descriptor. */
>  struct mlx5_external_rxq {
>  	uint32_t hw_id; /* Queue index in the Hardware. */
> -	uint32_t refcnt; /* Reference counter. */
> +	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
>  };
>  
>  /* mlx5_rxq.c */
> @@ -412,7 +412,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
>  	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
>  	void *addr;
>  
> -	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
> +	if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {
>  		MLX5_ASSERT(rep != NULL);
>  		/* Replace MPRQ buf. */
>  		(*rxq->mprq_bufs)[rq_idx] = rep;
> @@ -524,9 +524,9 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
>  		void *buf_addr;
>  
>  		/* Increment the refcnt of the whole chunk. */
> -		__atomic_fetch_add(&buf->refcnt, 1, __ATOMIC_RELAXED);
> -		MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
> -			    __ATOMIC_RELAXED) <= strd_n + 1);
> +		rte_atomic_fetch_add_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
> +		MLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,
> +			    rte_memory_order_relaxed) <= strd_n + 1);
>  		buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
>  		/*
>  		 * MLX5 device doesn't use iova but it is necessary in a
> @@ -666,7 +666,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
>  	if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
>  		return false;
>  	rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
> -	return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
> +	return !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);
>  }
>  
>  #define LWM_COOKIE_RXQID_OFFSET 0
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index 88b2dc5..16a5170 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -416,7 +416,7 @@
>  		rte_errno = EINVAL;
>  		return -rte_errno;
>  	}
> -	return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
> +	return (rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed) == 1);
>  }
>  
>  /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
> @@ -1319,7 +1319,7 @@
>  
>  	memset(_m, 0, sizeof(*buf));
>  	buf->mp = mp;
> -	__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
>  	for (j = 0; j != strd_n; ++j) {
>  		shinfo = &buf->shinfos[j];
>  		shinfo->free_cb = mlx5_mprq_buf_free_cb;
> @@ -2035,7 +2035,7 @@ struct mlx5_rxq_priv *
>  	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
>  
>  	if (rxq != NULL)
> -		__atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
>  	return rxq;
>  }
>  
> @@ -2057,7 +2057,7 @@ struct mlx5_rxq_priv *
>  
>  	if (rxq == NULL)
>  		return 0;
> -	return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
> +	return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
>  }
>  
>  /**
> @@ -2136,7 +2136,7 @@ struct mlx5_external_rxq *
>  {
>  	struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
>  
> -	__atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
>  	return rxq;
>  }
>  
> @@ -2156,7 +2156,7 @@ struct mlx5_external_rxq *
>  {
>  	struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
>  
> -	return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
> +	return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
>  }
>  
>  /**
> @@ -2445,8 +2445,8 @@ struct mlx5_ind_table_obj *
>  		    (memcmp(ind_tbl->queues, queues,
>  			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
>  		     == 0)) {
> -			__atomic_fetch_add(&ind_tbl->refcnt, 1,
> -					   __ATOMIC_RELAXED);
> +			rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1,
> +					   rte_memory_order_relaxed);
>  			break;
>  		}
>  	}
> @@ -2477,7 +2477,7 @@ struct mlx5_ind_table_obj *
>  	unsigned int ret;
>  
>  	rte_rwlock_write_lock(&priv->ind_tbls_lock);
> -	ret = __atomic_fetch_sub(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) - 1;
> +	ret = rte_atomic_fetch_sub_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed) - 1;
>  	if (!ret)
>  		LIST_REMOVE(ind_tbl, next);
>  	rte_rwlock_write_unlock(&priv->ind_tbls_lock);
> @@ -2559,7 +2559,7 @@ struct mlx5_ind_table_obj *
>  		}
>  		return ret;
>  	}
> -	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed);
>  	return 0;
>  }
>  
> @@ -2624,7 +2624,7 @@ struct mlx5_ind_table_obj *
>  {
>  	uint32_t refcnt;
>  
> -	refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
> +	refcnt = rte_atomic_load_explicit(&ind_tbl->refcnt, rte_memory_order_relaxed);
>  	if (refcnt <= 1)
>  		return 0;
>  	/*
> @@ -3256,8 +3256,8 @@ struct mlx5_hrxq *
>  	ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
>  	if (ext_rxq == NULL)
>  		return -rte_errno;
> -	if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
> -					 __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
> +	if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &unmapped, 1,
> +					 rte_memory_order_relaxed, rte_memory_order_relaxed)) {
>  		if (ext_rxq->hw_id != hw_idx) {
>  			DRV_LOG(ERR, "Port %u external RxQ index %u "
>  				"is already mapped to HW index (requesting is "
> @@ -3294,8 +3294,8 @@ struct mlx5_hrxq *
>  		rte_errno = EINVAL;
>  		return -rte_errno;
>  	}
> -	if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
> -					 __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
> +	if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &mapped, 0,
> +					 rte_memory_order_relaxed, rte_memory_order_relaxed)) {
>  		DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
>  			port_id, dpdk_idx);
>  		rte_errno = EINVAL;
> diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
> index 7bdb897..7b2ddd9 100644
> --- a/drivers/net/mlx5/mlx5_trigger.c
> +++ b/drivers/net/mlx5/mlx5_trigger.c
> @@ -1436,7 +1436,7 @@
>  	rte_delay_us_sleep(1000 * priv->rxqs_n);
>  	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
>  	if (priv->sh->config.dv_flow_en == 2) {
> -		if (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))
> +		if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt, rte_memory_order_relaxed))
>  			flow_hw_rxq_flag_set(dev, false);
>  	} else {
>  		mlx5_flow_stop_default(dev);
> diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
> index 264cc19..0be0df7 100644
> --- a/drivers/net/mlx5/mlx5_tx.h
> +++ b/drivers/net/mlx5/mlx5_tx.h
> @@ -178,7 +178,7 @@ struct mlx5_txq_data {
>  /* TX queue control descriptor. */
>  struct mlx5_txq_ctrl {
>  	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
> -	uint32_t refcnt; /* Reference counter. */
> +	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
>  	unsigned int socket; /* CPU socket ID for allocations. */
>  	bool is_hairpin; /* Whether TxQ type is Hairpin. */
>  	unsigned int max_inline_data; /* Max inline data. */
> @@ -338,8 +338,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
>  		 * the service thread, data should be re-read.
>  		 */
>  		rte_compiler_barrier();
> -		ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
> -		ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
> +		ci = rte_atomic_load_explicit(&sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
> +		ts = rte_atomic_load_explicit(&sh->txpp.ts.ts, rte_memory_order_relaxed);
>  		rte_compiler_barrier();
>  		if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
>  			break;
> @@ -349,8 +349,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
>  	mts -= ts;
>  	if (unlikely(mts >= UINT64_MAX / 2)) {
>  		/* We have negative integer, mts is in the past. */
> -		__atomic_fetch_add(&sh->txpp.err_ts_past,
> -				   1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_past,
> +				   1, rte_memory_order_relaxed);
>  		return -1;
>  	}
>  	tick = sh->txpp.tick;
> @@ -359,8 +359,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
>  	mts = (mts + tick - 1) / tick;
>  	if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
>  		/* We have mts is too distant future. */
> -		__atomic_fetch_add(&sh->txpp.err_ts_future,
> -				   1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_future,
> +				   1, rte_memory_order_relaxed);
>  		return -1;
>  	}
>  	mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
> @@ -1742,8 +1742,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
>  		/* Convert the timestamp into completion to wait. */
>  		ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
>  		if (txq->ts_last && ts < txq->ts_last)
> -			__atomic_fetch_add(&txq->sh->txpp.err_ts_order,
> -					   1, __ATOMIC_RELAXED);
> +			rte_atomic_fetch_add_explicit(&txq->sh->txpp.err_ts_order,
> +					   1, rte_memory_order_relaxed);
>  		txq->ts_last = ts;
>  		wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
>  		sh = txq->sh;
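
The tx.h/txpp.c hunks keep the existing lock-free read protocol untouched: the
timestamp pair is still read with relaxed loads bracketed by
rte_compiler_barrier() and retried until both halves agree; only the accessor
spelling changes. A simplified sketch of that retry loop (the "ci_ts"/"ts"
variables stand in for the txpp fields, and the consistency check is reduced
here to a plain re-read instead of the CQ-index comparison used by the driver):

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint64_t) ci_ts;
static RTE_ATOMIC(uint64_t) ts;

/* Read both values and retry until they form a consistent snapshot. */
static inline void
read_ts_pair(uint64_t *out_ci, uint64_t *out_ts)
{
	uint64_t ci, t;

	do {
		rte_compiler_barrier();
		ci = rte_atomic_load_explicit(&ci_ts, rte_memory_order_relaxed);
		t = rte_atomic_load_explicit(&ts, rte_memory_order_relaxed);
		rte_compiler_barrier();
	} while (ci != rte_atomic_load_explicit(&ci_ts, rte_memory_order_relaxed));
	*out_ci = ci;
	*out_ts = t;
}
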
> diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
> index 5a5df2d..4e26fa2 100644
> --- a/drivers/net/mlx5/mlx5_txpp.c
> +++ b/drivers/net/mlx5/mlx5_txpp.c
> @@ -538,12 +538,12 @@
>  		uint64_t *ps;
>  
>  		rte_compiler_barrier();
> -		tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
> -		op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
> +		tm = rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed);
> +		op = rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed);
>  		rte_compiler_barrier();
> -		if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
> +		if (tm != rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed))
>  			continue;
> -		if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
> +		if (op != rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed))
>  			continue;
>  		ps = (uint64_t *)ts;
>  		ps[0] = tm;
> @@ -561,8 +561,8 @@
>  	ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
>  	ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
>  	rte_compiler_barrier();
> -	__atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
> -	__atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
> +	rte_atomic_store_explicit(&sh->txpp.ts.ts, ts, rte_memory_order_relaxed);
> +	rte_atomic_store_explicit(&sh->txpp.ts.ci_ts, ci, rte_memory_order_relaxed);
>  	rte_wmb();
>  }
>  
> @@ -590,8 +590,8 @@
>  			 */
>  			DRV_LOG(DEBUG,
>  				"Clock Queue error sync lost (%X).", opcode);
> -				__atomic_fetch_add(&sh->txpp.err_clock_queue,
> -				   1, __ATOMIC_RELAXED);
> +				rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
> +				   1, rte_memory_order_relaxed);
>  			sh->txpp.sync_lost = 1;
>  		}
>  		return;
> @@ -633,10 +633,10 @@
>  	if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
>  		return;
>  	MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
> -	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
> -			 sh->txpp.ts.ts, __ATOMIC_RELAXED);
> -	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
> -			 sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
> +	rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ts,
> +			 sh->txpp.ts.ts, rte_memory_order_relaxed);
> +	rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
> +			 sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
>  	if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
>  		sh->txpp.ts_p = 0;
>  	if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
> @@ -677,8 +677,8 @@
>  		/* Check whether we have missed interrupts. */
>  		if (cq_ci - wq->cq_ci != 1) {
>  			DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
> -			__atomic_fetch_add(&sh->txpp.err_miss_int,
> -					   1, __ATOMIC_RELAXED);
> +			rte_atomic_fetch_add_explicit(&sh->txpp.err_miss_int,
> +					   1, rte_memory_order_relaxed);
>  			/* Check sync lost on wqe index. */
>  			if (cq_ci - wq->cq_ci >=
>  				(((1UL << MLX5_WQ_INDEX_WIDTH) /
> @@ -693,8 +693,8 @@
>  		/* Fire new requests to Rearm Queue. */
>  		if (error) {
>  			DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
> -			__atomic_fetch_add(&sh->txpp.err_rearm_queue,
> -					   1, __ATOMIC_RELAXED);
> +			rte_atomic_fetch_add_explicit(&sh->txpp.err_rearm_queue,
> +					   1, rte_memory_order_relaxed);
>  			sh->txpp.sync_lost = 1;
>  		}
>  	}
> @@ -987,8 +987,8 @@
>  		mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
>  		if (to.cts.op_own >> 4) {
>  			DRV_LOG(DEBUG, "Clock Queue error sync lost.");
> -			__atomic_fetch_add(&sh->txpp.err_clock_queue,
> -					   1, __ATOMIC_RELAXED);
> +			rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
> +					   1, rte_memory_order_relaxed);
>  			sh->txpp.sync_lost = 1;
>  			return -EIO;
>  		}
> @@ -1031,12 +1031,12 @@ int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
>  	struct mlx5_priv *priv = dev->data->dev_private;
>  	struct mlx5_dev_ctx_shared *sh = priv->sh;
>  
> -	__atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
> -	__atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
> -	__atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
> -	__atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
> -	__atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
> -	__atomic_store_n(&sh->txpp.err_ts_order, 0, __ATOMIC_RELAXED);
> +	rte_atomic_store_explicit(&sh->txpp.err_miss_int, 0, rte_memory_order_relaxed);
> +	rte_atomic_store_explicit(&sh->txpp.err_rearm_queue, 0, rte_memory_order_relaxed);
> +	rte_atomic_store_explicit(&sh->txpp.err_clock_queue, 0, rte_memory_order_relaxed);
> +	rte_atomic_store_explicit(&sh->txpp.err_ts_past, 0, rte_memory_order_relaxed);
> +	rte_atomic_store_explicit(&sh->txpp.err_ts_future, 0, rte_memory_order_relaxed);
> +	rte_atomic_store_explicit(&sh->txpp.err_ts_order, 0, rte_memory_order_relaxed);
>  	return 0;
>  }
>  
> @@ -1081,16 +1081,16 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
>  	do {
>  		uint64_t ts, ci;
>  
> -		ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
> -		ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
> +		ts = rte_atomic_load_explicit(&txpp->tsa[idx].ts, rte_memory_order_relaxed);
> +		ci = rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts, rte_memory_order_relaxed);
>  		rte_compiler_barrier();
>  		if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
>  			continue;
> -		if (__atomic_load_n(&txpp->tsa[idx].ts,
> -				    __ATOMIC_RELAXED) != ts)
> +		if (rte_atomic_load_explicit(&txpp->tsa[idx].ts,
> +				    rte_memory_order_relaxed) != ts)
>  			continue;
> -		if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
> -				    __ATOMIC_RELAXED) != ci)
> +		if (rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts,
> +				    rte_memory_order_relaxed) != ci)
>  			continue;
>  		tsa->ts = ts;
>  		tsa->ci_ts = ci;
> @@ -1210,23 +1210,23 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
>  		for (i = 0; i < n_txpp; ++i)
>  			stats[n_used + i].id = n_used + i;
>  		stats[n_used + 0].value =
> -				__atomic_load_n(&sh->txpp.err_miss_int,
> -						__ATOMIC_RELAXED);
> +				rte_atomic_load_explicit(&sh->txpp.err_miss_int,
> +						rte_memory_order_relaxed);
>  		stats[n_used + 1].value =
> -				__atomic_load_n(&sh->txpp.err_rearm_queue,
> -						__ATOMIC_RELAXED);
> +				rte_atomic_load_explicit(&sh->txpp.err_rearm_queue,
> +						rte_memory_order_relaxed);
>  		stats[n_used + 2].value =
> -				__atomic_load_n(&sh->txpp.err_clock_queue,
> -						__ATOMIC_RELAXED);
> +				rte_atomic_load_explicit(&sh->txpp.err_clock_queue,
> +						rte_memory_order_relaxed);
>  		stats[n_used + 3].value =
> -				__atomic_load_n(&sh->txpp.err_ts_past,
> -						__ATOMIC_RELAXED);
> +				rte_atomic_load_explicit(&sh->txpp.err_ts_past,
> +						rte_memory_order_relaxed);
>  		stats[n_used + 4].value =
> -				__atomic_load_n(&sh->txpp.err_ts_future,
> -						__ATOMIC_RELAXED);
> +				rte_atomic_load_explicit(&sh->txpp.err_ts_future,
> +						rte_memory_order_relaxed);
>  		stats[n_used + 5].value =
> -				__atomic_load_n(&sh->txpp.err_ts_order,
> -						__ATOMIC_RELAXED);
> +				rte_atomic_load_explicit(&sh->txpp.err_ts_order,
> +						rte_memory_order_relaxed);
>  		stats[n_used + 6].value = mlx5_txpp_xstats_jitter(&sh->txpp);
>  		stats[n_used + 7].value = mlx5_txpp_xstats_wander(&sh->txpp);
>  		stats[n_used + 8].value = sh->txpp.sync_lost;
> diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
> index b584055..22956ff 100644
> --- a/drivers/net/mlx5/mlx5_txq.c
> +++ b/drivers/net/mlx5/mlx5_txq.c
> @@ -1108,7 +1108,7 @@ struct mlx5_txq_ctrl *
>  		rte_errno = ENOMEM;
>  		goto error;
>  	}
> -	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
>  	tmpl->is_hairpin = false;
>  	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
>  	return tmpl;
> @@ -1153,7 +1153,7 @@ struct mlx5_txq_ctrl *
>  	tmpl->txq.idx = idx;
>  	tmpl->hairpin_conf = *hairpin_conf;
>  	tmpl->is_hairpin = true;
> -	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
>  	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
>  	return tmpl;
>  }
> @@ -1178,7 +1178,7 @@ struct mlx5_txq_ctrl *
>  
>  	if (txq_data) {
>  		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
> -		__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&ctrl->refcnt, 1, rte_memory_order_relaxed);
>  	}
>  	return ctrl;
>  }
> @@ -1203,7 +1203,7 @@ struct mlx5_txq_ctrl *
>  	if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
>  		return 0;
>  	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
> -	if (__atomic_fetch_sub(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) - 1 > 1)
> +	if (rte_atomic_fetch_sub_explicit(&txq_ctrl->refcnt, 1, rte_memory_order_relaxed) - 1 > 1)
>  		return 1;
>  	if (txq_ctrl->obj) {
>  		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
> @@ -1219,7 +1219,7 @@ struct mlx5_txq_ctrl *
>  		txq_free_elts(txq_ctrl);
>  		dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
>  	}
> -	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
> +	if (!rte_atomic_load_explicit(&txq_ctrl->refcnt, rte_memory_order_relaxed)) {
>  		if (!txq_ctrl->is_hairpin)
>  			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
>  		LIST_REMOVE(txq_ctrl, next);
> @@ -1249,7 +1249,7 @@ struct mlx5_txq_ctrl *
>  	if (!(*priv->txqs)[idx])
>  		return -1;
>  	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
> -	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
> +	return (rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed) == 1);
>  }
>  
>  /**
> diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
> index 4db7387..7c1d0ff 100644
> --- a/drivers/net/mlx5/mlx5_utils.c
> +++ b/drivers/net/mlx5/mlx5_utils.c
> @@ -203,7 +203,7 @@ struct mlx5_indexed_pool *
>  	struct mlx5_indexed_cache *gc, *lc, *olc = NULL;
>  
>  	lc = pool->cache[cidx]->lc;
> -	gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
> +	gc = rte_atomic_load_explicit(&pool->gc, rte_memory_order_relaxed);
>  	if (gc && lc != gc) {
>  		mlx5_ipool_lock(pool);
>  		if (lc && !(--lc->ref_cnt))
> @@ -266,8 +266,8 @@ struct mlx5_indexed_pool *
>  		pool->cache[cidx]->len = fetch_size - 1;
>  		return pool->cache[cidx]->idx[pool->cache[cidx]->len];
>  	}
> -	trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
> -			 __ATOMIC_ACQUIRE) : 0;
> +	trunk_idx = lc ? rte_atomic_load_explicit(&lc->n_trunk_valid,
> +			 rte_memory_order_acquire) : 0;
>  	trunk_n = lc ? lc->n_trunk : 0;
>  	cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
>  	/* Check if index reach maximum. */
> @@ -332,11 +332,11 @@ struct mlx5_indexed_pool *
>  		lc = p;
>  		lc->ref_cnt = 1;
>  		pool->cache[cidx]->lc = lc;
> -		__atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
> +		rte_atomic_store_explicit(&pool->gc, p, rte_memory_order_relaxed);
>  	}
>  	/* Add trunk to trunks array. */
>  	lc->trunks[trunk_idx] = trunk;
> -	__atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
> +	rte_atomic_fetch_add_explicit(&lc->n_trunk_valid, 1, rte_memory_order_relaxed);
>  	/* Enqueue half of the index to global. */
>  	ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
>  	fetch_size = trunk->free >> 1;
> diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
> index 82e8298..fddfe09 100644
> --- a/drivers/net/mlx5/mlx5_utils.h
> +++ b/drivers/net/mlx5/mlx5_utils.h
> @@ -240,7 +240,7 @@ struct mlx5_indexed_trunk {
>  
>  struct mlx5_indexed_cache {
>  	struct mlx5_indexed_trunk **trunks;
> -	volatile uint32_t n_trunk_valid; /* Trunks allocated. */
> +	volatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */
>  	uint32_t n_trunk; /* Trunk pointer array size. */
>  	uint32_t ref_cnt;
>  	uint32_t len;
> @@ -266,7 +266,7 @@ struct mlx5_indexed_pool {
>  			uint32_t free_list; /* Index to first free trunk. */
>  		};
>  		struct {
> -			struct mlx5_indexed_cache *gc;
> +			RTE_ATOMIC(struct mlx5_indexed_cache *) gc;
>  			/* Global cache. */
>  			struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
>  			/* Local cache. */
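
The ipool change above also covers an atomically published pointer: pool->gc
becomes RTE_ATOMIC(struct mlx5_indexed_cache *) and is read and written with
relaxed load/store, just like the scalar counters. A minimal sketch with
placeholder names:

#include <rte_stdatomic.h>

struct cache; /* opaque for the sketch */

static RTE_ATOMIC(struct cache *) global_cache;

/* Publish a new cache snapshot for readers on other lcores. */
static inline void
cache_publish(struct cache *c)
{
	rte_atomic_store_explicit(&global_cache, c, rte_memory_order_relaxed);
}

/* Grab the current snapshot. */
static inline struct cache *
cache_snapshot(void)
{
	return rte_atomic_load_explicit(&global_cache, rte_memory_order_relaxed);
}
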
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
> index f900384..98c39a5 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa.c
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
> @@ -261,8 +261,8 @@
>  	uint32_t timeout = 0;
>  
>  	/* Check and wait all close tasks done. */
> -	while (__atomic_load_n(&priv->dev_close_progress,
> -		__ATOMIC_RELAXED) != 0 && timeout < 1000) {
> +	while (rte_atomic_load_explicit(&priv->dev_close_progress,
> +		rte_memory_order_relaxed) != 0 && timeout < 1000) {
>  		rte_delay_us_sleep(10000);
>  		timeout++;
>  	}
> @@ -294,8 +294,8 @@
>  			priv->last_c_thrd_idx = 0;
>  		else
>  			priv->last_c_thrd_idx++;
> -		__atomic_store_n(&priv->dev_close_progress,
> -			1, __ATOMIC_RELAXED);
> +		rte_atomic_store_explicit(&priv->dev_close_progress,
> +			1, rte_memory_order_relaxed);
>  		if (mlx5_vdpa_task_add(priv,
>  			priv->last_c_thrd_idx,
>  			MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
> @@ -319,8 +319,8 @@
>  	if (!priv->connected)
>  		mlx5_vdpa_dev_cache_clean(priv);
>  	priv->vid = 0;
> -	__atomic_store_n(&priv->dev_close_progress, 0,
> -		__ATOMIC_RELAXED);
> +	rte_atomic_store_explicit(&priv->dev_close_progress, 0,
> +		rte_memory_order_relaxed);
>  	priv->state = MLX5_VDPA_STATE_PROBED;
>  	DRV_LOG(INFO, "vDPA device %d was closed.", vid);
>  	return ret;
> @@ -664,7 +664,9 @@
>  static int
>  mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
>  {
> -	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
> +	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
> +	RTE_ATOMIC(uint32_t) err_cnt = 0;
> +	uint32_t task_num = 0;
>  	uint32_t max_queues, index, thrd_idx, data[1];
>  	struct mlx5_vdpa_virtq *virtq;
>  
> @@ -847,8 +849,8 @@
>  		if (conf_thread_mng.initializer_priv == priv)
>  			if (mlx5_vdpa_mult_threads_create())
>  				goto error;
> -		__atomic_fetch_add(&conf_thread_mng.refcnt, 1,
> -			__ATOMIC_RELAXED);
> +		rte_atomic_fetch_add_explicit(&conf_thread_mng.refcnt, 1,
> +			rte_memory_order_relaxed);
>  	}
>  	if (mlx5_vdpa_create_dev_resources(priv))
>  		goto error;
> @@ -937,8 +939,8 @@
>  	if (priv->vdev)
>  		rte_vdpa_unregister_device(priv->vdev);
>  	if (priv->use_c_thread)
> -		if (__atomic_fetch_sub(&conf_thread_mng.refcnt,
> -			1, __ATOMIC_RELAXED) == 1)
> +		if (rte_atomic_fetch_sub_explicit(&conf_thread_mng.refcnt,
> +			1, rte_memory_order_relaxed) == 1)
>  			mlx5_vdpa_mult_threads_destroy(true);
>  	rte_free(priv);
>  }
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
> index 7b37c98..0cc67ed 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa.h
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
> @@ -93,8 +93,8 @@ enum mlx5_vdpa_task_type {
>  struct mlx5_vdpa_task {
>  	struct mlx5_vdpa_priv *priv;
>  	enum mlx5_vdpa_task_type type;
> -	uint32_t *remaining_cnt;
> -	uint32_t *err_cnt;
> +	RTE_ATOMIC(uint32_t) *remaining_cnt;
> +	RTE_ATOMIC(uint32_t) *err_cnt;
>  	uint32_t idx;
>  } __rte_packed __rte_aligned(4);
>  
> @@ -107,7 +107,7 @@ struct mlx5_vdpa_c_thread {
>  
>  struct mlx5_vdpa_conf_thread_mng {
>  	void *initializer_priv;
> -	uint32_t refcnt;
> +	RTE_ATOMIC(uint32_t) refcnt;
>  	uint32_t max_thrds;
>  	pthread_mutex_t cthrd_lock;
>  	struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
> @@ -212,7 +212,7 @@ struct mlx5_vdpa_priv {
>  	uint64_t features; /* Negotiated features. */
>  	uint16_t log_max_rqt_size;
>  	uint16_t last_c_thrd_idx;
> -	uint16_t dev_close_progress;
> +	RTE_ATOMIC(uint16_t) dev_close_progress;
>  	uint16_t num_mrs; /* Number of memory regions. */
>  	struct mlx5_vdpa_steer steer;
>  	struct mlx5dv_var *var;
> @@ -581,13 +581,13 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
>  mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
>  		uint32_t thrd_idx,
>  		enum mlx5_vdpa_task_type task_type,
> -		uint32_t *remaining_cnt, uint32_t *err_cnt,
> +		RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
>  		void **task_data, uint32_t num);
>  int
>  mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
>  bool
> -mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
> -		uint32_t *err_cnt, uint32_t sleep_time);
> +mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
> +		RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time);
>  int
>  mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
>  void
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
> index 68ed841..84f611c 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
> @@ -48,7 +48,7 @@
>  mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
>  		uint32_t thrd_idx,
>  		enum mlx5_vdpa_task_type task_type,
> -		uint32_t *remaining_cnt, uint32_t *err_cnt,
> +		RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
>  		void **task_data, uint32_t num)
>  {
>  	struct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;
> @@ -70,8 +70,8 @@
>  		return -1;
>  	for (i = 0 ; i < num; i++)
>  		if (task[i].remaining_cnt)
> -			__atomic_fetch_add(task[i].remaining_cnt, 1,
> -				__ATOMIC_RELAXED);
> +			rte_atomic_fetch_add_explicit(task[i].remaining_cnt, 1,
> +				rte_memory_order_relaxed);
>  	/* wake up conf thread. */
>  	pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
>  	pthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);
> @@ -80,16 +80,16 @@
>  }
>  
>  bool
> -mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
> -		uint32_t *err_cnt, uint32_t sleep_time)
> +mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
> +		RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time)
>  {
>  	/* Check and wait all tasks done. */
> -	while (__atomic_load_n(remaining_cnt,
> -		__ATOMIC_RELAXED) != 0) {
> +	while (rte_atomic_load_explicit(remaining_cnt,
> +		rte_memory_order_relaxed) != 0) {
>  		rte_delay_us_sleep(sleep_time);
>  	}
> -	if (__atomic_load_n(err_cnt,
> -		__ATOMIC_RELAXED)) {
> +	if (rte_atomic_load_explicit(err_cnt,
> +		rte_memory_order_relaxed)) {
>  		DRV_LOG(ERR, "Tasks done with error.");
>  		return true;
>  	}
> @@ -137,8 +137,8 @@
>  			if (ret) {
>  				DRV_LOG(ERR,
>  				"Failed to register mr %d.", task.idx);
> -				__atomic_fetch_add(task.err_cnt, 1,
> -				__ATOMIC_RELAXED);
> +				rte_atomic_fetch_add_explicit(task.err_cnt, 1,
> +				rte_memory_order_relaxed);
>  			}
>  			break;
>  		case MLX5_VDPA_TASK_SETUP_VIRTQ:
> @@ -149,8 +149,8 @@
>  			if (ret) {
>  				DRV_LOG(ERR,
>  					"Failed to setup virtq %d.", task.idx);
> -				__atomic_fetch_add(
> -					task.err_cnt, 1, __ATOMIC_RELAXED);
> +				rte_atomic_fetch_add_explicit(
> +					task.err_cnt, 1, rte_memory_order_relaxed);
>  			}
>  			virtq->enable = 1;
>  			pthread_mutex_unlock(&virtq->virtq_lock);
> @@ -164,9 +164,9 @@
>  				DRV_LOG(ERR,
>  				"Failed to stop virtq %d.",
>  				task.idx);
> -				__atomic_fetch_add(
> +				rte_atomic_fetch_add_explicit(
>  					task.err_cnt, 1,
> -					__ATOMIC_RELAXED);
> +					rte_memory_order_relaxed);
>  				pthread_mutex_unlock(&virtq->virtq_lock);
>  				break;
>  			}
> @@ -176,9 +176,9 @@
>  				DRV_LOG(ERR,
>  		"Failed to get negotiated features virtq %d.",
>  				task.idx);
> -				__atomic_fetch_add(
> +				rte_atomic_fetch_add_explicit(
>  					task.err_cnt, 1,
> -					__ATOMIC_RELAXED);
> +					rte_memory_order_relaxed);
>  				pthread_mutex_unlock(&virtq->virtq_lock);
>  				break;
>  			}
> @@ -200,9 +200,9 @@
>  			if (!priv->connected)
>  				mlx5_vdpa_dev_cache_clean(priv);
>  			priv->vid = 0;
> -			__atomic_store_n(
> +			rte_atomic_store_explicit(
>  				&priv->dev_close_progress, 0,
> -				__ATOMIC_RELAXED);
> +				rte_memory_order_relaxed);
>  			break;
>  		case MLX5_VDPA_TASK_PREPARE_VIRTQ:
>  			ret = mlx5_vdpa_virtq_single_resource_prepare(
> @@ -211,9 +211,9 @@
>  				DRV_LOG(ERR,
>  				"Failed to prepare virtq %d.",
>  				task.idx);
> -				__atomic_fetch_add(
> +				rte_atomic_fetch_add_explicit(
>  				task.err_cnt, 1,
> -				__ATOMIC_RELAXED);
> +				rte_memory_order_relaxed);
>  			}
>  			break;
>  		default:
> @@ -222,8 +222,8 @@
>  			break;
>  		}
>  		if (task.remaining_cnt)
> -			__atomic_fetch_sub(task.remaining_cnt,
> -			1, __ATOMIC_RELAXED);
> +			rte_atomic_fetch_sub_explicit(task.remaining_cnt,
> +			1, rte_memory_order_relaxed);
>  	}
>  	return 0;
>  }
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> index 0fa671f..a207734 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
> @@ -92,7 +92,9 @@
>  int
>  mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
>  {
> -	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
> +	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
> +	RTE_ATOMIC(uint32_t) err_cnt = 0;
> +	uint32_t task_num = 0;
>  	uint32_t i, thrd_idx, data[1];
>  	struct mlx5_vdpa_virtq *virtq;
>  	uint64_t features;
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
> index e333f0b..4dfe800 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
> @@ -279,7 +279,9 @@
>  	uint8_t mode = 0;
>  	int ret = -rte_errno;
>  	uint32_t i, thrd_idx, data[1];
> -	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
> +	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
> +	RTE_ATOMIC(uint32_t) err_cnt = 0;
> +	uint32_t task_num = 0;
>  	struct rte_vhost_memory *mem = mlx5_vdpa_vhost_mem_regions_prepare
>  			(priv->vid, &mode, &priv->vmem_info.size,
>  			&priv->vmem_info.gcd, &priv->vmem_info.entries_num);
> diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
> index 607e290..093cdd0 100644
> --- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
> +++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
> @@ -666,7 +666,9 @@
>  {
>  	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
>  	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
> -	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
> +	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
> +	RTE_ATOMIC(uint32_t) err_cnt = 0;
> +	uint32_t task_num = 0;
>  	uint32_t i, thrd_idx, data[1];
>  	struct mlx5_vdpa_virtq *virtq;
>  	struct rte_vhost_vring vq;
> -- 
> 1.8.3.1
  

Patch
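
The series applies one mechanical pattern throughout: the variable's type is wrapped in RTE_ATOMIC() and each __atomic_* builtin is replaced by its rte_atomic_*_explicit counterpart, with the __ATOMIC_* memory orders mapped to rte_memory_order_*. Below is a minimal sketch of that pattern for a reference counter; the struct and function names are illustrative only and do not appear in the patch, while the API names are the ones the series uses.

    #include <stdbool.h>
    #include <stdint.h>

    #include <rte_stdatomic.h>

    struct example_obj {
    	/* was: uint32_t refcnt; accessed through __atomic_* builtins. */
    	RTE_ATOMIC(uint32_t) refcnt;
    };

    static inline void
    example_obj_get(struct example_obj *obj)
    {
    	/* was: __atomic_fetch_add(&obj->refcnt, 1, __ATOMIC_RELAXED); */
    	rte_atomic_fetch_add_explicit(&obj->refcnt, 1, rte_memory_order_relaxed);
    }

    static inline bool
    example_obj_put(struct example_obj *obj)
    {
    	/* was: __atomic_fetch_sub(...); true when the last reference is gone. */
    	return rte_atomic_fetch_sub_explicit(&obj->refcnt, 1,
    					     rte_memory_order_relaxed) - 1 == 0;
    }

    static inline bool
    example_obj_try_release(struct example_obj *obj)
    {
    	uint32_t expected = 1;

    	/*
    	 * was: __atomic_compare_exchange_n(&obj->refcnt, &expected, 0, false,
    	 *                                  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
    	 * The builtin's 'weak' argument has no equivalent here; the series
    	 * consistently switches to the strong variant.
    	 */
    	return rte_atomic_compare_exchange_strong_explicit(&obj->refcnt,
    			&expected, 0,
    			rte_memory_order_acquire, rte_memory_order_relaxed);
    }

    static inline uint32_t
    example_obj_refcnt(struct example_obj *obj)
    {
    	/* was: __atomic_load_n(&obj->refcnt, __ATOMIC_RELAXED); */
    	return rte_atomic_load_explicit(&obj->refcnt, rte_memory_order_relaxed);
    }

Pointer-typed fields follow the same shape, e.g. RTE_ATOMIC(struct mlx5_hlist *) read and written with rte_atomic_load_explicit/rte_atomic_store_explicit, as in the mlx5.h hunks of this patch.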

diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
index 28a1f56..2fcd859 100644
--- a/drivers/common/mlx5/linux/mlx5_nl.c
+++ b/drivers/common/mlx5/linux/mlx5_nl.c
@@ -175,10 +175,11 @@  struct mlx5_nl_port_info {
 	uint16_t state; /**< IB device port state (out). */
 };
 
-uint32_t atomic_sn;
+RTE_ATOMIC(uint32_t) atomic_sn;
 
 /* Generate Netlink sequence number. */
-#define MLX5_NL_SN_GENERATE (__atomic_fetch_add(&atomic_sn, 1, __ATOMIC_RELAXED) + 1)
+#define MLX5_NL_SN_GENERATE \
+	(rte_atomic_fetch_add_explicit(&atomic_sn, 1, rte_memory_order_relaxed) + 1)
 
 /**
  * Opens a Netlink socket.
diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
index 9c80277..14c70ed 100644
--- a/drivers/common/mlx5/mlx5_common.h
+++ b/drivers/common/mlx5/mlx5_common.h
@@ -195,7 +195,7 @@  enum mlx5_cqe_status {
 	/* Prevent speculative reading of other fields in CQE until
 	 * CQE is valid.
 	 */
-	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+	rte_atomic_thread_fence(rte_memory_order_acquire);
 
 	if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
 		     op_code == MLX5_CQE_REQ_ERR))
diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index 40ff915..72e36ed 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -35,7 +35,7 @@  struct mlx5_range {
 /** Memory region for a mempool. */
 struct mlx5_mempool_mr {
 	struct mlx5_pmd_mr pmd_mr;
-	uint32_t refcnt; /**< Number of mempools sharing this MR. */
+	RTE_ATOMIC(uint32_t) refcnt; /**< Number of mempools sharing this MR. */
 };
 
 /* Mempool registration. */
@@ -56,11 +56,11 @@  struct mlx5_mempool_reg {
 {
 	struct mlx5_mprq_buf *buf = opaque;
 
-	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+	if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) == 1) {
 		rte_mempool_put(buf->mp, buf);
-	} else if (unlikely(__atomic_fetch_sub(&buf->refcnt, 1,
-					       __ATOMIC_RELAXED) - 1 == 0)) {
-		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+	} else if (unlikely(rte_atomic_fetch_sub_explicit(&buf->refcnt, 1,
+					       rte_memory_order_relaxed) - 1 == 0)) {
+		rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
 		rte_mempool_put(buf->mp, buf);
 	}
 }
@@ -1650,7 +1650,7 @@  struct mlx5_mempool_get_extmem_data {
 	unsigned int i;
 
 	for (i = 0; i < mpr->mrs_n; i++)
-		__atomic_fetch_add(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&mpr->mrs[i].refcnt, 1, rte_memory_order_relaxed);
 }
 
 /**
@@ -1665,8 +1665,8 @@  struct mlx5_mempool_get_extmem_data {
 	bool ret = false;
 
 	for (i = 0; i < mpr->mrs_n; i++)
-		ret |= __atomic_fetch_sub(&mpr->mrs[i].refcnt, 1,
-					  __ATOMIC_RELAXED) - 1 == 0;
+		ret |= rte_atomic_fetch_sub_explicit(&mpr->mrs[i].refcnt, 1,
+					  rte_memory_order_relaxed) - 1 == 0;
 	return ret;
 }
 
diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index 8789d40..5bdf48a 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -93,7 +93,7 @@  struct mlx5_mr_share_cache {
 /* Multi-Packet RQ buffer header. */
 struct mlx5_mprq_buf {
 	struct rte_mempool *mp;
-	uint16_t refcnt; /* Atomically accessed refcnt. */
+	RTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. */
 	struct rte_mbuf_ext_shared_info shinfos[];
 	/*
 	 * Shared information per stride.
diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c
index e69d068..4b95d35 100644
--- a/drivers/common/mlx5/mlx5_common_utils.c
+++ b/drivers/common/mlx5/mlx5_common_utils.c
@@ -81,14 +81,14 @@  struct mlx5_list *
 	while (entry != NULL) {
 		if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
 			if (reuse) {
-				ret = __atomic_fetch_add(&entry->ref_cnt, 1,
-							 __ATOMIC_RELAXED);
+				ret = rte_atomic_fetch_add_explicit(&entry->ref_cnt, 1,
+							 rte_memory_order_relaxed);
 				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
 					l_const->name, (void *)entry,
 					entry->ref_cnt);
 			} else if (lcore_index < MLX5_LIST_GLOBAL) {
-				ret = __atomic_load_n(&entry->ref_cnt,
-						      __ATOMIC_RELAXED);
+				ret = rte_atomic_load_explicit(&entry->ref_cnt,
+						      rte_memory_order_relaxed);
 			}
 			if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
 				return entry;
@@ -151,13 +151,13 @@  struct mlx5_list_entry *
 {
 	struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
 	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
-	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
-					       __ATOMIC_RELAXED);
+	uint32_t inv_cnt = rte_atomic_exchange_explicit(&c->inv_cnt, 0,
+					       rte_memory_order_relaxed);
 
 	while (inv_cnt != 0 && entry != NULL) {
 		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
 
-		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
+		if (rte_atomic_load_explicit(&entry->ref_cnt, rte_memory_order_relaxed) == 0) {
 			LIST_REMOVE(entry, next);
 			if (l_const->lcores_share)
 				l_const->cb_clone_free(l_const->ctx, entry);
@@ -217,7 +217,7 @@  struct mlx5_list_entry *
 		entry->lcore_idx = (uint32_t)lcore_index;
 		LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
 				 entry, next);
-		__atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
 		DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
 			l_const->name, lcore_index,
 			(void *)entry, entry->ref_cnt);
@@ -254,7 +254,7 @@  struct mlx5_list_entry *
 	l_inconst->gen_cnt++;
 	rte_rwlock_write_unlock(&l_inconst->lock);
 	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
-	__atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
 	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
 		(void *)entry, entry->ref_cnt);
 	return local_entry;
@@ -285,7 +285,7 @@  struct mlx5_list_entry *
 {
 	struct mlx5_list_entry *gentry = entry->gentry;
 
-	if (__atomic_fetch_sub(&entry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+	if (rte_atomic_fetch_sub_explicit(&entry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
 		return 1;
 	if (entry->lcore_idx == (uint32_t)lcore_idx) {
 		LIST_REMOVE(entry, next);
@@ -294,23 +294,23 @@  struct mlx5_list_entry *
 		else
 			l_const->cb_remove(l_const->ctx, entry);
 	} else {
-		__atomic_fetch_add(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
-				   1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
+				   1, rte_memory_order_relaxed);
 	}
 	if (!l_const->lcores_share) {
-		__atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
 		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
 			l_const->name, (void *)entry);
 		return 0;
 	}
-	if (__atomic_fetch_sub(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+	if (rte_atomic_fetch_sub_explicit(&gentry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
 		return 1;
 	rte_rwlock_write_lock(&l_inconst->lock);
 	if (likely(gentry->ref_cnt == 0)) {
 		LIST_REMOVE(gentry, next);
 		rte_rwlock_write_unlock(&l_inconst->lock);
 		l_const->cb_remove(l_const->ctx, gentry);
-		__atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
 		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
 			l_const->name, (void *)gentry);
 		return 0;
@@ -377,7 +377,7 @@  struct mlx5_list_entry *
 mlx5_list_get_entry_num(struct mlx5_list *list)
 {
 	MLX5_ASSERT(list);
-	return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
+	return rte_atomic_load_explicit(&list->l_inconst.count, rte_memory_order_relaxed);
 }
 
 /********************* Hash List **********************/
diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
index ae15119..cb4d104 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -29,7 +29,7 @@ 
  */
 struct mlx5_list_entry {
 	LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
-	uint32_t ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
+	RTE_ATOMIC(uint32_t) ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
 	uint32_t lcore_idx;
 	union {
 		struct mlx5_list_entry *gentry;
@@ -39,7 +39,7 @@  struct mlx5_list_entry {
 
 struct mlx5_list_cache {
 	LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
-	uint32_t inv_cnt; /* Invalid entries counter. */
+	RTE_ATOMIC(uint32_t) inv_cnt; /* Invalid entries counter. */
 } __rte_cache_aligned;
 
 /**
@@ -111,7 +111,7 @@  struct mlx5_list_const {
 struct mlx5_list_inconst {
 	rte_rwlock_t lock; /* read/write lock. */
 	volatile uint32_t gen_cnt; /* List modification may update it. */
-	volatile uint32_t count; /* number of entries in list. */
+	volatile RTE_ATOMIC(uint32_t) count; /* number of entries in list. */
 	struct mlx5_list_cache *cache[MLX5_LIST_MAX];
 	/* Lcore cache, last index is the global cache. */
 };
diff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c
index c58c41d..ef6dabe 100644
--- a/drivers/common/mlx5/mlx5_malloc.c
+++ b/drivers/common/mlx5/mlx5_malloc.c
@@ -16,7 +16,7 @@  struct mlx5_sys_mem {
 	uint32_t init:1; /* Memory allocator initialized. */
 	uint32_t enable:1; /* System memory select. */
 	uint32_t reserve:30; /* Reserve. */
-	struct rte_memseg_list *last_msl;
+	RTE_ATOMIC(struct rte_memseg_list *) last_msl;
 	/* last allocated rte memory memseg list. */
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 	uint64_t malloc_sys;
@@ -93,14 +93,14 @@  struct mlx5_sys_mem {
 	 * different with the cached msl.
 	 */
 	if (addr && !mlx5_mem_check_msl(addr,
-	    (struct rte_memseg_list *)__atomic_load_n
-	    (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
-		__atomic_store_n(&mlx5_sys_mem.last_msl,
+	    (struct rte_memseg_list *)rte_atomic_load_explicit
+	    (&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
+		rte_atomic_store_explicit(&mlx5_sys_mem.last_msl,
 			rte_mem_virt2memseg_list(addr),
-			__ATOMIC_RELAXED);
+			rte_memory_order_relaxed);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-		__atomic_fetch_add(&mlx5_sys_mem.msl_update, 1,
-				   __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_update, 1,
+				   rte_memory_order_relaxed);
 #endif
 	}
 }
@@ -122,11 +122,11 @@  struct mlx5_sys_mem {
 	 * to check if the memory belongs to rte memory.
 	 */
 	if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
-	    __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
+	    rte_atomic_load_explicit(&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
 		if (!rte_mem_virt2memseg_list(addr))
 			return false;
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-		__atomic_fetch_add(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_miss, 1, rte_memory_order_relaxed);
 #endif
 	}
 	return true;
@@ -185,8 +185,8 @@  struct mlx5_sys_mem {
 		mlx5_mem_update_msl(addr);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 		if (addr)
-			__atomic_fetch_add(&mlx5_sys_mem.malloc_rte, 1,
-					   __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_rte, 1,
+					   rte_memory_order_relaxed);
 #endif
 		return addr;
 	}
@@ -199,8 +199,8 @@  struct mlx5_sys_mem {
 		addr = malloc(size);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 	if (addr)
-		__atomic_fetch_add(&mlx5_sys_mem.malloc_sys, 1,
-				   __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_sys, 1,
+				   rte_memory_order_relaxed);
 #endif
 	return addr;
 }
@@ -233,8 +233,8 @@  struct mlx5_sys_mem {
 		mlx5_mem_update_msl(new_addr);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 		if (new_addr)
-			__atomic_fetch_add(&mlx5_sys_mem.realloc_rte, 1,
-					   __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_rte, 1,
+					   rte_memory_order_relaxed);
 #endif
 		return new_addr;
 	}
@@ -246,8 +246,8 @@  struct mlx5_sys_mem {
 	new_addr = realloc(addr, size);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 	if (new_addr)
-		__atomic_fetch_add(&mlx5_sys_mem.realloc_sys, 1,
-				   __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_sys, 1,
+				   rte_memory_order_relaxed);
 #endif
 	return new_addr;
 }
@@ -259,14 +259,14 @@  struct mlx5_sys_mem {
 		return;
 	if (!mlx5_mem_is_rte(addr)) {
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-		__atomic_fetch_add(&mlx5_sys_mem.free_sys, 1,
-				   __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_sys, 1,
+				   rte_memory_order_relaxed);
 #endif
 		mlx5_os_free(addr);
 	} else {
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-		__atomic_fetch_add(&mlx5_sys_mem.free_rte, 1,
-				   __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_rte, 1,
+				   rte_memory_order_relaxed);
 #endif
 		rte_free(addr);
 	}
@@ -280,14 +280,14 @@  struct mlx5_sys_mem {
 		" free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
 		" realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
 		" update:%"PRIi64"",
-		__atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),
-		__atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),
-		__atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),
-		__atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),
-		__atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),
-		__atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),
-		__atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),
-		__atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));
+		rte_atomic_load_explicit(&mlx5_sys_mem.malloc_sys, rte_memory_order_relaxed),
+		rte_atomic_load_explicit(&mlx5_sys_mem.realloc_sys, rte_memory_order_relaxed),
+		rte_atomic_load_explicit(&mlx5_sys_mem.free_sys, rte_memory_order_relaxed),
+		rte_atomic_load_explicit(&mlx5_sys_mem.malloc_rte, rte_memory_order_relaxed),
+		rte_atomic_load_explicit(&mlx5_sys_mem.realloc_rte, rte_memory_order_relaxed),
+		rte_atomic_load_explicit(&mlx5_sys_mem.free_rte, rte_memory_order_relaxed),
+		rte_atomic_load_explicit(&mlx5_sys_mem.msl_miss, rte_memory_order_relaxed),
+		rte_atomic_load_explicit(&mlx5_sys_mem.msl_update, rte_memory_order_relaxed));
 #endif
 }
 
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index dd5a0c5..d35cf82 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -1867,9 +1867,9 @@  int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
 		return -ENOTSUP;
 	}
 	/* Check there is no concurrent mapping in other thread. */
-	if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected,
-					 base, false,
-					 __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+	if (!rte_atomic_compare_exchange_strong_explicit(&ppriv->hca_bar, &expected,
+					 base,
+					 rte_memory_order_relaxed, rte_memory_order_relaxed))
 		rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));
 	return 0;
 }
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index b54f3cc..63da8f4 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1117,7 +1117,7 @@ 
 		return 0;
 	}
 	/* Only need to check refcnt, 0 after "sh" is allocated. */
-	if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+	if (!!(rte_atomic_fetch_add_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed))) {
 		MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
 		priv->lb_used = 1;
 		return 0;
@@ -1163,7 +1163,7 @@ 
 		claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
 		sh->self_lb.ibv_cq = NULL;
 	}
-	__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed);
 	return -rte_errno;
 #else
 	RTE_SET_USED(dev);
@@ -1186,8 +1186,9 @@ 
 
 	if (!priv->lb_used)
 		return;
-	MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
-	if (!(__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) - 1)) {
+	MLX5_ASSERT(rte_atomic_load_explicit(&sh->self_lb.refcnt, rte_memory_order_relaxed));
+	if (!(rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1,
+	    rte_memory_order_relaxed) - 1)) {
 		if (sh->self_lb.qp) {
 			claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
 			sh->self_lb.qp = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 2cf21a1..03944e3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -854,8 +854,8 @@  static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		ct_pool = mng->pools[idx];
 		for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
 			ct = &ct_pool->actions[i];
-			val = __atomic_fetch_sub(&ct->refcnt, 1,
-						 __ATOMIC_RELAXED);
+			val = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1,
+						 rte_memory_order_relaxed);
 			MLX5_ASSERT(val == 1);
 			if (val > 1)
 				cnt++;
@@ -1081,7 +1081,8 @@  static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		DRV_LOG(ERR, "Dynamic flex parser is not supported on HWS");
 		return -ENOTSUP;
 	}
-	if (__atomic_fetch_add(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) + 1 > 1)
+	if (rte_atomic_fetch_add_explicit(&priv->sh->srh_flex_parser.refcnt, 1,
+	    rte_memory_order_relaxed) + 1 > 1)
 		return 0;
 	priv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,
 			sizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);
@@ -1172,7 +1173,7 @@  static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;
 
-	if (__atomic_fetch_sub(&fp->refcnt, 1, __ATOMIC_RELAXED) - 1)
+	if (rte_atomic_fetch_sub_explicit(&fp->refcnt, 1, rte_memory_order_relaxed) - 1)
 		return;
 	mlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);
 	mlx5_free(fp->flex.devx_fp);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index ee13ad6..d76e4b3 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -370,7 +370,7 @@  struct mlx5_drop {
 struct mlx5_lb_ctx {
 	struct ibv_qp *qp; /* QP object. */
 	void *ibv_cq; /* Completion queue. */
-	uint16_t refcnt; /* Reference count for representors. */
+	RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
 };
 
 /* HW steering queue job descriptor type. */
@@ -473,10 +473,10 @@  enum mlx5_counter_type {
 
 /* Counter age parameter. */
 struct mlx5_age_param {
-	uint16_t state; /**< Age state (atomically accessed). */
+	RTE_ATOMIC(uint16_t) state; /**< Age state (atomically accessed). */
 	uint16_t port_id; /**< Port id of the counter. */
 	uint32_t timeout:24; /**< Aging timeout in seconds. */
-	uint32_t sec_since_last_hit;
+	RTE_ATOMIC(uint32_t) sec_since_last_hit;
 	/**< Time in seconds since last hit (atomically accessed). */
 	void *context; /**< Flow counter age context. */
 };
@@ -489,7 +489,7 @@  struct flow_counter_stats {
 /* Shared counters information for counters. */
 struct mlx5_flow_counter_shared {
 	union {
-		uint32_t refcnt; /* Only for shared action management. */
+		RTE_ATOMIC(uint32_t) refcnt; /* Only for shared action management. */
 		uint32_t id; /* User counter ID for legacy sharing. */
 	};
 };
@@ -580,7 +580,7 @@  struct mlx5_counter_stats_raw {
 
 /* Counter global management structure. */
 struct mlx5_flow_counter_mng {
-	volatile uint16_t n_valid; /* Number of valid pools. */
+	volatile RTE_ATOMIC(uint16_t) n_valid; /* Number of valid pools. */
 	uint16_t last_pool_idx; /* Last used pool index */
 	int min_id; /* The minimum counter ID in the pools. */
 	int max_id; /* The maximum counter ID in the pools. */
@@ -646,7 +646,7 @@  struct mlx5_aso_sq {
 struct mlx5_aso_age_action {
 	LIST_ENTRY(mlx5_aso_age_action) next;
 	void *dr_action;
-	uint32_t refcnt;
+	RTE_ATOMIC(uint32_t) refcnt;
 	/* Following fields relevant only when action is active. */
 	uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
 	struct mlx5_age_param age_params;
@@ -680,7 +680,7 @@  struct mlx5_geneve_tlv_option_resource {
 	rte_be16_t option_class; /* geneve tlv opt class.*/
 	uint8_t option_type; /* geneve tlv opt type.*/
 	uint8_t length; /* geneve tlv opt length. */
-	uint32_t refcnt; /* geneve tlv object reference counter */
+	RTE_ATOMIC(uint32_t) refcnt; /* geneve tlv object reference counter */
 };
 
 
@@ -895,7 +895,7 @@  struct mlx5_flow_meter_policy {
 	uint16_t group;
 	/* The group. */
 	rte_spinlock_t sl;
-	uint32_t ref_cnt;
+	RTE_ATOMIC(uint32_t) ref_cnt;
 	/* Use count. */
 	struct rte_flow_pattern_template *hws_item_templ;
 	/* Hardware steering item templates. */
@@ -1030,7 +1030,7 @@  struct mlx5_flow_meter_profile {
 		struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
 		/**< srtcm_rfc2697 struct. */
 	};
-	uint32_t ref_cnt; /**< Use count. */
+	RTE_ATOMIC(uint32_t) ref_cnt; /**< Use count. */
 	uint32_t g_support:1; /**< If G color will be generated. */
 	uint32_t y_support:1; /**< If Y color will be generated. */
 	uint32_t initialized:1; /**< Initialized. */
@@ -1070,7 +1070,7 @@  struct mlx5_aso_mtr {
 	enum mlx5_aso_mtr_type type;
 	struct mlx5_flow_meter_info fm;
 	/**< Pointer to the next aso flow meter structure. */
-	uint8_t state; /**< ASO flow meter state. */
+	RTE_ATOMIC(uint8_t) state; /**< ASO flow meter state. */
 	uint32_t offset;
 	enum rte_color init_color;
 };
@@ -1116,7 +1116,7 @@  struct mlx5_flow_mtr_mng {
 	/* Default policy table. */
 	uint32_t def_policy_id;
 	/* Default policy id. */
-	uint32_t def_policy_ref_cnt;
+	RTE_ATOMIC(uint32_t) def_policy_ref_cnt;
 	/** def_policy meter use count. */
 	struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
 	/* Meter drop table. */
@@ -1189,8 +1189,8 @@  struct mlx5_txpp_wq {
 
 /* Tx packet pacing internal timestamp. */
 struct mlx5_txpp_ts {
-	uint64_t ci_ts;
-	uint64_t ts;
+	RTE_ATOMIC(uint64_t) ci_ts;
+	RTE_ATOMIC(uint64_t) ts;
 };
 
 /* Tx packet pacing structure. */
@@ -1213,12 +1213,12 @@  struct mlx5_dev_txpp {
 	struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
 	uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
 	/* Statistics counters. */
-	uint64_t err_miss_int; /* Missed service interrupt. */
-	uint64_t err_rearm_queue; /* Rearm Queue errors. */
-	uint64_t err_clock_queue; /* Clock Queue errors. */
-	uint64_t err_ts_past; /* Timestamp in the past. */
-	uint64_t err_ts_future; /* Timestamp in the distant future. */
-	uint64_t err_ts_order; /* Timestamp not in ascending order. */
+	RTE_ATOMIC(uint64_t) err_miss_int; /* Missed service interrupt. */
+	RTE_ATOMIC(uint64_t) err_rearm_queue; /* Rearm Queue errors. */
+	RTE_ATOMIC(uint64_t) err_clock_queue; /* Clock Queue errors. */
+	RTE_ATOMIC(uint64_t) err_ts_past; /* Timestamp in the past. */
+	RTE_ATOMIC(uint64_t) err_ts_future; /* Timestamp in the distant future. */
+	RTE_ATOMIC(uint64_t) err_ts_order; /* Timestamp not in ascending order. */
 };
 
 /* Sample ID information of eCPRI flex parser structure. */
@@ -1279,16 +1279,16 @@  struct mlx5_aso_ct_action {
 	void *dr_action_orig;
 	/* General action object for reply dir. */
 	void *dr_action_rply;
-	uint32_t refcnt; /* Action used count in device flows. */
+	RTE_ATOMIC(uint32_t) refcnt; /* Action used count in device flows. */
 	uint16_t offset; /* Offset of ASO CT in DevX objects bulk. */
 	uint16_t peer; /* The only peer port index could also use this CT. */
-	enum mlx5_aso_ct_state state; /* ASO CT state. */
+	RTE_ATOMIC(enum mlx5_aso_ct_state) state; /* ASO CT state. */
 	bool is_original; /* The direction of the DR action to be used. */
 };
 
 /* CT action object state update. */
 #define MLX5_ASO_CT_UPDATE_STATE(c, s) \
-	__atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)
+	rte_atomic_store_explicit(&((c)->state), (s), rte_memory_order_relaxed)
 
 #ifdef PEDANTIC
 #pragma GCC diagnostic ignored "-Wpedantic"
@@ -1362,7 +1362,7 @@  struct mlx5_flex_pattern_field {
 /* Port flex item context. */
 struct mlx5_flex_item {
 	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
-	uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+	RTE_ATOMIC(uint32_t) refcnt; /* Atomically accessed refcnt by flows. */
 	enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
 	uint32_t mapnum; /* Number of pattern translation entries. */
 	struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
@@ -1375,7 +1375,7 @@  struct mlx5_flex_item {
 #define MLX5_SRV6_SAMPLE_NUM 5
 /* Mlx5 internal flex parser profile structure. */
 struct mlx5_internal_flex_parser_profile {
-	uint32_t refcnt;
+	RTE_ATOMIC(uint32_t) refcnt;
 	struct mlx5_flex_item flex; /* Hold map info for modify field. */
 };
 
@@ -1474,9 +1474,9 @@  struct mlx5_dev_ctx_shared {
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];
 #endif
-	struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
-	struct mlx5_hlist *modify_cmds;
-	struct mlx5_hlist *tag_table;
+	RTE_ATOMIC(struct mlx5_hlist *) encaps_decaps; /* Encap/decap action hash list. */
+	RTE_ATOMIC(struct mlx5_hlist *) modify_cmds;
+	RTE_ATOMIC(struct mlx5_hlist *) tag_table;
 	struct mlx5_list *port_id_action_list; /* Port ID action list. */
 	struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
 	struct mlx5_list *sample_action_list; /* List of sample actions. */
@@ -1487,7 +1487,7 @@  struct mlx5_dev_ctx_shared {
 	/* SW steering counters management structure. */
 	void *default_miss_action; /* Default miss action. */
 	struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
-	struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
+	RTE_ATOMIC(struct mlx5_indexed_pool *) mdh_ipools[MLX5_MAX_MODIFY_NUM];
 	/* Shared interrupt handler section. */
 	struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
 	struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
@@ -1530,7 +1530,7 @@  struct mlx5_dev_ctx_shared {
  * Caution, secondary process may rebuild the struct during port start.
  */
 struct mlx5_proc_priv {
-	void *hca_bar;
+	RTE_ATOMIC(void *) hca_bar;
 	/* Mapped HCA PCI BAR area. */
 	size_t uar_table_sz;
 	/* Size of UAR register table. */
@@ -1595,7 +1595,7 @@  struct mlx5_rxq_obj {
 /* Indirection table. */
 struct mlx5_ind_table_obj {
 	LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
-	uint32_t refcnt; /* Reference counter. */
+	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
 	union {
 		void *ind_table; /**< Indirection table. */
 		struct mlx5_devx_obj *rqt; /* DevX RQT object. */
@@ -1746,7 +1746,7 @@  enum mlx5_quota_state {
 };
 
 struct mlx5_quota {
-	uint8_t state; /* object state */
+	RTE_ATOMIC(uint8_t) state; /* object state */
 	uint8_t mode;  /* metering mode */
 	/**
 	 * Keep track of application update types.
@@ -1877,7 +1877,7 @@  struct mlx5_priv {
 	uint32_t flex_item_map; /* Map of allocated flex item elements. */
 	uint32_t nb_queue; /* HW steering queue number. */
 	struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
-	uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
+	RTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference counter. */
 	struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
 	struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
@@ -1917,7 +1917,7 @@  struct mlx5_priv {
 	/**< HW steering templates used to create control flow rules. */
 #endif
 	struct rte_eth_dev *shared_host; /* Host device for HW steering. */
-	uint16_t shared_refcnt; /* HW steering host reference counter. */
+	RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 85e8c77..08b595a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4716,8 +4716,8 @@  struct mlx5_translated_action_handle {
 			shared_rss = mlx5_ipool_get
 				(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
 									   idx);
-			__atomic_fetch_add(&shared_rss->refcnt, 1,
-					   __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,
+					   rte_memory_order_relaxed);
 			return idx;
 		default:
 			break;
@@ -7533,7 +7533,7 @@  struct mlx5_list_entry *
 	if (tunnel) {
 		flow->tunnel = 1;
 		flow->tunnel_id = tunnel->tunnel_id;
-		__atomic_fetch_add(&tunnel->refctn, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
 		mlx5_free(default_miss_ctx.queue);
 	}
 	mlx5_flow_pop_thread_workspace();
@@ -7544,10 +7544,10 @@  struct mlx5_list_entry *
 	flow_mreg_del_copy_action(dev, flow);
 	flow_drv_destroy(dev, flow);
 	if (rss_desc->shared_rss)
-		__atomic_fetch_sub(&((struct mlx5_shared_action_rss *)
+		rte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)
 			mlx5_ipool_get
 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
-			rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
+			rss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);
 	mlx5_ipool_free(priv->flows[type], idx);
 	rte_errno = ret; /* Restore rte_errno. */
 	ret = rte_errno;
@@ -8050,7 +8050,8 @@  struct rte_flow *
 
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
-		if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+		if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
+		    rte_memory_order_relaxed) - 1))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
 	flow_mreg_del_copy_action(dev, flow);
@@ -9948,7 +9949,7 @@  struct mlx5_flow_workspace*
 {
 	uint32_t pools_n, us;
 
-	pools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);
+	pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed);
 	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
 	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
 	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
@@ -10050,17 +10051,17 @@  struct mlx5_flow_workspace*
 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
 		cnt = MLX5_POOL_GET_CNT(pool, i);
 		age_param = MLX5_CNT_TO_AGE(cnt);
-		if (__atomic_load_n(&age_param->state,
-				    __ATOMIC_RELAXED) != AGE_CANDIDATE)
+		if (rte_atomic_load_explicit(&age_param->state,
+				    rte_memory_order_relaxed) != AGE_CANDIDATE)
 			continue;
 		if (cur->data[i].hits != prev->data[i].hits) {
-			__atomic_store_n(&age_param->sec_since_last_hit, 0,
-					 __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,
+					 rte_memory_order_relaxed);
 			continue;
 		}
-		if (__atomic_fetch_add(&age_param->sec_since_last_hit,
+		if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,
 				       time_delta,
-				       __ATOMIC_RELAXED) + time_delta <= age_param->timeout)
+				       rte_memory_order_relaxed) + time_delta <= age_param->timeout)
 			continue;
 		/**
 		 * Hold the lock first, or if between the
@@ -10071,10 +10072,10 @@  struct mlx5_flow_workspace*
 		priv = rte_eth_devices[age_param->port_id].data->dev_private;
 		age_info = GET_PORT_AGE_INFO(priv);
 		rte_spinlock_lock(&age_info->aged_sl);
-		if (__atomic_compare_exchange_n(&age_param->state, &expected,
-						AGE_TMOUT, false,
-						__ATOMIC_RELAXED,
-						__ATOMIC_RELAXED)) {
+		if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+						AGE_TMOUT,
+						rte_memory_order_relaxed,
+						rte_memory_order_relaxed)) {
 			TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
 			MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
 		}
@@ -11904,7 +11905,7 @@  struct tunnel_db_element_release_ctx {
 {
 	struct tunnel_db_element_release_ctx *ctx = x;
 	ctx->ret = 0;
-	if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+	if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1))
 		mlx5_flow_tunnel_free(dev, tunnel);
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 094be12..5541244 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1018,7 +1018,7 @@  struct mlx5_flow_tunnel {
 	LIST_ENTRY(mlx5_flow_tunnel) chain;
 	struct rte_flow_tunnel app_tunnel;	/** app tunnel copy */
 	uint32_t tunnel_id;			/** unique tunnel ID */
-	uint32_t refctn;
+	RTE_ATOMIC(uint32_t) refctn;
 	struct rte_flow_action action;
 	struct rte_flow_item item;
 	struct mlx5_hlist *groups;		/** tunnel groups */
@@ -1338,7 +1338,7 @@  struct rte_flow_pattern_template {
 	struct mlx5dr_match_template *mt; /* mlx5 match template. */
 	uint64_t item_flags; /* Item layer flags. */
 	uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
-	uint32_t refcnt;  /* Reference counter. */
+	RTE_ATOMIC(uint32_t) refcnt;  /* Reference counter. */
 	/*
 	 * If true, then rule pattern should be prepended with
 	 * represented_port pattern item.
@@ -1368,7 +1368,7 @@  struct rte_flow_actions_template {
 	uint16_t reformat_off; /* Offset of DR reformat action. */
 	uint16_t mhdr_off; /* Offset of DR modify header action. */
 	uint16_t recom_off;  /* Offset of DR IPv6 routing push remove action. */
-	uint32_t refcnt; /* Reference counter. */
+	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
 	uint8_t flex_item; /* flex item index. */
 };
 
@@ -1388,7 +1388,7 @@  struct mlx5_hw_encap_decap_action {
 	/* Is header_reformat action shared across flows in table. */
 	uint32_t shared:1;
 	uint32_t multi_pattern:1;
-	volatile uint32_t *multi_pattern_refcnt;
+	volatile RTE_ATOMIC(uint32_t) *multi_pattern_refcnt;
 	size_t data_size; /* Action metadata size. */
 	uint8_t data[]; /* Action data. */
 };
@@ -1411,7 +1411,7 @@  struct mlx5_hw_modify_header_action {
 	/* Is MODIFY_HEADER action shared across flows in table. */
 	uint32_t shared:1;
 	uint32_t multi_pattern:1;
-	volatile uint32_t *multi_pattern_refcnt;
+	volatile RTE_ATOMIC(uint32_t) *multi_pattern_refcnt;
 	/* Amount of modification commands stored in the precompiled buffer. */
 	uint32_t mhdr_cmds_num;
 	/* Precompiled modification commands. */
@@ -1567,7 +1567,7 @@  struct rte_flow_template_table {
 /* Shared RSS action structure */
 struct mlx5_shared_action_rss {
 	ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
-	uint32_t refcnt; /**< Atomically accessed refcnt. */
+	RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
 	struct rte_flow_action_rss origin; /**< Original rte RSS action. */
 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
 	struct mlx5_ind_table_obj *ind_tbl;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index 3c08da0..d1f014e 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -619,7 +619,7 @@ 
 			uint8_t *u8addr;
 			uint8_t hit;
 
-			if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
+			if (rte_atomic_load_explicit(&ap->state, rte_memory_order_relaxed) !=
 					    AGE_CANDIDATE)
 				continue;
 			byte = 63 - (j / 8);
@@ -627,13 +627,13 @@ 
 			u8addr = (uint8_t *)addr;
 			hit = (u8addr[byte] >> offset) & 0x1;
 			if (hit) {
-				__atomic_store_n(&ap->sec_since_last_hit, 0,
-						 __ATOMIC_RELAXED);
+				rte_atomic_store_explicit(&ap->sec_since_last_hit, 0,
+						 rte_memory_order_relaxed);
 			} else {
 				struct mlx5_priv *priv;
 
-				__atomic_fetch_add(&ap->sec_since_last_hit,
-						   diff, __ATOMIC_RELAXED);
+				rte_atomic_fetch_add_explicit(&ap->sec_since_last_hit,
+						   diff, rte_memory_order_relaxed);
 				/* If timeout passed add to aged-out list. */
 				if (ap->sec_since_last_hit <= ap->timeout)
 					continue;
@@ -641,12 +641,11 @@ 
 				rte_eth_devices[ap->port_id].data->dev_private;
 				age_info = GET_PORT_AGE_INFO(priv);
 				rte_spinlock_lock(&age_info->aged_sl);
-				if (__atomic_compare_exchange_n(&ap->state,
+				if (rte_atomic_compare_exchange_strong_explicit(&ap->state,
 								&expected,
 								AGE_TMOUT,
-								false,
-							       __ATOMIC_RELAXED,
-							    __ATOMIC_RELAXED)) {
+							       rte_memory_order_relaxed,
+							    rte_memory_order_relaxed)) {
 					LIST_INSERT_HEAD(&age_info->aged_aso,
 							 act, next);
 					MLX5_AGE_SET(age_info,
@@ -909,9 +908,9 @@ 
 	for (i = 0; i < aso_mtrs_nums; ++i) {
 		aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
 		MLX5_ASSERT(aso_mtr);
-		(void)__atomic_compare_exchange_n(&aso_mtr->state,
+		(void)rte_atomic_compare_exchange_strong_explicit(&aso_mtr->state,
 				&exp_state, ASO_METER_READY,
-				false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+				rte_memory_order_relaxed, rte_memory_order_relaxed);
 	}
 }
 
@@ -1056,12 +1055,12 @@ 
 		sq = &sh->mtrmng->pools_mng.sq;
 		need_lock = true;
 	}
-	state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);
+	state = rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed);
 	if (state == ASO_METER_READY || state == ASO_METER_WAIT_ASYNC)
 		return 0;
 	do {
 		mlx5_aso_mtr_completion_handle(sq, need_lock);
-		if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
+		if (rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed) ==
 					    ASO_METER_READY)
 			return 0;
 		/* Waiting for CQE ready. */
@@ -1360,7 +1359,7 @@ 
 	uint16_t wqe_idx;
 	struct mlx5_aso_ct_pool *pool;
 	enum mlx5_aso_ct_state state =
-				__atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+				rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
 
 	if (state == ASO_CONNTRACK_FREE) {
 		DRV_LOG(ERR, "Fail: No context to query");
@@ -1569,12 +1568,12 @@ 
 		sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
 	else
 		sq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);
-	if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+	if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
 	    ASO_CONNTRACK_READY)
 		return 0;
 	do {
 		mlx5_aso_ct_completion_handle(sh, sq, need_lock);
-		if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+		if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
 		    ASO_CONNTRACK_READY)
 			return 0;
 		/* Waiting for CQE ready, consider should block or sleep. */
@@ -1740,7 +1739,7 @@ 
 	bool need_lock = !!(queue == MLX5_HW_INV_QUEUE);
 	uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
 	enum mlx5_aso_ct_state state =
-				__atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+				rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
 
 	if (sh->config.dv_flow_en == 2)
 		sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
@@ -1756,7 +1755,7 @@ 
 	}
 	do {
 		mlx5_aso_ct_completion_handle(sh, sq, need_lock);
-		state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+		state = rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
 		if (state == ASO_CONNTRACK_READY ||
 		    state == ASO_CONNTRACK_QUERY)
 			return 0;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 154e509..ca45cd8 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -285,7 +285,7 @@  struct field_modify_info modify_tcp[] = {
 }
 
 static inline struct mlx5_hlist *
-flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, RTE_ATOMIC(struct mlx5_hlist *) *phl,
 		     const char *name, uint32_t size, bool direct_key,
 		     bool lcores_share, void *ctx,
 		     mlx5_list_create_cb cb_create,
@@ -299,7 +299,7 @@  struct field_modify_info modify_tcp[] = {
 	struct mlx5_hlist *expected = NULL;
 	char s[MLX5_NAME_SIZE];
 
-	hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+	hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
 	if (likely(hl))
 		return hl;
 	snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
@@ -313,11 +313,11 @@  struct field_modify_info modify_tcp[] = {
 				   "cannot allocate resource memory");
 		return NULL;
 	}
-	if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
-					 __ATOMIC_SEQ_CST,
-					 __ATOMIC_SEQ_CST)) {
+	if (!rte_atomic_compare_exchange_strong_explicit(phl, &expected, hl,
+					 rte_memory_order_seq_cst,
+					 rte_memory_order_seq_cst)) {
 		mlx5_hlist_destroy(hl);
-		hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+		hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
 	}
 	return hl;
 }
@@ -5882,8 +5882,8 @@  struct mlx5_list_entry *
 static struct mlx5_indexed_pool *
 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
 {
-	struct mlx5_indexed_pool *ipool = __atomic_load_n
-				     (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
+	struct mlx5_indexed_pool *ipool = rte_atomic_load_explicit
+				     (&sh->mdh_ipools[index], rte_memory_order_seq_cst);
 
 	if (!ipool) {
 		struct mlx5_indexed_pool *expected = NULL;
@@ -5908,13 +5908,13 @@  struct mlx5_list_entry *
 		ipool = mlx5_ipool_create(&cfg);
 		if (!ipool)
 			return NULL;
-		if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
-						 &expected, ipool, false,
-						 __ATOMIC_SEQ_CST,
-						 __ATOMIC_SEQ_CST)) {
+		if (!rte_atomic_compare_exchange_strong_explicit(&sh->mdh_ipools[index],
+						 &expected, ipool,
+						 rte_memory_order_seq_cst,
+						 rte_memory_order_seq_cst)) {
 			mlx5_ipool_destroy(ipool);
-			ipool = __atomic_load_n(&sh->mdh_ipools[index],
-						__ATOMIC_SEQ_CST);
+			ipool = rte_atomic_load_explicit(&sh->mdh_ipools[index],
+						rte_memory_order_seq_cst);
 		}
 	}
 	return ipool;
@@ -6735,9 +6735,9 @@  struct mlx5_list_entry *
 
 	age_info = GET_PORT_AGE_INFO(priv);
 	age_param = flow_dv_counter_idx_get_age(dev, counter);
-	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
-					 AGE_FREE, false, __ATOMIC_RELAXED,
-					 __ATOMIC_RELAXED)) {
+	if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+					 AGE_FREE, rte_memory_order_relaxed,
+					 rte_memory_order_relaxed)) {
 		/**
 		 * We need the lock even it is age timeout,
 		 * since counter may still in process.
@@ -6745,7 +6745,7 @@  struct mlx5_list_entry *
 		rte_spinlock_lock(&age_info->aged_sl);
 		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
 		rte_spinlock_unlock(&age_info->aged_sl);
-		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
 	}
 }
 
@@ -6781,8 +6781,8 @@  struct mlx5_list_entry *
 		 * indirect action API, shared info is 1 before the reduction,
 		 * so this condition is failed and function doesn't return here.
 		 */
-		if (__atomic_fetch_sub(&cnt->shared_info.refcnt, 1,
-				       __ATOMIC_RELAXED) - 1)
+		if (rte_atomic_fetch_sub_explicit(&cnt->shared_info.refcnt, 1,
+				       rte_memory_order_relaxed) - 1)
 			return;
 	}
 	cnt->pool = pool;
@@ -9915,8 +9915,8 @@  struct mlx5_list_entry *
 			 * Increasing refcnt only in SWS. HWS uses it as global.
 			 */
 			if (priv->sh->config.dv_flow_en == 1)
-				__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
-						   __ATOMIC_RELAXED);
+				rte_atomic_fetch_add_explicit(&geneve_opt_resource->refcnt, 1,
+						   rte_memory_order_relaxed);
 		} else {
 			ret = rte_flow_error_set(error, ENOMEM,
 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -9951,8 +9951,8 @@  struct mlx5_list_entry *
 		geneve_opt_resource->option_class = geneve_opt_v->option_class;
 		geneve_opt_resource->option_type = geneve_opt_v->option_type;
 		geneve_opt_resource->length = geneve_opt_v->option_len;
-		__atomic_store_n(&geneve_opt_resource->refcnt, 1,
-				__ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&geneve_opt_resource->refcnt, 1,
+				rte_memory_order_relaxed);
 	}
 exit:
 	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
@@ -11919,8 +11919,8 @@  struct mlx5_list_entry *
 		(void *)(uintptr_t)(dev_flow->flow_idx);
 	age_param->timeout = age->timeout;
 	age_param->port_id = dev->data->port_id;
-	__atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&age_param->state, AGE_CANDIDATE, rte_memory_order_relaxed);
 	return counter;
 }
 
@@ -12968,9 +12968,9 @@  struct mlx5_list_entry *
 	uint16_t expected = AGE_CANDIDATE;
 
 	age_info = GET_PORT_AGE_INFO(priv);
-	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
-					 AGE_FREE, false, __ATOMIC_RELAXED,
-					 __ATOMIC_RELAXED)) {
+	if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+					 AGE_FREE, rte_memory_order_relaxed,
+					 rte_memory_order_relaxed)) {
 		/**
 		 * We need the lock even it is age timeout,
 		 * since age action may still in process.
@@ -12978,7 +12978,7 @@  struct mlx5_list_entry *
 		rte_spinlock_lock(&age_info->aged_sl);
 		LIST_REMOVE(age, next);
 		rte_spinlock_unlock(&age_info->aged_sl);
-		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
 	}
 }
 
@@ -13002,7 +13002,7 @@  struct mlx5_list_entry *
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
 	struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
-	uint32_t ret = __atomic_fetch_sub(&age->refcnt, 1, __ATOMIC_RELAXED) - 1;
+	uint32_t ret = rte_atomic_fetch_sub_explicit(&age->refcnt, 1, rte_memory_order_relaxed) - 1;
 
 	if (!ret) {
 		flow_dv_aso_age_remove_from_age(dev, age);
@@ -13178,7 +13178,7 @@  struct mlx5_list_entry *
 			return 0; /* 0 is an error. */
 		}
 	}
-	__atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&age_free->refcnt, 1, rte_memory_order_relaxed);
 	return pool->index | ((age_free->offset + 1) << 16);
 }
 
@@ -13208,10 +13208,10 @@  struct mlx5_list_entry *
 	aso_age->age_params.context = context;
 	aso_age->age_params.timeout = timeout;
 	aso_age->age_params.port_id = dev->data->port_id;
-	__atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
-			 __ATOMIC_RELAXED);
-	__atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
-			 __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&aso_age->age_params.sec_since_last_hit, 0,
+			 rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&aso_age->age_params.state, AGE_CANDIDATE,
+			 rte_memory_order_relaxed);
 }
 
 static void
@@ -13393,12 +13393,12 @@  struct mlx5_list_entry *
 	uint32_t ret;
 	struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
 	enum mlx5_aso_ct_state state =
-			__atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+			rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
 
 	/* Cannot release when CT is in the ASO SQ. */
 	if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
 		return -1;
-	ret = __atomic_fetch_sub(&ct->refcnt, 1, __ATOMIC_RELAXED) - 1;
+	ret = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1, rte_memory_order_relaxed) - 1;
 	if (!ret) {
 		if (ct->dr_action_orig) {
 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -13588,7 +13588,7 @@  struct mlx5_list_entry *
 	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
 	ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
 	/* 0: inactive, 1: created, 2+: used by flows. */
-	__atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&ct->refcnt, 1, rte_memory_order_relaxed);
 	reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
 	if (!ct->dr_action_orig) {
 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -14577,8 +14577,8 @@  struct mlx5_list_entry *
 			age_act = flow_aso_age_get_by_idx(dev, owner_idx);
 			if (flow->age == 0) {
 				flow->age = owner_idx;
-				__atomic_fetch_add(&age_act->refcnt, 1,
-						   __ATOMIC_RELAXED);
+				rte_atomic_fetch_add_explicit(&age_act->refcnt, 1,
+						   rte_memory_order_relaxed);
 			}
 			age_act_pos = actions_n++;
 			action_flags |= MLX5_FLOW_ACTION_AGE;
@@ -14615,9 +14615,9 @@  struct mlx5_list_entry *
 			} else {
 				if (flow->counter == 0) {
 					flow->counter = owner_idx;
-					__atomic_fetch_add
+					rte_atomic_fetch_add_explicit
 						(&cnt_act->shared_info.refcnt,
-						 1, __ATOMIC_RELAXED);
+						 1, rte_memory_order_relaxed);
 				}
 				/* Save information first, will apply later. */
 				action_flags |= MLX5_FLOW_ACTION_COUNT;
@@ -14945,8 +14945,8 @@  struct mlx5_list_entry *
 				flow->indirect_type =
 						MLX5_INDIRECT_ACTION_TYPE_CT;
 				flow->ct = owner_idx;
-				__atomic_fetch_add(&ct->refcnt, 1,
-						   __ATOMIC_RELAXED);
+				rte_atomic_fetch_add_explicit(&ct->refcnt, 1,
+						   rte_memory_order_relaxed);
 			}
 			actions_n++;
 			action_flags |= MLX5_FLOW_ACTION_CT;
@@ -15615,7 +15615,7 @@  struct mlx5_list_entry *
 
 	shared_rss = mlx5_ipool_get
 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
-	__atomic_fetch_sub(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_sub_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
 }
 
 void
@@ -15798,8 +15798,8 @@  struct mlx5_list_entry *
 				sh->geneve_tlv_option_resource;
 	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
 	if (geneve_opt_resource) {
-		if (!(__atomic_fetch_sub(&geneve_opt_resource->refcnt, 1,
-					 __ATOMIC_RELAXED) - 1)) {
+		if (!(rte_atomic_fetch_sub_explicit(&geneve_opt_resource->refcnt, 1,
+					 rte_memory_order_relaxed) - 1)) {
 			claim_zero(mlx5_devx_cmd_destroy
 					(geneve_opt_resource->obj));
 			mlx5_free(sh->geneve_tlv_option_resource);
@@ -16208,7 +16208,7 @@  struct mlx5_list_entry *
 	/* Update queue with indirect table queue memory. */
 	origin->queue = shared_rss->ind_tbl->queues;
 	rte_spinlock_init(&shared_rss->action_rss_sl);
-	__atomic_fetch_add(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
 	rte_spinlock_lock(&priv->shared_act_sl);
 	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
 		     &priv->rss_shared_actions, idx, shared_rss, next);
@@ -16254,9 +16254,9 @@  struct mlx5_list_entry *
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "invalid shared action");
-	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
-					 0, 0, __ATOMIC_ACQUIRE,
-					 __ATOMIC_RELAXED))
+	if (!rte_atomic_compare_exchange_strong_explicit(&shared_rss->refcnt, &old_refcnt,
+					 0, rte_memory_order_acquire,
+					 rte_memory_order_relaxed))
 		return rte_flow_error_set(error, EBUSY,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  NULL,
@@ -16390,10 +16390,10 @@  struct rte_flow_action_handle *
 		return __flow_dv_action_rss_release(dev, idx, error);
 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
 		cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
-		if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
-						 &no_flow_refcnt, 1, false,
-						 __ATOMIC_ACQUIRE,
-						 __ATOMIC_RELAXED))
+		if (!rte_atomic_compare_exchange_strong_explicit(&cnt->shared_info.refcnt,
+						 &no_flow_refcnt, 1,
+						 rte_memory_order_acquire,
+						 rte_memory_order_relaxed))
 			return rte_flow_error_set(error, EBUSY,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
 						  NULL,
@@ -17353,13 +17353,13 @@  struct rte_flow_action_handle *
 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
 		age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
 		resp = data;
-		resp->aged = __atomic_load_n(&age_param->state,
-					      __ATOMIC_RELAXED) == AGE_TMOUT ?
+		resp->aged = rte_atomic_load_explicit(&age_param->state,
+					      rte_memory_order_relaxed) == AGE_TMOUT ?
 									  1 : 0;
 		resp->sec_since_last_hit_valid = !resp->aged;
 		if (resp->sec_since_last_hit_valid)
-			resp->sec_since_last_hit = __atomic_load_n
-			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+			resp->sec_since_last_hit = rte_atomic_load_explicit
+			     (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
 		return 0;
 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
 		return flow_dv_query_count(dev, idx, data, error);
@@ -17436,12 +17436,12 @@  struct rte_flow_action_handle *
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "age data not available");
 	}
-	resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+	resp->aged = rte_atomic_load_explicit(&age_param->state, rte_memory_order_relaxed) ==
 				     AGE_TMOUT ? 1 : 0;
 	resp->sec_since_last_hit_valid = !resp->aged;
 	if (resp->sec_since_last_hit_valid)
-		resp->sec_since_last_hit = __atomic_load_n
-			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+		resp->sec_since_last_hit = rte_atomic_load_explicit
+			     (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
 	return 0;
 }
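
The change from __atomic_compare_exchange_n() to rte_atomic_compare_exchange_strong_explicit() in the hunks above drops the builtin's boolean "weak" argument, since the strong wrapper implies it; the expected value is still passed by pointer and the success/failure orderings keep their positions. A minimal sketch of the pattern, using an invented refcnt field rather than the driver's own types:

#include <stdint.h>
#include <rte_stdatomic.h>

struct obj {
	RTE_ATOMIC(uint32_t) refcnt;
};

/* Drop the last reference only if exactly one is left. */
static inline int
obj_try_release(struct obj *o)
{
	uint32_t expected = 1;

	/*
	 * Before: __atomic_compare_exchange_n(&o->refcnt, &expected, 0, false,
	 *                                     __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
	 */
	return rte_atomic_compare_exchange_strong_explicit(&o->refcnt,
			&expected, 0, rte_memory_order_acquire,
			rte_memory_order_relaxed);
}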
 
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 4ae03a2..8a02247 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -86,7 +86,7 @@ 
 			MLX5_ASSERT(!item->refcnt);
 			MLX5_ASSERT(!item->devx_fp);
 			item->devx_fp = NULL;
-			__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+			rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
 			priv->flex_item_map |= 1u << idx;
 		}
 	}
@@ -107,7 +107,7 @@ 
 		MLX5_ASSERT(!item->refcnt);
 		MLX5_ASSERT(!item->devx_fp);
 		item->devx_fp = NULL;
-		__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+		rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
 		priv->flex_item_map &= ~(1u << idx);
 		rte_spinlock_unlock(&priv->flex_item_sl);
 	}
@@ -379,7 +379,7 @@ 
 		return ret;
 	}
 	if (acquire)
-		__atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+		rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
 	return ret;
 }
 
@@ -414,7 +414,7 @@ 
 		rte_errno = -EINVAL;
 		return -EINVAL;
 	}
-	__atomic_fetch_sub(&flex->refcnt, 1, __ATOMIC_RELEASE);
+	rte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);
 	return 0;
 }
 
@@ -1337,7 +1337,7 @@  struct rte_flow_item_flex_handle *
 	}
 	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
 	/* Mark initialized flex item valid. */
-	__atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+	rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
 	return (struct rte_flow_item_flex_handle *)flex;
 
 error:
@@ -1378,8 +1378,8 @@  struct rte_flow_item_flex_handle *
 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
 					  "invalid flex item handle value");
 	}
-	if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
-					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+	if (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,
+					 rte_memory_order_acquire, rte_memory_order_relaxed)) {
 		rte_spinlock_unlock(&priv->flex_item_sl);
 		return rte_flow_error_set(error, EBUSY,
 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 7376030..5dd2cdc 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -555,8 +555,8 @@  static int flow_hw_translate_group(struct rte_eth_dev *dev,
 flow_hw_template_destroy_reformat_action(struct mlx5_hw_encap_decap_action *encap_decap)
 {
 	if (encap_decap->multi_pattern) {
-		uint32_t refcnt = __atomic_sub_fetch(encap_decap->multi_pattern_refcnt,
-						     1, __ATOMIC_RELAXED);
+		uint32_t refcnt = rte_atomic_fetch_sub_explicit(encap_decap->multi_pattern_refcnt,
+						     1, rte_memory_order_relaxed) - 1;
 		if (refcnt)
 			return;
 		mlx5_free((void *)(uintptr_t)encap_decap->multi_pattern_refcnt);
@@ -569,8 +569,8 @@  static int flow_hw_translate_group(struct rte_eth_dev *dev,
 flow_hw_template_destroy_mhdr_action(struct mlx5_hw_modify_header_action *mhdr)
 {
 	if (mhdr->multi_pattern) {
-		uint32_t refcnt = __atomic_sub_fetch(mhdr->multi_pattern_refcnt,
-						     1, __ATOMIC_RELAXED);
+		uint32_t refcnt = rte_atomic_fetch_sub_explicit(mhdr->multi_pattern_refcnt,
+						     1, rte_memory_order_relaxed) - 1;
 		if (refcnt)
 			return;
 		mlx5_free((void *)(uintptr_t)mhdr->multi_pattern_refcnt);
@@ -604,7 +604,8 @@  static int flow_hw_translate_group(struct rte_eth_dev *dev,
 	}
 
 	if (acts->mark)
-		if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
+		if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
+		    rte_memory_order_relaxed) - 1))
 			flow_hw_rxq_flag_set(dev, false);
 
 	if (acts->jump) {
@@ -2168,7 +2169,8 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 				goto err;
 			acts->rule_acts[dr_pos].action =
 				priv->hw_tag[!!attr->group];
-			__atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
+			    rte_memory_order_relaxed);
 			flow_hw_rxq_flag_set(dev, true);
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
@@ -4065,7 +4067,7 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 
 	for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
 		uint32_t j;
-		uint32_t *reformat_refcnt;
+		RTE_ATOMIC(uint32_t) *reformat_refcnt;
 		typeof(mpat->reformat[0]) *reformat = mpat->reformat + i;
 		struct mlx5dr_action_reformat_header hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
 		enum mlx5dr_action_type reformat_type =
@@ -4102,7 +4104,7 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 	if (mpat->mh.elements_num) {
 		typeof(mpat->mh) *mh = &mpat->mh;
 		struct mlx5dr_action_mh_pattern pattern[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
-		uint32_t *mh_refcnt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(uint32_t),
+		RTE_ATOMIC(uint32_t) *mh_refcnt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(uint32_t),
 						 0, rte_socket_id());
 
 		if (!mh_refcnt)
@@ -4146,8 +4148,8 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 	struct mlx5_tbl_multi_pattern_ctx mpat = MLX5_EMPTY_MULTI_PATTERN_CTX;
 
 	for (i = 0; i < nb_action_templates; i++) {
-		uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
-						     __ATOMIC_RELAXED);
+		uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
+						     rte_memory_order_relaxed) + 1;
 
 		if (refcnt <= 1) {
 			rte_flow_error_set(error, EINVAL,
@@ -4179,8 +4181,8 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 at_error:
 	while (i--) {
 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
-		__atomic_sub_fetch(&action_templates[i]->refcnt,
-				   1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+				   1, rte_memory_order_relaxed);
 	}
 	return rte_errno;
 }
@@ -4326,8 +4328,8 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 			rte_errno = EINVAL;
 			goto it_error;
 		}
-		ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
-					 __ATOMIC_RELAXED) + 1;
+		ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
+					 rte_memory_order_relaxed) + 1;
 		if (ret <= 1) {
 			rte_errno = EINVAL;
 			goto it_error;
@@ -4358,14 +4360,14 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 at_error:
 	for (i = 0; i < nb_action_templates; i++) {
 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
-		__atomic_fetch_sub(&action_templates[i]->refcnt,
-				   1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+				   1, rte_memory_order_relaxed);
 	}
 	i = nb_item_templates;
 it_error:
 	while (i--)
-		__atomic_fetch_sub(&item_templates[i]->refcnt,
-				   1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
+				   1, rte_memory_order_relaxed);
 error:
 	err = rte_errno;
 	if (tbl) {
@@ -4567,12 +4569,12 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 	}
 	LIST_REMOVE(table, next);
 	for (i = 0; i < table->nb_item_templates; i++)
-		__atomic_fetch_sub(&table->its[i]->refcnt,
-				   1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
+				   1, rte_memory_order_relaxed);
 	for (i = 0; i < table->nb_action_templates; i++) {
 		__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
-		__atomic_fetch_sub(&table->ats[i].action_template->refcnt,
-				   1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
+				   1, rte_memory_order_relaxed);
 	}
 	mlx5dr_matcher_destroy(table->matcher);
 	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
@@ -6445,7 +6447,7 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 	if (!at->tmpl)
 		goto error;
 	at->action_flags = action_flags;
-	__atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
 	LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
 	return at;
 error:
@@ -6481,7 +6483,7 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 	uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
 			MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
 
-	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+	if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
 		DRV_LOG(WARNING, "Action template %p is still in use.",
 			(void *)template);
 		return rte_flow_error_set(error, EBUSY,
@@ -6876,7 +6878,7 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 			}
 		}
 	}
-	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
 	return it;
 }
@@ -6899,7 +6901,7 @@  static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 			      struct rte_flow_pattern_template *template,
 			      struct rte_flow_error *error __rte_unused)
 {
-	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+	if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
 		DRV_LOG(WARNING, "Item template %p is still in use.",
 			(void *)template);
 		return rte_flow_error_set(error, EBUSY,
@@ -9179,7 +9181,8 @@  struct mlx5_list_entry *
 		}
 		dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
 		priv->shared_host = host_dev;
-		__atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
+		    rte_memory_order_relaxed);
 	}
 	dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
 	/* rte_errno has been updated by HWS layer. */
@@ -9340,7 +9343,8 @@  struct mlx5_list_entry *
 	if (_queue_attr)
 		mlx5_free(_queue_attr);
 	if (priv->shared_host) {
-		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+		    rte_memory_order_relaxed);
 		priv->shared_host = NULL;
 	}
 	mlx5_free(priv->hw_attr);
@@ -9434,7 +9438,8 @@  struct mlx5_list_entry *
 	claim_zero(mlx5dr_context_close(priv->dr_ctx));
 	if (priv->shared_host) {
 		struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
-		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+		    rte_memory_order_relaxed);
 		priv->shared_host = NULL;
 	}
 	priv->dr_ctx = NULL;
@@ -9491,8 +9496,8 @@  struct mlx5_list_entry *
 				NULL,
 				"Invalid CT destruction index");
 	}
-	__atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
-				 __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
+				 rte_memory_order_relaxed);
 	mlx5_ipool_free(pool->cts, ct_idx);
 	return 0;
 }
@@ -10185,7 +10190,7 @@  struct mlx5_list_entry *
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "age data not available");
-	switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+	switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
 	case HWS_AGE_AGED_OUT_REPORTED:
 	case HWS_AGE_AGED_OUT_NOT_REPORTED:
 		resp->aged = 1;
@@ -10205,8 +10210,8 @@  struct mlx5_list_entry *
 	}
 	resp->sec_since_last_hit_valid = !resp->aged;
 	if (resp->sec_since_last_hit_valid)
-		resp->sec_since_last_hit = __atomic_load_n
-				 (&param->sec_since_last_hit, __ATOMIC_RELAXED);
+		resp->sec_since_last_hit = rte_atomic_load_explicit
+				 (&param->sec_since_last_hit, rte_memory_order_relaxed);
 	return 0;
 }
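
Unlike __atomic_add_fetch()/__atomic_sub_fetch(), which return the new value, the rte_atomic_fetch_*_explicit() wrappers only come in fetch-then-op form and return the old value, which is why the conversions above append an explicit "+ 1" or "- 1". A rough equivalent on a hypothetical reference counter (the helper is illustrative, not part of the patch):

#include <stdint.h>
#include <rte_stdatomic.h>

/* Drop one reference and return how many remain. */
static inline uint32_t
refcnt_sub_return(RTE_ATOMIC(uint32_t) *refcnt)
{
	/* Before: return __atomic_sub_fetch(refcnt, 1, __ATOMIC_RELAXED); */
	return rte_atomic_fetch_sub_explicit(refcnt, 1,
					     rte_memory_order_relaxed) - 1;
}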
 
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index 7cbf772..9f345dd 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -1766,9 +1766,9 @@  struct mlx5_flow_meter_policy *
 			NULL, "Meter profile id not valid.");
 	/* Meter policy must exist. */
 	if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
-		__atomic_fetch_add
+		rte_atomic_fetch_add_explicit
 			(&priv->sh->mtrmng->def_policy_ref_cnt,
-			1, __ATOMIC_RELAXED);
+			1, rte_memory_order_relaxed);
 		domain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;
 		if (!priv->sh->config.dv_esw_en)
 			domain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
@@ -1848,7 +1848,7 @@  struct mlx5_flow_meter_policy *
 	fm->is_enable = params->meter_enable;
 	fm->shared = !!shared;
 	fm->color_aware = !!params->use_prev_mtr_color;
-	__atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
 	if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
 		fm->def_policy = 1;
 		fm->flow_ipool = mlx5_ipool_create(&flow_ipool_cfg);
@@ -1877,7 +1877,7 @@  struct mlx5_flow_meter_policy *
 	}
 	fm->active_state = params->meter_enable;
 	if (mtr_policy)
-		__atomic_fetch_add(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&mtr_policy->ref_cnt, 1, rte_memory_order_relaxed);
 	return 0;
 error:
 	mlx5_flow_destroy_mtr_tbls(dev, fm);
@@ -1972,8 +1972,8 @@  struct mlx5_flow_meter_policy *
 			RTE_MTR_ERROR_TYPE_UNSPECIFIED,
 			NULL, "Failed to create devx meter.");
 	fm->active_state = params->meter_enable;
-	__atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
-	__atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
+	rte_atomic_fetch_add_explicit(&policy->ref_cnt, 1, rte_memory_order_relaxed);
 	return 0;
 }
 
@@ -1995,7 +1995,7 @@  struct mlx5_flow_meter_policy *
 	if (fmp == NULL)
 		return -1;
 	/* Update dependencies. */
-	__atomic_fetch_sub(&fmp->ref_cnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_sub_explicit(&fmp->ref_cnt, 1, rte_memory_order_relaxed);
 	fm->profile = NULL;
 	/* Remove from list. */
 	if (!priv->sh->meter_aso_en) {
@@ -2013,15 +2013,15 @@  struct mlx5_flow_meter_policy *
 	}
 	mlx5_flow_destroy_mtr_tbls(dev, fm);
 	if (fm->def_policy)
-		__atomic_fetch_sub(&priv->sh->mtrmng->def_policy_ref_cnt,
-				1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&priv->sh->mtrmng->def_policy_ref_cnt,
+				1, rte_memory_order_relaxed);
 	if (priv->sh->meter_aso_en) {
 		if (!fm->def_policy) {
 			mtr_policy = mlx5_flow_meter_policy_find(dev,
 						fm->policy_id, NULL);
 			if (mtr_policy)
-				__atomic_fetch_sub(&mtr_policy->ref_cnt,
-						1, __ATOMIC_RELAXED);
+				rte_atomic_fetch_sub_explicit(&mtr_policy->ref_cnt,
+						1, rte_memory_order_relaxed);
 			fm->policy_id = 0;
 		}
 		fm->def_policy = 0;
@@ -2124,13 +2124,13 @@  struct mlx5_flow_meter_policy *
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "Meter object is being used.");
 	/* Destroy the meter profile. */
-	__atomic_fetch_sub(&fm->profile->ref_cnt,
-						1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_sub_explicit(&fm->profile->ref_cnt,
+						1, rte_memory_order_relaxed);
 	/* Destroy the meter policy. */
 	policy = mlx5_flow_meter_policy_find(dev,
 			fm->policy_id, NULL);
-	__atomic_fetch_sub(&policy->ref_cnt,
-						1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_sub_explicit(&policy->ref_cnt,
+						1, rte_memory_order_relaxed);
 	memset(fm, 0, sizeof(struct mlx5_flow_meter_info));
 	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_flow_quota.c b/drivers/net/mlx5/mlx5_flow_quota.c
index 14a2a8b..6ad0e8a 100644
--- a/drivers/net/mlx5/mlx5_flow_quota.c
+++ b/drivers/net/mlx5/mlx5_flow_quota.c
@@ -218,9 +218,9 @@  typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
 		struct mlx5_quota *quota_obj =
 			sq->elts[(sq->tail + i) & mask].quota_obj;
 
-		__atomic_compare_exchange_n(&quota_obj->state, &state,
-					    MLX5_QUOTA_STATE_READY, false,
-					    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+		rte_atomic_compare_exchange_strong_explicit(&quota_obj->state, &state,
+					    MLX5_QUOTA_STATE_READY,
+					    rte_memory_order_relaxed, rte_memory_order_relaxed);
 	}
 }
 
@@ -278,7 +278,7 @@  typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
 		rte_spinlock_lock(&sq->sqsl);
 		mlx5_quota_cmd_completion_handle(sq);
 		rte_spinlock_unlock(&sq->sqsl);
-		if (__atomic_load_n(&quota_obj->state, __ATOMIC_RELAXED) ==
+		if (rte_atomic_load_explicit(&quota_obj->state, rte_memory_order_relaxed) ==
 		    MLX5_QUOTA_STATE_READY)
 			return 0;
 	} while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
@@ -470,9 +470,9 @@  typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
 mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)
 {
 	uint8_t state = MLX5_QUOTA_STATE_READY;
-	bool verdict = __atomic_compare_exchange_n
-		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
-		 __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+	bool verdict = rte_atomic_compare_exchange_strong_explicit
+		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+		 rte_memory_order_relaxed, rte_memory_order_relaxed);
 
 	if (!verdict)
 		return rte_flow_error_set(error, EBUSY,
@@ -507,8 +507,8 @@  typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
 	ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,
 				 async_job ? async_job : &sync_job, push, NULL);
 	if (ret) {
-		__atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+				 rte_memory_order_relaxed);
 		return rte_flow_error_set(error, EAGAIN,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
 	}
@@ -557,8 +557,8 @@  typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
 				 async_job ? async_job : &sync_job, push,
 				 (void *)(uintptr_t)update->conf);
 	if (ret) {
-		__atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+				 rte_memory_order_relaxed);
 		return rte_flow_error_set(error, EAGAIN,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
 	}
@@ -593,9 +593,9 @@  struct rte_flow_action_handle *
 				   NULL, "quota: failed to allocate quota object");
 		return NULL;
 	}
-	verdict = __atomic_compare_exchange_n
-		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
-		 __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+	verdict = rte_atomic_compare_exchange_strong_explicit
+		(&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+		 rte_memory_order_relaxed, rte_memory_order_relaxed);
 	if (!verdict) {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
 				   NULL, "quota: new quota object has invalid state");
@@ -616,8 +616,8 @@  struct rte_flow_action_handle *
 				 (void *)(uintptr_t)conf);
 	if (ret) {
 		mlx5_ipool_free(qctx->quota_ipool, id);
-		__atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_FREE,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,
+				 rte_memory_order_relaxed);
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
 				   NULL, "quota: WR failure");
 		return 0;
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index f556a9f..4409ae7 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -149,7 +149,7 @@ 
 		}
 		if (param->timeout == 0)
 			continue;
-		switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+		switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
 		case HWS_AGE_AGED_OUT_NOT_REPORTED:
 		case HWS_AGE_AGED_OUT_REPORTED:
 			/* Already aged-out, no action is needed. */
@@ -171,8 +171,8 @@ 
 		hits = rte_be_to_cpu_64(stats[i].hits);
 		if (param->nb_cnts == 1) {
 			if (hits != param->accumulator_last_hits) {
-				__atomic_store_n(&param->sec_since_last_hit, 0,
-						 __ATOMIC_RELAXED);
+				rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+						 rte_memory_order_relaxed);
 				param->accumulator_last_hits = hits;
 				continue;
 			}
@@ -184,8 +184,8 @@ 
 			param->accumulator_cnt = 0;
 			if (param->accumulator_last_hits !=
 						param->accumulator_hits) {
-				__atomic_store_n(&param->sec_since_last_hit,
-						 0, __ATOMIC_RELAXED);
+				rte_atomic_store_explicit(&param->sec_since_last_hit,
+						 0, rte_memory_order_relaxed);
 				param->accumulator_last_hits =
 							param->accumulator_hits;
 				param->accumulator_hits = 0;
@@ -193,9 +193,9 @@ 
 			}
 			param->accumulator_hits = 0;
 		}
-		if (__atomic_fetch_add(&param->sec_since_last_hit, time_delta,
-				       __ATOMIC_RELAXED) + time_delta <=
-		   __atomic_load_n(&param->timeout, __ATOMIC_RELAXED))
+		if (rte_atomic_fetch_add_explicit(&param->sec_since_last_hit, time_delta,
+				       rte_memory_order_relaxed) + time_delta <=
+		   rte_atomic_load_explicit(&param->timeout, rte_memory_order_relaxed))
 			continue;
 		/* Prepare the relevant ring for this AGE parameter */
 		if (priv->hws_strict_queue)
@@ -203,10 +203,10 @@ 
 		else
 			r = age_info->hw_age.aged_list;
 		/* Changing the state atomically and insert it into the ring. */
-		if (__atomic_compare_exchange_n(&param->state, &expected1,
+		if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected1,
 						HWS_AGE_AGED_OUT_NOT_REPORTED,
-						false, __ATOMIC_RELAXED,
-						__ATOMIC_RELAXED)) {
+						rte_memory_order_relaxed,
+						rte_memory_order_relaxed)) {
 			int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
 							      sizeof(uint32_t),
 							      1, NULL);
@@ -221,11 +221,10 @@ 
 			 */
 			expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
 			if (ret == 0 &&
-			    !__atomic_compare_exchange_n(&param->state,
+			    !rte_atomic_compare_exchange_strong_explicit(&param->state,
 							 &expected2, expected1,
-							 false,
-							 __ATOMIC_RELAXED,
-							 __ATOMIC_RELAXED) &&
+							 rte_memory_order_relaxed,
+							 rte_memory_order_relaxed) &&
 			    expected2 == HWS_AGE_FREE)
 				mlx5_hws_age_param_free(priv,
 							param->own_cnt_index,
@@ -235,10 +234,10 @@ 
 			if (!priv->hws_strict_queue)
 				MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
 		} else {
-			__atomic_compare_exchange_n(&param->state, &expected2,
+			rte_atomic_compare_exchange_strong_explicit(&param->state, &expected2,
 						  HWS_AGE_AGED_OUT_NOT_REPORTED,
-						  false, __ATOMIC_RELAXED,
-						  __ATOMIC_RELAXED);
+						  rte_memory_order_relaxed,
+						  rte_memory_order_relaxed);
 		}
 	}
 	/* The event is irrelevant in strict queue mode. */
@@ -752,8 +751,8 @@  struct mlx5_hws_cnt_pool *
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "invalid AGE parameter index");
-	switch (__atomic_exchange_n(&param->state, HWS_AGE_FREE,
-				    __ATOMIC_RELAXED)) {
+	switch (rte_atomic_exchange_explicit(&param->state, HWS_AGE_FREE,
+				    rte_memory_order_relaxed)) {
 	case HWS_AGE_CANDIDATE:
 	case HWS_AGE_AGED_OUT_REPORTED:
 		mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
@@ -818,8 +817,8 @@  struct mlx5_hws_cnt_pool *
 				   "cannot allocate AGE parameter");
 		return 0;
 	}
-	MLX5_ASSERT(__atomic_load_n(&param->state,
-				    __ATOMIC_RELAXED) == HWS_AGE_FREE);
+	MLX5_ASSERT(rte_atomic_load_explicit(&param->state,
+				    rte_memory_order_relaxed) == HWS_AGE_FREE);
 	if (shared) {
 		param->nb_cnts = 0;
 		param->accumulator_hits = 0;
@@ -870,9 +869,9 @@  struct mlx5_hws_cnt_pool *
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "invalid AGE parameter index");
 	if (update_ade->timeout_valid) {
-		uint32_t old_timeout = __atomic_exchange_n(&param->timeout,
+		uint32_t old_timeout = rte_atomic_exchange_explicit(&param->timeout,
 							   update_ade->timeout,
-							   __ATOMIC_RELAXED);
+							   rte_memory_order_relaxed);
 
 		if (old_timeout == 0)
 			sec_since_last_hit_reset = true;
@@ -891,8 +890,8 @@  struct mlx5_hws_cnt_pool *
 		state_update = true;
 	}
 	if (sec_since_last_hit_reset)
-		__atomic_store_n(&param->sec_since_last_hit, 0,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+				 rte_memory_order_relaxed);
 	if (state_update) {
 		uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
 
@@ -901,13 +900,13 @@  struct mlx5_hws_cnt_pool *
 		 *  - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
 		 *  - AGED_OUT_REPORTED -> CANDIDATE
 		 */
-		if (!__atomic_compare_exchange_n(&param->state, &expected,
+		if (!rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
 						 HWS_AGE_CANDIDATE_INSIDE_RING,
-						 false, __ATOMIC_RELAXED,
-						 __ATOMIC_RELAXED) &&
+						 rte_memory_order_relaxed,
+						 rte_memory_order_relaxed) &&
 		    expected == HWS_AGE_AGED_OUT_REPORTED)
-			__atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
-					 __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+					 rte_memory_order_relaxed);
 	}
 	return 0;
 }
@@ -932,9 +931,9 @@  struct mlx5_hws_cnt_pool *
 	uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
 
 	MLX5_ASSERT(param != NULL);
-	if (__atomic_compare_exchange_n(&param->state, &expected,
-					HWS_AGE_AGED_OUT_REPORTED, false,
-					__ATOMIC_RELAXED, __ATOMIC_RELAXED))
+	if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
+					HWS_AGE_AGED_OUT_REPORTED,
+					rte_memory_order_relaxed, rte_memory_order_relaxed))
 		return param->context;
 	switch (expected) {
 	case HWS_AGE_FREE:
@@ -946,8 +945,8 @@  struct mlx5_hws_cnt_pool *
 		mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
 		break;
 	case HWS_AGE_CANDIDATE_INSIDE_RING:
-		__atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+				 rte_memory_order_relaxed);
 		break;
 	case HWS_AGE_CANDIDATE:
 		/*
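
The __atomic_exchange_n() calls above map one to one onto rte_atomic_exchange_explicit(), which likewise returns the previous value so the old state can still drive the surrounding switch. A minimal sketch with invented names:

#include <stdint.h>
#include <rte_stdatomic.h>

/* Move a small state machine to 'next' and hand back the prior state. */
static inline uint16_t
state_exchange(RTE_ATOMIC(uint16_t) *state, uint16_t next)
{
	/* Before: __atomic_exchange_n(state, next, __ATOMIC_RELAXED) */
	return rte_atomic_exchange_explicit(state, next,
					    rte_memory_order_relaxed);
}
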
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index dcd5cec..a18c9be 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -100,7 +100,7 @@  struct mlx5_hws_cnt_pool_caches {
 struct mlx5_hws_cnt_pool {
 	struct mlx5_hws_cnt_pool_cfg cfg __rte_cache_aligned;
 	struct mlx5_hws_cnt_dcs_mng dcs_mng __rte_cache_aligned;
-	uint32_t query_gen __rte_cache_aligned;
+	RTE_ATOMIC(uint32_t) query_gen __rte_cache_aligned;
 	struct mlx5_hws_cnt *pool;
 	struct mlx5_hws_cnt_raw_data_mng *raw_mng;
 	struct rte_ring *reuse_list;
@@ -132,10 +132,10 @@  enum {
 
 /* HWS counter age parameter. */
 struct mlx5_hws_age_param {
-	uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
-	uint32_t sec_since_last_hit;
+	RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
+	RTE_ATOMIC(uint32_t) sec_since_last_hit;
 	/* Time in seconds since last hit (atomically accessed). */
-	uint16_t state; /* AGE state (atomically accessed). */
+	RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
 	uint64_t accumulator_last_hits;
 	/* Last total value of hits for comparing. */
 	uint64_t accumulator_hits;
@@ -424,7 +424,7 @@  struct mlx5_hws_age_param {
 	iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
 	hpool->pool[iidx].in_used = false;
 	hpool->pool[iidx].query_gen_when_free =
-		__atomic_load_n(&hpool->query_gen, __ATOMIC_RELAXED);
+		rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);
 	if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
 		qcache = hpool->cache->qcache[*queue];
 	if (unlikely(qcache == NULL)) {
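
Fields that are only ever accessed through the rte_atomic_*_explicit() wrappers are declared via RTE_ATOMIC(), as in the mlx5_hws_age_param hunk above, so they pick up the _Atomic specifier when DPDK is built with the optional stdatomic backend; with the default builtin backend the macro leaves the type unchanged. A sketch of such a declaration, with field names invented for illustration:

#include <stdint.h>
#include <rte_stdatomic.h>

struct age_param_sketch {
	RTE_ATOMIC(uint32_t) timeout;            /* written by control path */
	RTE_ATOMIC(uint32_t) sec_since_last_hit; /* updated by aging poll */
	RTE_ATOMIC(uint16_t) state;              /* small state machine */
	uint64_t accumulator_hits;               /* single writer, not atomic */
};
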
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 2fce908..c627113 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -173,7 +173,7 @@  struct mlx5_rxq_ctrl {
 /* RX queue private data. */
 struct mlx5_rxq_priv {
 	uint16_t idx; /* Queue index. */
-	uint32_t refcnt; /* Reference counter. */
+	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
 	struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
 	LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
@@ -188,7 +188,7 @@  struct mlx5_rxq_priv {
 /* External RX queue descriptor. */
 struct mlx5_external_rxq {
 	uint32_t hw_id; /* Queue index in the Hardware. */
-	uint32_t refcnt; /* Reference counter. */
+	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
 };
 
 /* mlx5_rxq.c */
@@ -412,7 +412,7 @@  uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
 	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
 	void *addr;
 
-	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
+	if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {
 		MLX5_ASSERT(rep != NULL);
 		/* Replace MPRQ buf. */
 		(*rxq->mprq_bufs)[rq_idx] = rep;
@@ -524,9 +524,9 @@  uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
 		void *buf_addr;
 
 		/* Increment the refcnt of the whole chunk. */
-		__atomic_fetch_add(&buf->refcnt, 1, __ATOMIC_RELAXED);
-		MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
-			    __ATOMIC_RELAXED) <= strd_n + 1);
+		rte_atomic_fetch_add_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
+		MLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,
+			    rte_memory_order_relaxed) <= strd_n + 1);
 		buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
 		/*
 		 * MLX5 device doesn't use iova but it is necessary in a
@@ -666,7 +666,7 @@  uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
 	if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
 		return false;
 	rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
-	return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
+	return !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);
 }
 
 #define LWM_COOKIE_RXQID_OFFSET 0
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 88b2dc5..16a5170 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -416,7 +416,7 @@ 
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
-	return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
+	return (rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed) == 1);
 }
 
 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1319,7 +1319,7 @@ 
 
 	memset(_m, 0, sizeof(*buf));
 	buf->mp = mp;
-	__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
 	for (j = 0; j != strd_n; ++j) {
 		shinfo = &buf->shinfos[j];
 		shinfo->free_cb = mlx5_mprq_buf_free_cb;
@@ -2035,7 +2035,7 @@  struct mlx5_rxq_priv *
 	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
 
 	if (rxq != NULL)
-		__atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
 	return rxq;
 }
 
@@ -2057,7 +2057,7 @@  struct mlx5_rxq_priv *
 
 	if (rxq == NULL)
 		return 0;
-	return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+	return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
 }
 
 /**
@@ -2136,7 +2136,7 @@  struct mlx5_external_rxq *
 {
 	struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
 
-	__atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
 	return rxq;
 }
 
@@ -2156,7 +2156,7 @@  struct mlx5_external_rxq *
 {
 	struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
 
-	return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+	return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
 }
 
 /**
@@ -2445,8 +2445,8 @@  struct mlx5_ind_table_obj *
 		    (memcmp(ind_tbl->queues, queues,
 			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
 		     == 0)) {
-			__atomic_fetch_add(&ind_tbl->refcnt, 1,
-					   __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1,
+					   rte_memory_order_relaxed);
 			break;
 		}
 	}
@@ -2477,7 +2477,7 @@  struct mlx5_ind_table_obj *
 	unsigned int ret;
 
 	rte_rwlock_write_lock(&priv->ind_tbls_lock);
-	ret = __atomic_fetch_sub(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) - 1;
+	ret = rte_atomic_fetch_sub_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed) - 1;
 	if (!ret)
 		LIST_REMOVE(ind_tbl, next);
 	rte_rwlock_write_unlock(&priv->ind_tbls_lock);
@@ -2559,7 +2559,7 @@  struct mlx5_ind_table_obj *
 		}
 		return ret;
 	}
-	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed);
 	return 0;
 }
 
@@ -2624,7 +2624,7 @@  struct mlx5_ind_table_obj *
 {
 	uint32_t refcnt;
 
-	refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
+	refcnt = rte_atomic_load_explicit(&ind_tbl->refcnt, rte_memory_order_relaxed);
 	if (refcnt <= 1)
 		return 0;
 	/*
@@ -3256,8 +3256,8 @@  struct mlx5_hrxq *
 	ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
 	if (ext_rxq == NULL)
 		return -rte_errno;
-	if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
-					 __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+	if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &unmapped, 1,
+					 rte_memory_order_relaxed, rte_memory_order_relaxed)) {
 		if (ext_rxq->hw_id != hw_idx) {
 			DRV_LOG(ERR, "Port %u external RxQ index %u "
 				"is already mapped to HW index (requesting is "
@@ -3294,8 +3294,8 @@  struct mlx5_hrxq *
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
-	if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
-					 __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+	if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &mapped, 0,
+					 rte_memory_order_relaxed, rte_memory_order_relaxed)) {
 		DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
 			port_id, dpdk_idx);
 		rte_errno = EINVAL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 7bdb897..7b2ddd9 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1436,7 +1436,7 @@ 
 	rte_delay_us_sleep(1000 * priv->rxqs_n);
 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
 	if (priv->sh->config.dv_flow_en == 2) {
-		if (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))
+		if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt, rte_memory_order_relaxed))
 			flow_hw_rxq_flag_set(dev, false);
 	} else {
 		mlx5_flow_stop_default(dev);
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 264cc19..0be0df7 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -178,7 +178,7 @@  struct mlx5_txq_data {
 /* TX queue control descriptor. */
 struct mlx5_txq_ctrl {
 	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
-	uint32_t refcnt; /* Reference counter. */
+	RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	bool is_hairpin; /* Whether TxQ type is Hairpin. */
 	unsigned int max_inline_data; /* Max inline data. */
@@ -338,8 +338,8 @@  int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		 * the service thread, data should be re-read.
 		 */
 		rte_compiler_barrier();
-		ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
-		ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
+		ci = rte_atomic_load_explicit(&sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
+		ts = rte_atomic_load_explicit(&sh->txpp.ts.ts, rte_memory_order_relaxed);
 		rte_compiler_barrier();
 		if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
 			break;
@@ -349,8 +349,8 @@  int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 	mts -= ts;
 	if (unlikely(mts >= UINT64_MAX / 2)) {
 		/* We have negative integer, mts is in the past. */
-		__atomic_fetch_add(&sh->txpp.err_ts_past,
-				   1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_past,
+				   1, rte_memory_order_relaxed);
 		return -1;
 	}
 	tick = sh->txpp.tick;
@@ -359,8 +359,8 @@  int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 	mts = (mts + tick - 1) / tick;
 	if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
 		/* We have mts is too distant future. */
-		__atomic_fetch_add(&sh->txpp.err_ts_future,
-				   1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_future,
+				   1, rte_memory_order_relaxed);
 		return -1;
 	}
 	mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
@@ -1742,8 +1742,8 @@  int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		/* Convert the timestamp into completion to wait. */
 		ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
 		if (txq->ts_last && ts < txq->ts_last)
-			__atomic_fetch_add(&txq->sh->txpp.err_ts_order,
-					   1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&txq->sh->txpp.err_ts_order,
+					   1, rte_memory_order_relaxed);
 		txq->ts_last = ts;
 		wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
 		sh = txq->sh;
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 5a5df2d..4e26fa2 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -538,12 +538,12 @@ 
 		uint64_t *ps;
 
 		rte_compiler_barrier();
-		tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
-		op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
+		tm = rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed);
+		op = rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed);
 		rte_compiler_barrier();
-		if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
+		if (tm != rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed))
 			continue;
-		if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
+		if (op != rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed))
 			continue;
 		ps = (uint64_t *)ts;
 		ps[0] = tm;
@@ -561,8 +561,8 @@ 
 	ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
 	ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
 	rte_compiler_barrier();
-	__atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
-	__atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&sh->txpp.ts.ts, ts, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&sh->txpp.ts.ci_ts, ci, rte_memory_order_relaxed);
 	rte_wmb();
 }
 
@@ -590,8 +590,8 @@ 
 			 */
 			DRV_LOG(DEBUG,
 				"Clock Queue error sync lost (%X).", opcode);
-				__atomic_fetch_add(&sh->txpp.err_clock_queue,
-				   1, __ATOMIC_RELAXED);
+				rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+				   1, rte_memory_order_relaxed);
 			sh->txpp.sync_lost = 1;
 		}
 		return;
@@ -633,10 +633,10 @@ 
 	if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
 		return;
 	MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
-	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
-			 sh->txpp.ts.ts, __ATOMIC_RELAXED);
-	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
-			 sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ts,
+			 sh->txpp.ts.ts, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
+			 sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
 	if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
 		sh->txpp.ts_p = 0;
 	if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
@@ -677,8 +677,8 @@ 
 		/* Check whether we have missed interrupts. */
 		if (cq_ci - wq->cq_ci != 1) {
 			DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
-			__atomic_fetch_add(&sh->txpp.err_miss_int,
-					   1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&sh->txpp.err_miss_int,
+					   1, rte_memory_order_relaxed);
 			/* Check sync lost on wqe index. */
 			if (cq_ci - wq->cq_ci >=
 				(((1UL << MLX5_WQ_INDEX_WIDTH) /
@@ -693,8 +693,8 @@ 
 		/* Fire new requests to Rearm Queue. */
 		if (error) {
 			DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
-			__atomic_fetch_add(&sh->txpp.err_rearm_queue,
-					   1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&sh->txpp.err_rearm_queue,
+					   1, rte_memory_order_relaxed);
 			sh->txpp.sync_lost = 1;
 		}
 	}
@@ -987,8 +987,8 @@ 
 		mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
 		if (to.cts.op_own >> 4) {
 			DRV_LOG(DEBUG, "Clock Queue error sync lost.");
-			__atomic_fetch_add(&sh->txpp.err_clock_queue,
-					   1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+					   1, rte_memory_order_relaxed);
 			sh->txpp.sync_lost = 1;
 			return -EIO;
 		}
@@ -1031,12 +1031,12 @@  int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 
-	__atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
-	__atomic_store_n(&sh->txpp.err_ts_order, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&sh->txpp.err_miss_int, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&sh->txpp.err_rearm_queue, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&sh->txpp.err_clock_queue, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&sh->txpp.err_ts_past, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&sh->txpp.err_ts_future, 0, rte_memory_order_relaxed);
+	rte_atomic_store_explicit(&sh->txpp.err_ts_order, 0, rte_memory_order_relaxed);
 	return 0;
 }
 
@@ -1081,16 +1081,16 @@  int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
 	do {
 		uint64_t ts, ci;
 
-		ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
-		ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
+		ts = rte_atomic_load_explicit(&txpp->tsa[idx].ts, rte_memory_order_relaxed);
+		ci = rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts, rte_memory_order_relaxed);
 		rte_compiler_barrier();
 		if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
 			continue;
-		if (__atomic_load_n(&txpp->tsa[idx].ts,
-				    __ATOMIC_RELAXED) != ts)
+		if (rte_atomic_load_explicit(&txpp->tsa[idx].ts,
+				    rte_memory_order_relaxed) != ts)
 			continue;
-		if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
-				    __ATOMIC_RELAXED) != ci)
+		if (rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts,
+				    rte_memory_order_relaxed) != ci)
 			continue;
 		tsa->ts = ts;
 		tsa->ci_ts = ci;
@@ -1210,23 +1210,23 @@  int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
 		for (i = 0; i < n_txpp; ++i)
 			stats[n_used + i].id = n_used + i;
 		stats[n_used + 0].value =
-				__atomic_load_n(&sh->txpp.err_miss_int,
-						__ATOMIC_RELAXED);
+				rte_atomic_load_explicit(&sh->txpp.err_miss_int,
+						rte_memory_order_relaxed);
 		stats[n_used + 1].value =
-				__atomic_load_n(&sh->txpp.err_rearm_queue,
-						__ATOMIC_RELAXED);
+				rte_atomic_load_explicit(&sh->txpp.err_rearm_queue,
+						rte_memory_order_relaxed);
 		stats[n_used + 2].value =
-				__atomic_load_n(&sh->txpp.err_clock_queue,
-						__ATOMIC_RELAXED);
+				rte_atomic_load_explicit(&sh->txpp.err_clock_queue,
+						rte_memory_order_relaxed);
 		stats[n_used + 3].value =
-				__atomic_load_n(&sh->txpp.err_ts_past,
-						__ATOMIC_RELAXED);
+				rte_atomic_load_explicit(&sh->txpp.err_ts_past,
+						rte_memory_order_relaxed);
 		stats[n_used + 4].value =
-				__atomic_load_n(&sh->txpp.err_ts_future,
-						__ATOMIC_RELAXED);
+				rte_atomic_load_explicit(&sh->txpp.err_ts_future,
+						rte_memory_order_relaxed);
 		stats[n_used + 5].value =
-				__atomic_load_n(&sh->txpp.err_ts_order,
-						__ATOMIC_RELAXED);
+				rte_atomic_load_explicit(&sh->txpp.err_ts_order,
+						rte_memory_order_relaxed);
 		stats[n_used + 6].value = mlx5_txpp_xstats_jitter(&sh->txpp);
 		stats[n_used + 7].value = mlx5_txpp_xstats_wander(&sh->txpp);
 		stats[n_used + 8].value = sh->txpp.sync_lost;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index b584055..22956ff 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1108,7 +1108,7 @@  struct mlx5_txq_ctrl *
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
 	tmpl->is_hairpin = false;
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
@@ -1153,7 +1153,7 @@  struct mlx5_txq_ctrl *
 	tmpl->txq.idx = idx;
 	tmpl->hairpin_conf = *hairpin_conf;
 	tmpl->is_hairpin = true;
-	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
 }
@@ -1178,7 +1178,7 @@  struct mlx5_txq_ctrl *
 
 	if (txq_data) {
 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
-		__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&ctrl->refcnt, 1, rte_memory_order_relaxed);
 	}
 	return ctrl;
 }
@@ -1203,7 +1203,7 @@  struct mlx5_txq_ctrl *
 	if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
 		return 0;
 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	if (__atomic_fetch_sub(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) - 1 > 1)
+	if (rte_atomic_fetch_sub_explicit(&txq_ctrl->refcnt, 1, rte_memory_order_relaxed) - 1 > 1)
 		return 1;
 	if (txq_ctrl->obj) {
 		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
@@ -1219,7 +1219,7 @@  struct mlx5_txq_ctrl *
 		txq_free_elts(txq_ctrl);
 		dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
-	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+	if (!rte_atomic_load_explicit(&txq_ctrl->refcnt, rte_memory_order_relaxed)) {
 		if (!txq_ctrl->is_hairpin)
 			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
 		LIST_REMOVE(txq_ctrl, next);
@@ -1249,7 +1249,7 @@  struct mlx5_txq_ctrl *
 	if (!(*priv->txqs)[idx])
 		return -1;
 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
+	return (rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed) == 1);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 4db7387..7c1d0ff 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -203,7 +203,7 @@  struct mlx5_indexed_pool *
 	struct mlx5_indexed_cache *gc, *lc, *olc = NULL;
 
 	lc = pool->cache[cidx]->lc;
-	gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
+	gc = rte_atomic_load_explicit(&pool->gc, rte_memory_order_relaxed);
 	if (gc && lc != gc) {
 		mlx5_ipool_lock(pool);
 		if (lc && !(--lc->ref_cnt))
@@ -266,8 +266,8 @@  struct mlx5_indexed_pool *
 		pool->cache[cidx]->len = fetch_size - 1;
 		return pool->cache[cidx]->idx[pool->cache[cidx]->len];
 	}
-	trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
-			 __ATOMIC_ACQUIRE) : 0;
+	trunk_idx = lc ? rte_atomic_load_explicit(&lc->n_trunk_valid,
+			 rte_memory_order_acquire) : 0;
 	trunk_n = lc ? lc->n_trunk : 0;
 	cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
 	/* Check if index reach maximum. */
@@ -332,11 +332,11 @@  struct mlx5_indexed_pool *
 		lc = p;
 		lc->ref_cnt = 1;
 		pool->cache[cidx]->lc = lc;
-		__atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&pool->gc, p, rte_memory_order_relaxed);
 	}
 	/* Add trunk to trunks array. */
 	lc->trunks[trunk_idx] = trunk;
-	__atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&lc->n_trunk_valid, 1, rte_memory_order_relaxed);
 	/* Enqueue half of the index to global. */
 	ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
 	fetch_size = trunk->free >> 1;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 82e8298..fddfe09 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -240,7 +240,7 @@  struct mlx5_indexed_trunk {
 
 struct mlx5_indexed_cache {
 	struct mlx5_indexed_trunk **trunks;
-	volatile uint32_t n_trunk_valid; /* Trunks allocated. */
+	volatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */
 	uint32_t n_trunk; /* Trunk pointer array size. */
 	uint32_t ref_cnt;
 	uint32_t len;
@@ -266,7 +266,7 @@  struct mlx5_indexed_pool {
 			uint32_t free_list; /* Index to first free trunk. */
 		};
 		struct {
-			struct mlx5_indexed_cache *gc;
+			RTE_ATOMIC(struct mlx5_indexed_cache *) gc;
 			/* Global cache. */
 			struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
 			/* Local cache. */
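
RTE_ATOMIC() also wraps pointer types, as with the global cache pointer just above, so the pointer itself is loaded and stored through the same wrappers. A small sketch under invented names:

#include <rte_stdatomic.h>

struct cache;	/* opaque payload, for illustration only */

struct pool_sketch {
	RTE_ATOMIC(struct cache *) gc;	/* atomically published pointer */
};

static inline struct cache *
pool_load_gc(struct pool_sketch *p)
{
	/* Before: return __atomic_load_n(&p->gc, __ATOMIC_RELAXED); */
	return rte_atomic_load_explicit(&p->gc, rte_memory_order_relaxed);
}
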
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f900384..98c39a5 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -261,8 +261,8 @@ 
 	uint32_t timeout = 0;
 
 	/* Check and wait all close tasks done. */
-	while (__atomic_load_n(&priv->dev_close_progress,
-		__ATOMIC_RELAXED) != 0 && timeout < 1000) {
+	while (rte_atomic_load_explicit(&priv->dev_close_progress,
+		rte_memory_order_relaxed) != 0 && timeout < 1000) {
 		rte_delay_us_sleep(10000);
 		timeout++;
 	}
@@ -294,8 +294,8 @@ 
 			priv->last_c_thrd_idx = 0;
 		else
 			priv->last_c_thrd_idx++;
-		__atomic_store_n(&priv->dev_close_progress,
-			1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&priv->dev_close_progress,
+			1, rte_memory_order_relaxed);
 		if (mlx5_vdpa_task_add(priv,
 			priv->last_c_thrd_idx,
 			MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
@@ -319,8 +319,8 @@ 
 	if (!priv->connected)
 		mlx5_vdpa_dev_cache_clean(priv);
 	priv->vid = 0;
-	__atomic_store_n(&priv->dev_close_progress, 0,
-		__ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&priv->dev_close_progress, 0,
+		rte_memory_order_relaxed);
 	priv->state = MLX5_VDPA_STATE_PROBED;
 	DRV_LOG(INFO, "vDPA device %d was closed.", vid);
 	return ret;
@@ -664,7 +664,9 @@ 
 static int
 mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
 {
-	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+	RTE_ATOMIC(uint32_t) err_cnt = 0;
+	uint32_t task_num = 0;
 	uint32_t max_queues, index, thrd_idx, data[1];
 	struct mlx5_vdpa_virtq *virtq;
 
@@ -847,8 +849,8 @@ 
 		if (conf_thread_mng.initializer_priv == priv)
 			if (mlx5_vdpa_mult_threads_create())
 				goto error;
-		__atomic_fetch_add(&conf_thread_mng.refcnt, 1,
-			__ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&conf_thread_mng.refcnt, 1,
+			rte_memory_order_relaxed);
 	}
 	if (mlx5_vdpa_create_dev_resources(priv))
 		goto error;
@@ -937,8 +939,8 @@ 
 	if (priv->vdev)
 		rte_vdpa_unregister_device(priv->vdev);
 	if (priv->use_c_thread)
-		if (__atomic_fetch_sub(&conf_thread_mng.refcnt,
-			1, __ATOMIC_RELAXED) == 1)
+		if (rte_atomic_fetch_sub_explicit(&conf_thread_mng.refcnt,
+			1, rte_memory_order_relaxed) == 1)
 			mlx5_vdpa_mult_threads_destroy(true);
 	rte_free(priv);
 }
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 7b37c98..0cc67ed 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -93,8 +93,8 @@  enum mlx5_vdpa_task_type {
 struct mlx5_vdpa_task {
 	struct mlx5_vdpa_priv *priv;
 	enum mlx5_vdpa_task_type type;
-	uint32_t *remaining_cnt;
-	uint32_t *err_cnt;
+	RTE_ATOMIC(uint32_t) *remaining_cnt;
+	RTE_ATOMIC(uint32_t) *err_cnt;
 	uint32_t idx;
 } __rte_packed __rte_aligned(4);
 
@@ -107,7 +107,7 @@  struct mlx5_vdpa_c_thread {
 
 struct mlx5_vdpa_conf_thread_mng {
 	void *initializer_priv;
-	uint32_t refcnt;
+	RTE_ATOMIC(uint32_t) refcnt;
 	uint32_t max_thrds;
 	pthread_mutex_t cthrd_lock;
 	struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
@@ -212,7 +212,7 @@  struct mlx5_vdpa_priv {
 	uint64_t features; /* Negotiated features. */
 	uint16_t log_max_rqt_size;
 	uint16_t last_c_thrd_idx;
-	uint16_t dev_close_progress;
+	RTE_ATOMIC(uint16_t) dev_close_progress;
 	uint16_t num_mrs; /* Number of memory regions. */
 	struct mlx5_vdpa_steer steer;
 	struct mlx5dv_var *var;
@@ -581,13 +581,13 @@  int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
 mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
 		uint32_t thrd_idx,
 		enum mlx5_vdpa_task_type task_type,
-		uint32_t *remaining_cnt, uint32_t *err_cnt,
+		RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
 		void **task_data, uint32_t num);
 int
 mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
 bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
-		uint32_t *err_cnt, uint32_t sleep_time);
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+		RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time);
 int
 mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
 void
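
The header changes are what let the .c conversions compile under the stdatomic build: RTE_ATOMIC(T) marks the declared type of a field, variable, or pointee as atomic (it expands to _Atomic(T) when RTE_ENABLE_STDATOMIC is set and to plain T otherwise), and the rte_atomic_*_explicit() helpers expect their operands to be declared that way. A minimal sketch of the declaration pattern, with hypothetical names rather than the real mlx5 structures:

#include <stdint.h>
#include <rte_stdatomic.h>

/* struct fields accessed concurrently carry the qualifier ... */
struct dev_state {
	RTE_ATOMIC(uint16_t) close_progress; /* polled by another thread */
	RTE_ATOMIC(uint32_t) refcnt;         /* shared reference count */
	uint32_t max_thrds;                  /* plain, single-threaded field */
};

/* ... and so do pointer parameters, matching the prototype edits above */
static inline uint32_t
tasks_pending(RTE_ATOMIC(uint32_t) *remaining_cnt)
{
	return rte_atomic_load_explicit(remaining_cnt,
			rte_memory_order_relaxed);
}
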
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
index 68ed841..84f611c 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
@@ -48,7 +48,7 @@ 
 mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
 		uint32_t thrd_idx,
 		enum mlx5_vdpa_task_type task_type,
-		uint32_t *remaining_cnt, uint32_t *err_cnt,
+		RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
 		void **task_data, uint32_t num)
 {
 	struct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;
@@ -70,8 +70,8 @@ 
 		return -1;
 	for (i = 0 ; i < num; i++)
 		if (task[i].remaining_cnt)
-			__atomic_fetch_add(task[i].remaining_cnt, 1,
-				__ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(task[i].remaining_cnt, 1,
+				rte_memory_order_relaxed);
 	/* wake up conf thread. */
 	pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
 	pthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);
@@ -80,16 +80,16 @@ 
 }
 
 bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
-		uint32_t *err_cnt, uint32_t sleep_time)
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+		RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time)
 {
 	/* Check and wait all tasks done. */
-	while (__atomic_load_n(remaining_cnt,
-		__ATOMIC_RELAXED) != 0) {
+	while (rte_atomic_load_explicit(remaining_cnt,
+		rte_memory_order_relaxed) != 0) {
 		rte_delay_us_sleep(sleep_time);
 	}
-	if (__atomic_load_n(err_cnt,
-		__ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(err_cnt,
+		rte_memory_order_relaxed)) {
 		DRV_LOG(ERR, "Tasks done with error.");
 		return true;
 	}
@@ -137,8 +137,8 @@ 
 			if (ret) {
 				DRV_LOG(ERR,
 				"Failed to register mr %d.", task.idx);
-				__atomic_fetch_add(task.err_cnt, 1,
-				__ATOMIC_RELAXED);
+				rte_atomic_fetch_add_explicit(task.err_cnt, 1,
+				rte_memory_order_relaxed);
 			}
 			break;
 		case MLX5_VDPA_TASK_SETUP_VIRTQ:
@@ -149,8 +149,8 @@ 
 			if (ret) {
 				DRV_LOG(ERR,
 					"Failed to setup virtq %d.", task.idx);
-				__atomic_fetch_add(
-					task.err_cnt, 1, __ATOMIC_RELAXED);
+				rte_atomic_fetch_add_explicit(
+					task.err_cnt, 1, rte_memory_order_relaxed);
 			}
 			virtq->enable = 1;
 			pthread_mutex_unlock(&virtq->virtq_lock);
@@ -164,9 +164,9 @@ 
 				DRV_LOG(ERR,
 				"Failed to stop virtq %d.",
 				task.idx);
-				__atomic_fetch_add(
+				rte_atomic_fetch_add_explicit(
 					task.err_cnt, 1,
-					__ATOMIC_RELAXED);
+					rte_memory_order_relaxed);
 				pthread_mutex_unlock(&virtq->virtq_lock);
 				break;
 			}
@@ -176,9 +176,9 @@ 
 				DRV_LOG(ERR,
 		"Failed to get negotiated features virtq %d.",
 				task.idx);
-				__atomic_fetch_add(
+				rte_atomic_fetch_add_explicit(
 					task.err_cnt, 1,
-					__ATOMIC_RELAXED);
+					rte_memory_order_relaxed);
 				pthread_mutex_unlock(&virtq->virtq_lock);
 				break;
 			}
@@ -200,9 +200,9 @@ 
 			if (!priv->connected)
 				mlx5_vdpa_dev_cache_clean(priv);
 			priv->vid = 0;
-			__atomic_store_n(
+			rte_atomic_store_explicit(
 				&priv->dev_close_progress, 0,
-				__ATOMIC_RELAXED);
+				rte_memory_order_relaxed);
 			break;
 		case MLX5_VDPA_TASK_PREPARE_VIRTQ:
 			ret = mlx5_vdpa_virtq_single_resource_prepare(
@@ -211,9 +211,9 @@ 
 				DRV_LOG(ERR,
 				"Failed to prepare virtq %d.",
 				task.idx);
-				__atomic_fetch_add(
+				rte_atomic_fetch_add_explicit(
 				task.err_cnt, 1,
-				__ATOMIC_RELAXED);
+				rte_memory_order_relaxed);
 			}
 			break;
 		default:
@@ -222,8 +222,8 @@ 
 			break;
 		}
 		if (task.remaining_cnt)
-			__atomic_fetch_sub(task.remaining_cnt,
-			1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_sub_explicit(task.remaining_cnt,
+			1, rte_memory_order_relaxed);
 	}
 	return 0;
 }
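
The counters threaded through mlx5_vdpa_task_add() follow a simple completion protocol: the submitter increments *remaining_cnt once per queued task, the worker increments *err_cnt on failure and decrements *remaining_cnt when a task finishes, and the waiter polls until remaining_cnt drops to zero; the relaxed ordering is carried over unchanged from the original __atomic_* code. A minimal sketch of the wait side, using hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_cycles.h>

/* hypothetical helper mirroring the wait-loop pattern above */
static bool
wait_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
		RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_us)
{
	/* spin until every queued task has decremented the counter */
	while (rte_atomic_load_explicit(remaining_cnt,
			rte_memory_order_relaxed) != 0)
		rte_delay_us_sleep(sleep_us);
	/* true when at least one task reported a failure */
	return rte_atomic_load_explicit(err_cnt,
			rte_memory_order_relaxed) != 0;
}
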
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 0fa671f..a207734 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -92,7 +92,9 @@ 
 int
 mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
 {
-	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+	RTE_ATOMIC(uint32_t) err_cnt = 0;
+	uint32_t task_num = 0;
 	uint32_t i, thrd_idx, data[1];
 	struct mlx5_vdpa_virtq *virtq;
 	uint64_t features;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index e333f0b..4dfe800 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -279,7 +279,9 @@ 
 	uint8_t mode = 0;
 	int ret = -rte_errno;
 	uint32_t i, thrd_idx, data[1];
-	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+	RTE_ATOMIC(uint32_t) err_cnt = 0;
+	uint32_t task_num = 0;
 	struct rte_vhost_memory *mem = mlx5_vdpa_vhost_mem_regions_prepare
 			(priv->vid, &mode, &priv->vmem_info.size,
 			&priv->vmem_info.gcd, &priv->vmem_info.entries_num);
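
The remaining files (mlx5_vdpa_lm.c, mlx5_vdpa_mem.c, mlx5_vdpa_virtq.c) all make the same local-variable change: the combined declaration is split so that only the two counters whose addresses are handed to the worker threads get the RTE_ATOMIC() specifier, while task_num stays a plain local. Shown side by side for clarity (same content as the hunks, not new code):

	/* before: all three declared together, none atomic */
	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;

	/* after: only the counters shared with the worker threads are atomic */
	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
	RTE_ATOMIC(uint32_t) err_cnt = 0;
	uint32_t task_num = 0;
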
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 607e290..093cdd0 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -666,7 +666,9 @@ 
 {
 	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
 	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
-	uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+	RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+	RTE_ATOMIC(uint32_t) err_cnt = 0;
+	uint32_t task_num = 0;
 	uint32_t i, thrd_idx, data[1];
 	struct mlx5_vdpa_virtq *virtq;
 	struct rte_vhost_vring vq;