Replace the use of GCC builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
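
For reference, a minimal sketch of the conversion pattern applied throughout
this patch (the includes, function names and flag variables below are
illustrative placeholders, not code taken from the driver):

  #include <stdbool.h>
  #include <rte_stdatomic.h>

  /* before: GCC builtins operating on a plain bool flag */
  static bool flag_old;

  static void
  take_and_release_old(void)
  {
          if (!__atomic_test_and_set(&flag_old, __ATOMIC_SEQ_CST)) {
                  /* flag was clear, we own it now; do work, then release */
                  __atomic_clear(&flag_old, __ATOMIC_SEQ_CST);
          }
  }

  /* after: optional rte stdatomic API on an RTE_ATOMIC() specified type */
  static RTE_ATOMIC(bool) flag_new;

  static void
  take_and_release_new(void)
  {
          if (!rte_atomic_exchange_explicit(&flag_new, 1,
                          rte_memory_order_seq_cst)) {
                  /* flag was clear, we own it now; do work, then release */
                  rte_atomic_store_explicit(&flag_new, 0,
                          rte_memory_order_seq_cst);
          }
  }

Loads follow the same pattern (__atomic_load_n becomes
rte_atomic_load_explicit), and standalone fences keep
rte_atomic_thread_fence() but take an rte_memory_order_* argument instead
of an __ATOMIC_* one.
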
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 14 ++++++++------
 drivers/net/ixgbe/ixgbe_ethdev.h |  2 +-
 drivers/net/ixgbe/ixgbe_rxtx.c   |  4 ++--
 3 files changed, 11 insertions(+), 9 deletions(-)

--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1130,7 +1130,7 @@ struct rte_ixgbe_xstats_name_off {
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbe_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1638,7 +1638,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbevf_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
@@ -4203,7 +4203,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
/* NOTE: review for potential ordering optimization */
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -4240,7 +4240,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -4336,7 +4336,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
ixgbe_dev_wait_setup_link_complete(dev, 0);
/* NOTE: review for potential ordering optimization */
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the IXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -4348,7 +4349,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR,
"Create link thread failed!");
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -511,7 +511,7 @@ struct ixgbe_adapter {
*/
uint8_t pflink_fullchk;
uint8_t mac_ctrl_frame_fwd;
- bool link_thread_running;
+ RTE_ATOMIC(bool) link_thread_running;
rte_thread_t link_thread_tid;
};
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1831,7 +1831,7 @@ const alignas(RTE_CACHE_LINE_SIZE) uint32_t
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
@@ -2114,7 +2114,7 @@ const alignas(RTE_CACHE_LINE_SIZE) uint32_t
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;