@@ -235,7 +235,7 @@ struct ice_lock {
ice_alloc_dma_mem(__rte_unused struct ice_hw *hw,
struct ice_dma_mem *mem, u64 size)
{
- static uint64_t ice_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) ice_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -243,7 +243,7 @@ struct ice_lock {
return NULL;
snprintf(z_name, sizeof(z_name), "ice_dma_%" PRIu64,
- __atomic_fetch_add(&ice_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&ice_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
0, RTE_PGSIZE_2M);
if (!mz)
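These first two hunks convert the name counter in ice_alloc_dma_mem(). Each DMA allocation needs a process-unique memzone name, so the driver tags it with a monotonically increasing id; relaxed ordering is enough because the counter only has to yield distinct values and publishes no other data. A minimal standalone sketch of the same pattern (demo_reserve_unique() is a hypothetical name, not driver code):

#include <stdio.h>
#include <inttypes.h>
#include <rte_memzone.h>
#include <rte_stdatomic.h>

/* Reserve a memzone under a unique name. The relaxed fetch-add only
 * guarantees that concurrent callers get distinct ids; it orders no
 * other memory accesses. */
static const struct rte_memzone *
demo_reserve_unique(size_t size)
{
	static RTE_ATOMIC(uint64_t) demo_zone_id; /* zero-initialized */
	char name[RTE_MEMZONE_NAMESIZE];

	snprintf(name, sizeof(name), "demo_dma_%" PRIu64,
		 rte_atomic_fetch_add_explicit(&demo_zone_id, 1,
					       rte_memory_order_relaxed));
	return rte_memzone_reserve(name, size, SOCKET_ID_ANY, 0);
}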
@@ -764,7 +764,7 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
rte_spinlock_init(&hw->vc_cmd_queue_lock);
TAILQ_INIT(&hw->vc_cmd_queue);
- __atomic_store_n(&hw->vsi_update_thread_num, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->vsi_update_thread_num, 0, rte_memory_order_relaxed);
hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
if (hw->arq_buf == NULL) {
@@ -888,8 +888,8 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
ice_dcf_dev_interrupt_handler, hw);
/* Wait for all `ice-thread` threads to exit. */
- while (__atomic_load_n(&hw->vsi_update_thread_num,
- __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(&hw->vsi_update_thread_num,
+ rte_memory_order_acquire) != 0)
rte_delay_ms(ICE_DCF_CHECK_INTERVAL);
ice_dcf_mode_disable(hw);
@@ -105,7 +105,7 @@ struct ice_dcf_hw {
void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw,
uint8_t *msg, uint16_t msglen);
- int vsi_update_thread_num;
+ RTE_ATOMIC(int) vsi_update_thread_num;
uint8_t *arq_buf;
@@ -1743,7 +1743,7 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
ice_dcf_adminq_need_retry(struct ice_adapter *ad)
{
return ad->hw.dcf_enabled &&
- !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
+ !rte_atomic_load_explicit(&ad->dcf_state_on, rte_memory_order_relaxed);
}
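dcf_state_on is a standalone flag: ice_dcf_adminq_need_retry() only needs its current value, and no other memory is published through it, so relaxed loads here (and the relaxed stores elsewhere in this patch) are sufficient. A minimal sketch of the same retry gate, with hypothetical names:

#include <stdbool.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(bool) demo_state_on; /* hypothetical DCF-state flag */

/* Retry admin-queue commands while DCF is enabled but its state is off.
 * Relaxed suffices: the flag carries no dependent data. */
static bool
demo_need_retry(bool dcf_enabled)
{
	return dcf_enabled &&
	       !rte_atomic_load_explicit(&demo_state_on,
					 rte_memory_order_relaxed);
}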
/* Add UDP tunneling port */
@@ -1944,12 +1944,12 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
return -1;
}
- __atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true, rte_memory_order_relaxed);
if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
@@ -123,8 +123,8 @@ struct ice_dcf_reset_event_param {
container_of(hw, struct ice_dcf_adapter, real_hw);
struct ice_adapter *parent_adapter = &adapter->parent;
- __atomic_fetch_add(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_relaxed);
rte_thread_detach(rte_thread_self());
@@ -133,8 +133,8 @@ struct ice_dcf_reset_event_param {
rte_spinlock_lock(&vsi_update_lock);
if (!ice_dcf_handle_vsi_update_event(hw)) {
- __atomic_store_n(&parent_adapter->dcf_state_on, true,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true,
+ rte_memory_order_relaxed);
ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
hw->num_vfs, hw->vf_vsi_map);
}
@@ -156,8 +156,8 @@ struct ice_dcf_reset_event_param {
free(param);
- __atomic_fetch_sub(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_release);
return 0;
}
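Taken together, the vsi_update_thread_num hunks implement a counter-based join for the detached `ice-thread` workers: each thread increments the counter (relaxed) as it starts, decrements it with release order as its last action, and the close path, after unregistering the interrupt callback so no new workers can be launched, spins on an acquire load until the counter reads zero. The release/acquire pair guarantees that everything a worker wrote before exiting (including freeing param) is visible to the waiter. A condensed sketch of the protocol with hypothetical names:

#include <rte_cycles.h>
#include <rte_stdatomic.h>
#include <rte_thread.h>

static RTE_ATOMIC(int) demo_thread_num; /* hypothetical live-worker count */

static uint32_t
demo_worker(void *arg)
{
	(void)arg;
	rte_atomic_fetch_add_explicit(&demo_thread_num, 1,
				      rte_memory_order_relaxed);
	rte_thread_detach(rte_thread_self());

	/* ... work whose side effects the waiter must observe ... */

	/* Release: pairs with the acquire load in demo_wait_all(). */
	rte_atomic_fetch_sub_explicit(&demo_thread_num, 1,
				      rte_memory_order_release);
	return 0;
}

static void
demo_wait_all(void)
{
	/* Only valid once no further workers can be spawned. */
	while (rte_atomic_load_explicit(&demo_thread_num,
					rte_memory_order_acquire) != 0)
		rte_delay_ms(1);
}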
@@ -269,8 +269,8 @@ struct ice_dcf_reset_event_param {
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
pf_msg->event_data.vf_vsi_map.vf_id,
pf_msg->event_data.vf_vsi_map.vsi_id);
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
start_vsi_reset_thread(dcf_hw, true,
pf_msg->event_data.vf_vsi_map.vf_id);
break;
@@ -4062,9 +4062,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = &dev->data->dev_link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
@@ -4078,9 +4078,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
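Both link-status helpers move a struct rte_eth_link, which fits in exactly 64 bits, with a single atomic operation. The expected pointer is the destination itself, so the strong CAS compares the word against a value just read from the same address: it normally succeeds and behaves as a seq-cst 64-bit store (or fetch of the current value), failing with -1 only if the word changes mid-operation. The NOTE comments flag that a plain seq-cst store or load would likely suffice; a hedged sketch of that simpler alternative (not what this patch does; ethdev's own rte_eth_linkstatus_set() helper in ethdev_driver.h implements a similar idea for drivers):

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>
#include <rte_stdatomic.h>

/* Hypothetical simplification: publish the link word with one atomic
 * store. Assumes sizeof(struct rte_eth_link) == sizeof(uint64_t), the
 * same assumption the driver's casts already rely on. */
static void
demo_link_write(struct rte_eth_link *dst, const struct rte_eth_link *src)
{
	uint64_t word;

	memcpy(&word, src, sizeof(word));
	rte_atomic_store_explicit((uint64_t __rte_atomic *)dst, word,
				  rte_memory_order_seq_cst);
}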
@@ -621,7 +621,7 @@ struct ice_adapter {
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
/* True if DCF state of the associated PF is on */
- bool dcf_state_on;
+ RTE_ATOMIC(bool) dcf_state_on;
/* Set bit if the engine is disabled */
unsigned long disabled_engine_mask;
struct ice_parser *psr;
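RTE_ATOMIC() is the type-marking half of the rte_stdatomic.h layer used throughout this patch: when DPDK is built with RTE_ENABLE_STDATOMIC the field becomes C11 _Atomic and the rte_atomic_*_explicit() macros map onto the standard atomic_*_explicit() generics; otherwise the qualifier expands to nothing and the same macros fall back to the GCC __atomic builtins that the removed code called directly. Roughly, in abbreviated paraphrase of lib/eal/include/rte_stdatomic.h (not the verbatim header):

#ifdef RTE_ENABLE_STDATOMIC
#include <stdatomic.h>
#define __rte_atomic _Atomic
#define RTE_ATOMIC(type) _Atomic(type)
#define rte_memory_order_relaxed memory_order_relaxed
#define rte_atomic_store_explicit(ptr, val, mo) \
	atomic_store_explicit(ptr, val, mo)
#else
#define __rte_atomic
#define RTE_ATOMIC(type) type
#define rte_memory_order_relaxed __ATOMIC_RELAXED
#define rte_atomic_store_explicit(ptr, val, mo) \
	__atomic_store_n(ptr, val, mo)
#endif

Either way the call sites read identically, which is the point of the conversion.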