[3/5] hash: use rte atomic thread fence
Use __rte_atomic_thread_fence() instead of directly calling the GCC
builtin __atomic_thread_fence().
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
lib/hash/rte_cuckoo_hash.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
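
Reviewer note (not part of the commit message): __rte_atomic_thread_fence()
is the EAL wrapper expected to resolve to the C11 atomic_thread_fence() when
RTE_ENABLE_STDATOMIC is enabled, and to the GCC builtin otherwise. A rough,
simplified sketch of that mapping, assuming the definitions live in
lib/eal/include/rte_stdatomic.h (the header is authoritative, not this sketch):

/* Simplified sketch only; not the verbatim rte_stdatomic.h contents. */
#ifdef RTE_ENABLE_STDATOMIC
#include <stdatomic.h>
#define __rte_atomic_thread_fence(memorder) atomic_thread_fence(memorder)
#else
#define __rte_atomic_thread_fence(memorder) __atomic_thread_fence(memorder)
#endif

Under either expansion the rte_memory_order_* arguments already used at the
call sites below should map to the corresponding memory_order_* or
__ATOMIC_* values, so the substitution is mechanical and does not change the
ordering semantics.
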
@@ -871,7 +871,7 @@ struct rte_hash *
/* The store to sig_current should not
* move above the store to tbl_chng_cnt.
*/
- __atomic_thread_fence(rte_memory_order_release);
+ __rte_atomic_thread_fence(rte_memory_order_release);
}
/* Need to swap current/alt sig to allow later
@@ -903,7 +903,7 @@ struct rte_hash *
/* The store to sig_current should not
* move above the store to tbl_chng_cnt.
*/
- __atomic_thread_fence(rte_memory_order_release);
+ __rte_atomic_thread_fence(rte_memory_order_release);
}
curr_bkt->sig_current[curr_slot] = sig;
@@ -1396,7 +1396,7 @@ struct rte_hash *
/* The loads of sig_current in search_one_bucket
* should not move below the load from tbl_chng_cnt.
*/
- __atomic_thread_fence(rte_memory_order_acquire);
+ __rte_atomic_thread_fence(rte_memory_order_acquire);
/* Re-read the table change counter to check if the
* table has changed during search. If yes, re-do
* the search.
@@ -1625,7 +1625,7 @@ struct rte_hash *
/* The store to sig_current should
* not move above the store to tbl_chng_cnt.
*/
- __atomic_thread_fence(rte_memory_order_release);
+ __rte_atomic_thread_fence(rte_memory_order_release);
}
last_bkt->sig_current[i] = NULL_SIGNATURE;
rte_atomic_store_explicit(&last_bkt->key_idx[i],
@@ -2216,7 +2216,7 @@ struct rte_hash *
/* The loads of sig_current in compare_signatures
* should not move below the load from tbl_chng_cnt.
*/
- __atomic_thread_fence(rte_memory_order_acquire);
+ __rte_atomic_thread_fence(rte_memory_order_acquire);
/* Re-read the table change counter to check if the
* table has changed during search. If yes, re-do
* the search.
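
For context on why the fences sit where they do, here is a standalone C11
sketch of the counter-and-fence pattern the comments above describe. The
names mirror the rte_cuckoo_hash fields, but this is illustrative code under
my own simplifying assumptions, not the DPDK implementation: the writer bumps
tbl_chng_cnt, issues a release fence, then moves the signature; the lock-free
reader searches, issues an acquire fence, then re-reads the counter and
retries if it changed.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins that mirror the roles of the rte_cuckoo_hash fields. */
static _Atomic uint32_t tbl_chng_cnt;	/* table change counter */
static _Atomic uint16_t sig_current;	/* a signature slot being relocated */

/* Writer side (key relocation under the writer lock): bump the counter,
 * then fence, then move the signature.  The release fence keeps the
 * sig_current store from being reordered above the counter update. */
static void
writer_move_signature(uint16_t new_sig)
{
	atomic_fetch_add_explicit(&tbl_chng_cnt, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&sig_current, new_sig, memory_order_relaxed);
}

/* Reader side (lock-free lookup): snapshot the counter, search, fence,
 * re-read the counter.  The acquire fence keeps the sig_current load from
 * being reordered below the second counter load; a changed counter means
 * the search raced with a relocation and must be retried. */
static uint16_t
reader_lookup(void)
{
	uint32_t cnt_before, cnt_after;
	uint16_t sig;

	do {
		cnt_before = atomic_load_explicit(&tbl_chng_cnt,
						  memory_order_acquire);
		sig = atomic_load_explicit(&sig_current, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		cnt_after = atomic_load_explicit(&tbl_chng_cnt,
						 memory_order_relaxed);
	} while (cnt_before != cnt_after);

	return sig;
}

int
main(void)
{
	writer_move_signature(0xabcd);
	printf("sig = 0x%x\n", (unsigned int)reader_lookup());
	return 0;
}

The same pairing appears in every hunk of this patch: the release fences
order the tbl_chng_cnt store before the sig_current stores on the
relocation/delete paths, and the acquire fences order the sig_current loads
before the tbl_chng_cnt re-read on the lookup paths.
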