[3/3] eal: use C11 memory model GCC builtin atomics

Message ID 1679927420-26737-4-git-send-email-roretzla@linux.microsoft.com (mailing list archive)
State Accepted, archived
Delegated to: David Marchand
Series use C11 memory model GCC builtin atomics

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/github-robot: build success github build: passed
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-abi-testing success Testing PASS
ci/iol-unit-testing success Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-testing success Testing PASS
ci/iol-x86_64-unit-testing fail Testing issues
ci/iol-aarch64-compile-testing success Testing PASS
ci/intel-Functional success Functional PASS

Commit Message

Tyler Retzlaff March 27, 2023, 2:30 p.m. UTC
Replace use of __sync_fetch_and_add, __sync_fetch_and_sub,
__sync_add_and_fetch and __sync_sub_and_fetch with __atomic_fetch_add
and __atomic_fetch_sub using __ATOMIC_SEQ_CST memory ordering.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 lib/eal/include/generic/rte_atomic.h | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
  
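For context, the __sync builtins imply a full memory barrier, so the direct one-to-one mapping onto the C11 memory model builtins uses __ATOMIC_SEQ_CST. A minimal, self-contained sketch of the substitution for the plain add/sub cases (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int16_t cnt = 0;

	/* legacy full-barrier builtin: fetches the old value, then adds */
	(void)__sync_fetch_and_add(&cnt, 5);

	/* C11-model builtin with the strongest (equivalent) ordering */
	(void)__atomic_fetch_add(&cnt, 5, __ATOMIC_SEQ_CST);

	printf("cnt = %d\n", cnt);	/* prints 10 */
	return 0;
}

Both forms return the value the counter held before the addition; only the ordering specification changes.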

Patch

diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
index 234b268..58df843 100644
--- a/lib/eal/include/generic/rte_atomic.h
+++ b/lib/eal/include/generic/rte_atomic.h
@@ -243,7 +243,7 @@ 
 static inline void
 rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
 {
-	__sync_fetch_and_add(&v->cnt, inc);
+	__atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST);
 }
 
 /**
@@ -257,7 +257,7 @@ 
 static inline void
 rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
 {
-	__sync_fetch_and_sub(&v->cnt, dec);
+	__atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST);
 }
 
 /**
@@ -310,7 +310,7 @@ 
 static inline int16_t
 rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
 {
-	return __sync_add_and_fetch(&v->cnt, inc);
+	return __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST) + inc;
 }
 
 /**
@@ -330,7 +330,7 @@ 
 static inline int16_t
 rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
 {
-	return __sync_sub_and_fetch(&v->cnt, dec);
+	return __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST) - dec;
 }
 
 /**
@@ -349,7 +349,7 @@ 
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 {
-	return __sync_add_and_fetch(&v->cnt, 1) == 0;
+	return __atomic_fetch_add(&v->cnt, 1, __ATOMIC_SEQ_CST) + 1 == 0;
 }
 #endif
 
@@ -369,7 +369,7 @@  static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 {
-	return __sync_sub_and_fetch(&v->cnt, 1) == 0;
+	return __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_SEQ_CST) - 1 == 0;
 }
 #endif
 
@@ -522,7 +522,7 @@  static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline void
 rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
 {
-	__sync_fetch_and_add(&v->cnt, inc);
+	__atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST);
 }
 
 /**
@@ -536,7 +536,7 @@  static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline void
 rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
 {
-	__sync_fetch_and_sub(&v->cnt, dec);
+	__atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST);
 }
 
 /**
@@ -589,7 +589,7 @@  static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline int32_t
 rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
 {
-	return __sync_add_and_fetch(&v->cnt, inc);
+	return __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST) + inc;
 }
 
 /**
@@ -609,7 +609,7 @@  static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline int32_t
 rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
 {
-	return __sync_sub_and_fetch(&v->cnt, dec);
+	return __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST) - dec;
 }
 
 /**
@@ -628,7 +628,7 @@  static inline void rte_atomic16_clear(rte_atomic16_t *v)
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 {
-	return __sync_add_and_fetch(&v->cnt, 1) == 0;
+	return __atomic_fetch_add(&v->cnt, 1, __ATOMIC_SEQ_CST) + 1 == 0;
 }
 #endif
 
@@ -648,7 +648,7 @@  static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 {
-	return __sync_sub_and_fetch(&v->cnt, 1) == 0;
+	return __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_SEQ_CST) - 1 == 0;
 }
 #endif
 
@@ -854,7 +854,7 @@  static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline void
 rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
 {
-	__sync_fetch_and_add(&v->cnt, inc);
+	__atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST);
 }
 #endif
 
@@ -873,7 +873,7 @@  static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline void
 rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
 {
-	__sync_fetch_and_sub(&v->cnt, dec);
+	__atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST);
 }
 #endif
 
@@ -931,7 +931,7 @@  static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline int64_t
 rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
 {
-	return __sync_add_and_fetch(&v->cnt, inc);
+	return __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST) + inc;
 }
 #endif
 
@@ -955,7 +955,7 @@  static inline void rte_atomic32_clear(rte_atomic32_t *v)
 static inline int64_t
 rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
 {
-	return __sync_sub_and_fetch(&v->cnt, dec);
+	return __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST) - dec;
 }
 #endif
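
For the *_add_return/*_sub_return and *_inc_and_test/*_dec_and_test paths, the old __sync_add_and_fetch/__sync_sub_and_fetch returned the new value, while __atomic_fetch_add/__atomic_fetch_sub return the old one; the patch therefore adds (or subtracts) the operand to the returned value. A small standalone check of that equivalence (illustrative values, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int32_t a = 3, b = 3;

	/* add-and-fetch: returns the updated value */
	int32_t r_sync = __sync_add_and_fetch(&a, 4);

	/* fetch-and-add returns the prior value; adding the operand back
	 * recovers add-and-fetch semantics */
	int32_t r_atomic = __atomic_fetch_add(&b, 4, __ATOMIC_SEQ_CST) + 4;

	assert(r_sync == 7 && r_atomic == 7 && a == b);
	return 0;
}

GCC also provides __atomic_add_fetch/__atomic_sub_fetch, which return the new value directly; the patch instead keeps the fetch-add form plus an explicit adjustment.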