@@ -22,6 +22,8 @@
* https://locklessinc.com/articles/locks/
*/
+#include <stdatomic.h>
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -57,7 +59,7 @@
#define RTE_RWLOCK_READ 0x4 /* Reader increment */
typedef struct __rte_lockable {
- int32_t cnt;
+ int32_t _Atomic cnt;
} rte_rwlock_t;
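
Review note: the hunk above is the heart of the patch; once cnt is declared _Atomic, every atomic_*_explicit() call below can operate on it directly, with no casts. For readers of the diff, a small standalone illustration of how the counter encodes lock state. The WAIT (0x1) and WRITE (0x2) values are assumptions, chosen to be consistent with RTE_RWLOCK_READ being 0x4 and with the x < RTE_RWLOCK_WRITE tests later in the file; the real values live in the unmodified part of the header.

    /* Illustration only, not part of the patch. Assumed layout:
     * bit 0 = writer waiting, bit 1 = writer active, bits 2..31 = reader
     * count (each reader adds RTE_RWLOCK_READ = 0x4).
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t cnt = 2 * 0x4 + 0x1; /* two readers, one pending writer */

        printf("readers=%d waiting=%d writing=%d\n",
               (int)(cnt >> 2), (int)(cnt & 0x1), (int)((cnt & 0x2) >> 1));
        return 0; /* prints: readers=2 waiting=1 writing=0 */
    }
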
/**
@@ -92,21 +94,21 @@
while (1) {
/* Wait while writer is present or pending */
- while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED)
+ while (atomic_load_explicit(&rwl->cnt, memory_order_relaxed)
& RTE_RWLOCK_MASK)
rte_pause();
/* Try to get read lock */
- x = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ,
- __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ;
+ x = atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ,
+ memory_order_acquire) + RTE_RWLOCK_READ;
/* If no writer, then acquire was successful */
if (likely(!(x & RTE_RWLOCK_MASK)))
return;
/* Lost race with writer, back out the change. */
- __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ,
- __ATOMIC_RELAXED);
+ atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ,
+ memory_order_relaxed);
}
}
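
Review note: the ordering choices above carry over from the __atomic version unchanged. The acquire on the fetch_add pairs with the writer's release in rte_rwlock_write_unlock(), and the backout after a lost race can be relaxed because the reader never entered the critical section. A minimal caller sketch of the resulting API, assuming the initializer and the lock/unlock functions declared elsewhere in this header:

    /* Sketch only: a table guarded by the rwlock. Lookups may run
     * concurrently with each other; updates exclude everyone. */
    static rte_rwlock_t tbl_lock = RTE_RWLOCK_INITIALIZER;
    static int tbl[64];

    static int
    lookup(unsigned int i)
    {
        int v;

        rte_rwlock_read_lock(&tbl_lock);
        v = tbl[i];
        rte_rwlock_read_unlock(&tbl_lock);
        return v;
    }

    static void
    update(unsigned int i, int v)
    {
        rte_rwlock_write_lock(&tbl_lock);
        tbl[i] = v;
        rte_rwlock_write_unlock(&tbl_lock);
    }
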
@@ -127,20 +129,20 @@
{
int32_t x;
- x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+ x = atomic_load_explicit(&rwl->cnt, memory_order_relaxed);
/* fail if write lock is held or writer is pending */
if (x & RTE_RWLOCK_MASK)
return -EBUSY;
/* Try to get read lock */
- x = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ,
- __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ;
+ x = atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ,
+ memory_order_acquire) + RTE_RWLOCK_READ;
/* Back out if writer raced in */
if (unlikely(x & RTE_RWLOCK_MASK)) {
- __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ,
- __ATOMIC_RELEASE);
+ atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ,
+ memory_order_release);
return -EBUSY;
}
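
Review note: note the asymmetry with rte_rwlock_read_lock(); the backout here uses release rather than relaxed, which is faithful to the removed __ATOMIC_RELEASE. Callers treat -EBUSY as "a writer holds or is waiting for the lock":

    /* Sketch only: opportunistic read, fall back on contention. */
    if (rte_rwlock_read_trylock(&tbl_lock) == 0) {
        /* ... read shared state ... */
        rte_rwlock_read_unlock(&tbl_lock);
    } else {
        /* writer active or pending: defer or take a slow path */
    }
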
@@ -158,7 +160,7 @@
__rte_unlock_function(rwl)
__rte_no_thread_safety_analysis
{
- __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ, __ATOMIC_RELEASE);
+ atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ, memory_order_release);
}
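
Review note: the unlock conversions are mechanical, and the release ordering keeps every access inside the critical section from sinking below the subtraction, which is exactly what the acquire in the lock paths relies on. The one-to-one mapping used throughout the patch, for reference:

    /*
     * Illustration only:
     *   __atomic_load_n(p, __ATOMIC_X)          -> atomic_load_explicit(p, memory_order_x)
     *   __atomic_store_n(p, v, __ATOMIC_X)      -> atomic_store_explicit(p, v, memory_order_x)
     *   __atomic_fetch_add/sub/or(p, v, ...)    -> atomic_fetch_add/sub/or_explicit(p, v, ...)
     *   __atomic_compare_exchange_n(..., 1, ..) -> atomic_compare_exchange_weak_explicit(...)
     *   __atomic_compare_exchange_n(..., 0, ..) -> atomic_compare_exchange_strong_explicit(...)
     */
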
/**
@@ -178,10 +180,10 @@
{
int32_t x;
- x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+ x = atomic_load_explicit(&rwl->cnt, memory_order_relaxed);
if (x < RTE_RWLOCK_WRITE &&
- __atomic_compare_exchange_n(&rwl->cnt, &x, x + RTE_RWLOCK_WRITE,
- 1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+ atomic_compare_exchange_weak_explicit(&rwl->cnt, &x, x + RTE_RWLOCK_WRITE,
+ memory_order_acquire, memory_order_relaxed))
return 0;
else
return -EBUSY;
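
Review note: the weak variant is the correct mapping here; the literal 1 on the removed lines is the builtin's weak flag (see the mapping table above). A weak compare-exchange may fail spuriously even when cnt still equals x, but in a trylock that merely turns into a spurious -EBUSY, which callers must tolerate anyway:

    /* Sketch only: a spurious -EBUSY is indistinguishable from losing a
     * race, so retry loops around the trylock remain correct.
     * do_other_work() is a hypothetical helper. */
    while (rte_rwlock_write_trylock(&tbl_lock) != 0)
        do_other_work();
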
@@ -201,22 +203,22 @@
int32_t x;
while (1) {
- x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+ x = atomic_load_explicit(&rwl->cnt, memory_order_relaxed);
/* No readers or writers? */
if (likely(x < RTE_RWLOCK_WRITE)) {
/* Turn off RTE_RWLOCK_WAIT, turn on RTE_RWLOCK_WRITE */
- if (__atomic_compare_exchange_n(&rwl->cnt, &x, RTE_RWLOCK_WRITE, 1,
- __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+ if (atomic_compare_exchange_weak_explicit(&rwl->cnt, &x, RTE_RWLOCK_WRITE,
+ memory_order_acquire, memory_order_relaxed))
return;
}
/* Turn on writer wait bit */
if (!(x & RTE_RWLOCK_WAIT))
- __atomic_fetch_or(&rwl->cnt, RTE_RWLOCK_WAIT, __ATOMIC_RELAXED);
+ atomic_fetch_or_explicit(&rwl->cnt, RTE_RWLOCK_WAIT, memory_order_relaxed);
/* Wait until no readers before trying again */
- while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) > RTE_RWLOCK_WAIT)
+ while (atomic_load_explicit(&rwl->cnt, memory_order_relaxed) > RTE_RWLOCK_WAIT)
rte_pause();
}
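
Review note: three details worth calling out in the writer path above. First, a successful CAS installs RTE_RWLOCK_WRITE as the whole counter value, which atomically clears the wait bit as the old comment says. Second, the wait bit is what stalls new readers: rte_rwlock_read_lock() spins while any RTE_RWLOCK_MASK bit is set, so setting WAIT stops fresh readers from starving the writer. Third, the exit condition of the inner spin admits exactly the reader-free states. With the bit values assumed earlier:

    /*
     * Illustration only (assumed WAIT = 0x1, WRITE = 0x2, READ = 0x4):
     *
     *   x == 0x0  free                        -> CAS to WRITE can succeed
     *   x == 0x1  free, writer(s) waiting     -> CAS also clears the WAIT bit
     *   x >= 0x2  writer active or readers in -> set WAIT, spin while x > WAIT
     */
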
@@ -233,7 +235,7 @@
__rte_unlock_function(rwl)
__rte_no_thread_safety_analysis
{
- __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_WRITE, __ATOMIC_RELEASE);
+ atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_WRITE, memory_order_release);
}
/**
@@ -247,7 +249,7 @@
static inline int
rte_rwlock_write_is_locked(rte_rwlock_t *rwl)
{
- if (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) & RTE_RWLOCK_WRITE)
+ if (atomic_load_explicit(&rwl->cnt, memory_order_relaxed) & RTE_RWLOCK_WRITE)
return 1;
return 0;
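
Review note: this is the last rwlock hunk; the @@ line numbers restart below, so the remaining hunks appear to target the spinlock header. The key change there is volatile int -> int _Atomic, which is stronger than it looks: volatile only stops the compiler from caching the value, while _Atomic additionally gives concurrent read-modify-write operations defined behavior. A two-line illustration:

    _Atomic int a = 0;
    volatile int v = 0;

    a++; /* a real atomic RMW (seq_cst fetch-add) in C11 */
    v++; /* plain load/add/store: a data race under contention */
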
@@ -17,6 +17,8 @@
* All locks must be initialised before use, and only initialised once.
*/
+#include <stdatomic.h>
+
#include <rte_lcore.h>
#ifdef RTE_FORCE_INTRINSICS
#include <rte_common.h>
@@ -28,7 +30,7 @@
* The rte_spinlock_t type.
*/
typedef struct __rte_lockable {
- volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
+ int _Atomic locked; /**< lock status 0 = unlocked, 1 = locked */
} rte_spinlock_t;
/**
@@ -65,10 +67,10 @@
{
int exp = 0;
- while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
- rte_wait_until_equal_32((volatile uint32_t *)&sl->locked,
- 0, __ATOMIC_RELAXED);
+ while (!atomic_compare_exchange_strong_explicit(&sl->locked, &exp, 1,
+ memory_order_acquire, memory_order_relaxed)) {
+ rte_wait_until_equal_32((uint32_t _Atomic *)&sl->locked,
+ 0, memory_order_relaxed);
exp = 0;
}
}
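
Review note: two flags on the hunk above. The strong compare-exchange faithfully preserves the builtin's weak = 0 argument, matching the mapping table earlier. And the (uint32_t _Atomic *) cast assumes rte_wait_until_equal_32() has itself been converted in this series to take an _Atomic pointer and a memory_order argument; if it still takes volatile uint32_t * and an __ATOMIC_* int, this call will not compile. Basic usage of the lock, for reference:

    /* Sketch only: spinlocks busy-wait, so keep the critical section
     * short and never sleep or block while holding one. */
    static rte_spinlock_t sl = RTE_SPINLOCK_INITIALIZER;

    rte_spinlock_lock(&sl);
    /* ... short critical section ... */
    rte_spinlock_unlock(&sl);
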
@@ -89,7 +91,7 @@
rte_spinlock_unlock(rte_spinlock_t *sl)
__rte_no_thread_safety_analysis
{
- __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
+ atomic_store_explicit(&sl->locked, 0, memory_order_release);
}
#endif
@@ -112,9 +114,8 @@
__rte_no_thread_safety_analysis
{
int exp = 0;
- return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
- 0, /* disallow spurious failure */
- __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+ return atomic_compare_exchange_strong_explicit(&sl->locked, &exp, 1,
+ memory_order_acquire, memory_order_relaxed);
}
#endif
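
Review note: strong is required here, not merely faithful. The removed lines carried the comment "disallow spurious failure" because a weak CAS could report a free lock as busy; that intent is now encoded in the function name, though keeping the comment would not hurt. Caller sketch:

    /* Sketch only: rte_spinlock_trylock() returns 1 on success, 0 if the
     * lock is held; no spinning either way. */
    if (rte_spinlock_trylock(&sl)) {
        /* ... critical section ... */
        rte_spinlock_unlock(&sl);
    }
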
@@ -128,7 +129,7 @@
*/
static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
{
- return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
+ return atomic_load_explicit(&sl->locked, memory_order_acquire);
}
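
Review note: the acquire load is carried over unchanged. The answer is stale the moment it is returned, so the function is only meaningful for assertions and diagnostics, e.g.:

    /* Sketch only: assert a lock invariant inside a helper that requires
     * the caller to hold sl. RTE_ASSERT is assumed from rte_debug.h. */
    RTE_ASSERT(rte_spinlock_is_locked(&sl));
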
/**
@@ -78,7 +78,7 @@ static inline int rte_tm_supported(void)
}
static inline int
-rte_try_tm(volatile int *lock)
+rte_try_tm(int _Atomic *lock)
{
int i, retries;
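
Review note: the excerpt ends mid-function and the body of rte_try_tm() is not shown. The signature change mirrors the struct change because the transactional path reads *lock inside a hardware transaction. A condensed sketch of that lock-elision pattern, assuming DPDK's RTM intrinsics from rte_rtm.h (rte_xbegin(), rte_xabort(), RTE_XBEGIN_STARTED, RTE_XABORT_LOCK_BUSY); try_elide() is a hypothetical name, not the verbatim function:

    /* Returns 1 if the critical section may run transactionally,
     * 0 if the caller must take the lock for real. */
    static inline int
    try_elide(int _Atomic *lock)
    {
        if (rte_xbegin() == RTE_XBEGIN_STARTED) {
            if (*lock == 0)
                return 1; /* transaction stays open; a later rte_xend()
                           * commits it, and a concurrent lock holder
                           * aborts us instead of making us spin */
            /* Lock already held: abort so we do not run unprotected. */
            rte_xabort(RTE_XABORT_LOCK_BUSY);
        }
        return 0; /* aborted or RTM unavailable: take the lock */
    }
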