From patchwork Wed Aug  2 05:31:58 2023
X-Patchwork-Submitter: Tyler Retzlaff <roretzla@linux.microsoft.com>
X-Patchwork-Id: 129810
X-Patchwork-Delegate: thomas@monjalon.net
From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: Gaetan Rivet, Bruce Richardson, Thomas Monjalon, Nicolas Chautru,
 Yipeng Wang, Sameh Gobriel, Vladimir Medvedkin, Honnappa Nagarahalli,
 Konstantin Ananyev, Anatoly Burakov, Olivier Matz, Andrew Rybchenko,
 Joyce Kong, Erik Gabriel Carrillo, Liang Ma, Peter Mccarthy,
 Jerin Jacob, Maciej Czekaj, David Hunt, Ruifeng Wang, Min Zhou,
 David Christensen, Stanislaw Kardach, david.marchand@redhat.com,
 stephen@networkplumber.org, mb@smartsharesystems.com,
 Tyler Retzlaff <roretzla@linux.microsoft.com>
Subject: [PATCH v4 4/4] eal: adapt rte spinlock and rwlock APIs to use C11 atomics
Date: Tue, 1 Aug 2023 22:31:58 -0700
Message-Id: <1690954318-3126-5-git-send-email-roretzla@linux.microsoft.com>
In-Reply-To: <1690954318-3126-1-git-send-email-roretzla@linux.microsoft.com>
References: <1690837661-27573-1-git-send-email-roretzla@linux.microsoft.com>
 <1690954318-3126-1-git-send-email-roretzla@linux.microsoft.com>
List-Id: DPDK patches and discussions <dev.dpdk.org>

Adapt the rte_spinlock.h and rte_rwlock.h APIs to use standard C11
atomics. Update consumers of the spinlock and rwlock APIs for the
API break.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/eal/include/generic/rte_rwlock.h   | 46 ++++++++++++++++++----------------
 lib/eal/include/generic/rte_spinlock.h | 21 ++++++++--------
 lib/eal/x86/include/rte_spinlock.h     |  2 +-
 3 files changed, 36 insertions(+), 33 deletions(-)

diff --git a/lib/eal/include/generic/rte_rwlock.h b/lib/eal/include/generic/rte_rwlock.h
index 9e083bb..b659c4c 100644
--- a/lib/eal/include/generic/rte_rwlock.h
+++ b/lib/eal/include/generic/rte_rwlock.h
@@ -22,6 +22,8 @@
  * https://locklessinc.com/articles/locks/
  */
 
+#include <stdatomic.h>
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -57,7 +59,7 @@
 #define RTE_RWLOCK_READ	 0x4	/* Reader increment */
 
 typedef struct __rte_lockable {
-	int32_t cnt;
+	int32_t _Atomic cnt;
 } rte_rwlock_t;
 
 /**
@@ -92,21 +94,21 @@
 
 	while (1) {
 		/* Wait while writer is present or pending */
-		while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED)
+		while (atomic_load_explicit(&rwl->cnt, memory_order_relaxed)
 		       & RTE_RWLOCK_MASK)
 			rte_pause();
 
 		/* Try to get read lock */
-		x = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ,
-				       __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ;
+		x = atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ,
+		    memory_order_acquire) + RTE_RWLOCK_READ;
 
 		/* If no writer, then acquire was successful */
 		if (likely(!(x & RTE_RWLOCK_MASK)))
 			return;
 
 		/* Lost race with writer, backout the change. */
-		__atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ,
-				   __ATOMIC_RELAXED);
+		atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ,
+		    memory_order_relaxed);
 	}
 }
 
@@ -127,20 +129,20 @@
 {
 	int32_t x;
 
-	x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+	x = atomic_load_explicit(&rwl->cnt, memory_order_relaxed);
 
 	/* fail if write lock is held or writer is pending */
 	if (x & RTE_RWLOCK_MASK)
 		return -EBUSY;
 
 	/* Try to get read lock */
-	x = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ,
-			       __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ;
+	x = atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ,
+	    memory_order_acquire) + RTE_RWLOCK_READ;
 
 	/* Back out if writer raced in */
 	if (unlikely(x & RTE_RWLOCK_MASK)) {
-		__atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ,
-				   __ATOMIC_RELEASE);
+		atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ,
+		    memory_order_release);
 
 		return -EBUSY;
 	}
@@ -158,7 +160,7 @@
 	__rte_unlock_function(rwl)
 	__rte_no_thread_safety_analysis
 {
-	__atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ, __ATOMIC_RELEASE);
+	atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ, memory_order_release);
 }
 
 /**
@@ -178,10 +180,10 @@
 {
 	int32_t x;
 
-	x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+	x = atomic_load_explicit(&rwl->cnt, memory_order_relaxed);
 	if (x < RTE_RWLOCK_WRITE &&
-	    __atomic_compare_exchange_n(&rwl->cnt, &x, x + RTE_RWLOCK_WRITE,
-					1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+	    atomic_compare_exchange_weak_explicit(&rwl->cnt, &x, x + RTE_RWLOCK_WRITE,
+	    memory_order_acquire, memory_order_relaxed))
 		return 0;
 	else
 		return -EBUSY;
@@ -201,22 +203,22 @@
 	int32_t x;
 
 	while (1) {
-		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+		x = atomic_load_explicit(&rwl->cnt, memory_order_relaxed);
 
 		/* No readers or writers? */
 		if (likely(x < RTE_RWLOCK_WRITE)) {
 			/* Turn off RTE_RWLOCK_WAIT, turn on RTE_RWLOCK_WRITE */
-			if (__atomic_compare_exchange_n(&rwl->cnt, &x, RTE_RWLOCK_WRITE, 1,
-							__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+			if (atomic_compare_exchange_weak_explicit(&rwl->cnt, &x, RTE_RWLOCK_WRITE,
+			    memory_order_acquire, memory_order_relaxed))
 				return;
 		}
 
 		/* Turn on writer wait bit */
 		if (!(x & RTE_RWLOCK_WAIT))
-			__atomic_fetch_or(&rwl->cnt, RTE_RWLOCK_WAIT, __ATOMIC_RELAXED);
+			atomic_fetch_or_explicit(&rwl->cnt, RTE_RWLOCK_WAIT, memory_order_relaxed);
 
 		/* Wait until no readers before trying again */
-		while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) > RTE_RWLOCK_WAIT)
+		while (atomic_load_explicit(&rwl->cnt, memory_order_relaxed) > RTE_RWLOCK_WAIT)
 			rte_pause();
 	}
 
@@ -233,7 +235,7 @@
 	__rte_unlock_function(rwl)
 	__rte_no_thread_safety_analysis
 {
-	__atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_WRITE, __ATOMIC_RELEASE);
+	atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_WRITE, memory_order_release);
 }
 
 /**
@@ -247,7 +249,7 @@
 static inline int
 rte_rwlock_write_is_locked(rte_rwlock_t *rwl)
 {
-	if (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) & RTE_RWLOCK_WRITE)
+	if (atomic_load_explicit(&rwl->cnt, memory_order_relaxed) & RTE_RWLOCK_WRITE)
 		return 1;
 
 	return 0;
diff --git a/lib/eal/include/generic/rte_spinlock.h b/lib/eal/include/generic/rte_spinlock.h
index c50ebaa..d92432d 100644
--- a/lib/eal/include/generic/rte_spinlock.h
+++ b/lib/eal/include/generic/rte_spinlock.h
@@ -17,6 +17,8 @@
  * All locks must be initialised before use, and only initialised once.
  */
 
+#include <stdatomic.h>
+
 #include <rte_lcore.h>
 #ifdef RTE_FORCE_INTRINSICS
 #include <rte_common.h>
@@ -28,7 +30,7 @@
  * The rte_spinlock_t type.
  */
 typedef struct __rte_lockable {
-	volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
+	int _Atomic locked; /**< lock status 0 = unlocked, 1 = locked */
 } rte_spinlock_t;
 
 /**
@@ -65,10 +67,10 @@
 {
 	int exp = 0;
 
-	while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
-				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
-		rte_wait_until_equal_32((volatile uint32_t *)&sl->locked,
-			       0, __ATOMIC_RELAXED);
+	while (!atomic_compare_exchange_strong_explicit(&sl->locked, &exp, 1,
+	    memory_order_acquire, memory_order_relaxed)) {
+		rte_wait_until_equal_32((uint32_t _Atomic *)&sl->locked,
+			       0, memory_order_relaxed);
 		exp = 0;
 	}
 }
@@ -89,7 +91,7 @@
 rte_spinlock_unlock(rte_spinlock_t *sl)
 	__rte_no_thread_safety_analysis
 {
-	__atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
+	atomic_store_explicit(&sl->locked, 0, memory_order_release);
 }
 #endif
@@ -112,9 +114,8 @@
 	__rte_no_thread_safety_analysis
 {
 	int exp = 0;
-	return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
-			0, /* disallow spurious failure */
-			__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+	return atomic_compare_exchange_strong_explicit(&sl->locked, &exp, 1,
+	    memory_order_acquire, memory_order_relaxed);
 }
 #endif
@@ -128,7 +129,7 @@
  */
 static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
 {
-	return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
+	return atomic_load_explicit(&sl->locked, memory_order_acquire);
 }
 
 /**
diff --git a/lib/eal/x86/include/rte_spinlock.h b/lib/eal/x86/include/rte_spinlock.h
index 0b20ddf..3e7f8ac 100644
--- a/lib/eal/x86/include/rte_spinlock.h
+++ b/lib/eal/x86/include/rte_spinlock.h
@@ -78,7 +78,7 @@ static inline int rte_tm_supported(void)
 }
 
 static inline int
-rte_try_tm(volatile int *lock)
+rte_try_tm(int _Atomic *lock)
 {
 	int i, retries;
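
For readers following the conversion, the essence of the change is the
mapping from the GCC __atomic builtins to the equivalent <stdatomic.h>
calls. Below is a minimal, self-contained sketch (not part of the patch;
the toy_* names are made up for illustration) of the same pattern the
spinlock now uses: an _Atomic int acquired with a strong CAS and released
with a release store.

#include <stdatomic.h>
#include <stdio.h>

/* Toy lock: 0 = unlocked, 1 = locked, as in rte_spinlock_t. */
typedef struct { int _Atomic locked; } toy_spinlock_t;

static void
toy_spinlock_lock(toy_spinlock_t *sl)
{
	int exp = 0;

	/* Strong CAS stands in for __atomic_compare_exchange_n(..., 0, ...);
	 * on failure the CAS writes the observed value back into exp, so it
	 * must be reset to 0 before retrying. */
	while (!atomic_compare_exchange_strong_explicit(&sl->locked, &exp, 1,
	    memory_order_acquire, memory_order_relaxed))
		exp = 0;
}

static void
toy_spinlock_unlock(toy_spinlock_t *sl)
{
	/* The release store pairs with the acquire CAS in the lock path. */
	atomic_store_explicit(&sl->locked, 0, memory_order_release);
}

int
main(void)
{
	toy_spinlock_t sl = { .locked = 0 };

	toy_spinlock_lock(&sl);
	printf("locked: %d\n",
	    atomic_load_explicit(&sl.locked, memory_order_acquire));
	toy_spinlock_unlock(&sl);
	return 0;
}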