@@ -84,6 +84,19 @@ FILE_PATTERNS = rte_*.h \
PREDEFINED = __DOXYGEN__ \
RTE_HAS_CPUSET \
VFIO_PRESENT \
+ __rte_lockable= \
+ __rte_guarded_by(x)= \
+ __rte_guarded_var= \
+ __rte_exclusive_locks_required(x)= \
+ __rte_exclusive_lock_function(x)= \
+ __rte_exclusive_trylock_function(x)= \
+ __rte_assert_exclusive_lock(x)= \
+ __rte_shared_locks_required(x)= \
+ __rte_shared_lock_function(x)= \
+ __rte_shared_trylock_function(x)= \
+ __rte_assert_shared_lock(x)= \
+ __rte_unlock_function(x)= \
+ __rte_no_thread_safety_analysis= \
__attribute__(x)=
OPTIMIZE_OUTPUT_FOR_C = YES
@@ -529,6 +529,30 @@ Misc Functions
Locks and atomic operations are per-architecture (i686 and x86_64).
+Lock annotations
+~~~~~~~~~~~~~~~~
+
+R/W locks, seq locks and spinlocks have been instrumented to help developers
+catch issues in DPDK.
+
+This instrumentation relies on
+`clang Thread Safety checks <https://clang.llvm.org/docs/ThreadSafetyAnalysis.html>`_.
+All attributes are prefixed with ``__rte_`` and are fully described in the clang
+documentation.
+
+Some general comments:
+
+- it is important that lock requirements are expressed at the function
+ declaration level in headers so that other code units can be inspected,
+- when a global lock is necessary for some user-exposed API, it is preferred
+ to expose it via an internal helper rather than exposing the global variable,
+- there is a list of known limitations with clang instrumentation, but before
+ waiving checks with ``__rte_no_thread_safety_analysis`` in your code, please
+ discuss it on the mailing list.
+
+A DPDK library/driver can enable/disable the checks by setting
+``annotate_locks`` accordingly in its ``meson.build`` file.
+
IOVA Mode Detection
~~~~~~~~~~~~~~~~~~~
@@ -55,6 +55,11 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Introduced lock annotations.**
+
+ Added lock annotation attributes so that clang can statically analyze lock
+ correctness.
+
* **Updated AMD axgbe driver.**
* Added multi-process support.
@@ -91,6 +91,7 @@ foreach subpath:subdirs
build = true # set to false to disable, e.g. missing deps
reason = '<unknown reason>' # set if build == false to explain
name = drv
+ annotate_locks = false
sources = []
headers = []
driver_sdk_headers = [] # public headers included by drivers
@@ -167,6 +168,10 @@ foreach subpath:subdirs
enabled_drivers += name
lib_name = '_'.join(['rte', class, name])
cflags += '-DRTE_LOG_DEFAULT_LOGTYPE=' + '.'.join([log_prefix, name])
+ if annotate_locks and cc.has_argument('-Wthread-safety')
+ cflags += '-DRTE_ANNOTATE_LOCKS'
+ cflags += '-Wthread-safety'
+ endif
dpdk_conf.set(lib_name.to_upper(), 1)
dpdk_extra_ldflags += pkgconfig_extra_libs
@@ -30,6 +30,7 @@ extern "C" {
#include <rte_branch_prediction.h>
#include <rte_common.h>
+#include <rte_lock_annotations.h>
#include <rte_pause.h>
/**
@@ -55,7 +56,7 @@ extern "C" {
/* Writer is waiting or has lock */
#define RTE_RWLOCK_READ 0x4 /* Reader increment */
-typedef struct {
+typedef struct __rte_lockable {
int32_t cnt;
} rte_rwlock_t;
@@ -84,6 +85,8 @@ rte_rwlock_init(rte_rwlock_t *rwl)
*/
static inline void
rte_rwlock_read_lock(rte_rwlock_t *rwl)
+ __rte_shared_lock_function(rwl)
+ __rte_no_thread_safety_analysis
{
int32_t x;
@@ -119,6 +122,8 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
*/
static inline int
rte_rwlock_read_trylock(rte_rwlock_t *rwl)
+ __rte_shared_trylock_function(0, rwl)
+ __rte_no_thread_safety_analysis
{
int32_t x;
@@ -150,6 +155,8 @@ rte_rwlock_read_trylock(rte_rwlock_t *rwl)
*/
static inline void
rte_rwlock_read_unlock(rte_rwlock_t *rwl)
+ __rte_unlock_function(rwl)
+ __rte_no_thread_safety_analysis
{
__atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ, __ATOMIC_RELEASE);
}
@@ -166,6 +173,8 @@ rte_rwlock_read_unlock(rte_rwlock_t *rwl)
*/
static inline int
rte_rwlock_write_trylock(rte_rwlock_t *rwl)
+ __rte_exclusive_trylock_function(0, rwl)
+ __rte_no_thread_safety_analysis
{
int32_t x;
@@ -186,6 +195,8 @@ rte_rwlock_write_trylock(rte_rwlock_t *rwl)
*/
static inline void
rte_rwlock_write_lock(rte_rwlock_t *rwl)
+ __rte_exclusive_lock_function(rwl)
+ __rte_no_thread_safety_analysis
{
int32_t x;
@@ -219,6 +230,8 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
*/
static inline void
rte_rwlock_write_unlock(rte_rwlock_t *rwl)
+ __rte_unlock_function(rwl)
+ __rte_no_thread_safety_analysis
{
__atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_WRITE, __ATOMIC_RELEASE);
}
@@ -237,7 +250,8 @@ rte_rwlock_write_unlock(rte_rwlock_t *rwl)
* A pointer to a rwlock structure.
*/
static inline void
-rte_rwlock_read_lock_tm(rte_rwlock_t *rwl);
+rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
+ __rte_shared_lock_function(rwl);
/**
* Commit hardware memory transaction or release the read lock if the lock is used as a fall-back
@@ -246,7 +260,8 @@ rte_rwlock_read_lock_tm(rte_rwlock_t *rwl);
* A pointer to the rwlock structure.
*/
static inline void
-rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl);
+rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
+ __rte_unlock_function(rwl);
/**
* Try to execute critical section in a hardware memory transaction, if it
@@ -262,7 +277,8 @@ rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl);
* A pointer to a rwlock structure.
*/
static inline void
-rte_rwlock_write_lock_tm(rte_rwlock_t *rwl);
+rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
+ __rte_exclusive_lock_function(rwl);
/**
* Commit hardware memory transaction or release the write lock if the lock is used as a fall-back
@@ -271,7 +287,8 @@ rte_rwlock_write_lock_tm(rte_rwlock_t *rwl);
* A pointer to a rwlock structure.
*/
static inline void
-rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl);
+rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
+ __rte_unlock_function(rwl);
#ifdef __cplusplus
}
@@ -22,12 +22,13 @@
#ifdef RTE_FORCE_INTRINSICS
#include <rte_common.h>
#endif
+#include <rte_lock_annotations.h>
#include <rte_pause.h>
/**
* The rte_spinlock_t type.
*/
-typedef struct {
+typedef struct __rte_lockable {
volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
} rte_spinlock_t;
@@ -55,11 +56,13 @@ rte_spinlock_init(rte_spinlock_t *sl)
* A pointer to the spinlock.
*/
static inline void
-rte_spinlock_lock(rte_spinlock_t *sl);
+rte_spinlock_lock(rte_spinlock_t *sl)
+ __rte_exclusive_lock_function(sl);
#ifdef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
int exp = 0;
@@ -79,11 +82,13 @@ rte_spinlock_lock(rte_spinlock_t *sl)
* A pointer to the spinlock.
*/
static inline void
-rte_spinlock_unlock (rte_spinlock_t *sl);
+rte_spinlock_unlock(rte_spinlock_t *sl)
+ __rte_unlock_function(sl);
#ifdef RTE_FORCE_INTRINSICS
static inline void
-rte_spinlock_unlock (rte_spinlock_t *sl)
+rte_spinlock_unlock(rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
__atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
}
@@ -99,11 +104,13 @@ rte_spinlock_unlock (rte_spinlock_t *sl)
*/
__rte_warn_unused_result
static inline int
-rte_spinlock_trylock (rte_spinlock_t *sl);
+rte_spinlock_trylock(rte_spinlock_t *sl)
+ __rte_exclusive_trylock_function(1, sl);
#ifdef RTE_FORCE_INTRINSICS
static inline int
-rte_spinlock_trylock (rte_spinlock_t *sl)
+rte_spinlock_trylock(rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
int exp = 0;
return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
@@ -147,7 +154,8 @@ static inline int rte_tm_supported(void);
* A pointer to the spinlock.
*/
static inline void
-rte_spinlock_lock_tm(rte_spinlock_t *sl);
+rte_spinlock_lock_tm(rte_spinlock_t *sl)
+ __rte_exclusive_lock_function(sl);
/**
* Commit hardware memory transaction or release the spinlock if
@@ -157,7 +165,8 @@ rte_spinlock_lock_tm(rte_spinlock_t *sl);
* A pointer to the spinlock.
*/
static inline void
-rte_spinlock_unlock_tm(rte_spinlock_t *sl);
+rte_spinlock_unlock_tm(rte_spinlock_t *sl)
+ __rte_unlock_function(sl);
/**
* Try to execute critical section in a hardware memory transaction,
@@ -177,7 +186,8 @@ rte_spinlock_unlock_tm(rte_spinlock_t *sl);
*/
__rte_warn_unused_result
static inline int
-rte_spinlock_trylock_tm(rte_spinlock_t *sl);
+rte_spinlock_trylock_tm(rte_spinlock_t *sl)
+ __rte_exclusive_trylock_function(1, sl);
/**
* The rte_spinlock_recursive_t type.
@@ -213,6 +223,7 @@ static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
* A pointer to the recursive spinlock.
*/
static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
+ __rte_no_thread_safety_analysis
{
int id = rte_gettid();
@@ -229,6 +240,7 @@ static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
* A pointer to the recursive spinlock.
*/
static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
+ __rte_no_thread_safety_analysis
{
if (--(slr->count) == 0) {
slr->user = -1;
@@ -247,6 +259,7 @@ static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
*/
__rte_warn_unused_result
static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
+ __rte_no_thread_safety_analysis
{
int id = rte_gettid();
@@ -27,6 +27,7 @@ headers += files(
'rte_keepalive.h',
'rte_launch.h',
'rte_lcore.h',
+ 'rte_lock_annotations.h',
'rte_log.h',
'rte_malloc.h',
'rte_mcslock.h',
new file mode 100644
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Red Hat, Inc.
+ */
+
+#ifndef RTE_LOCK_ANNOTATIONS_H
+#define RTE_LOCK_ANNOTATIONS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef RTE_ANNOTATE_LOCKS
+
+#define __rte_lockable \
+ __attribute__((lockable))
+
+#define __rte_guarded_by(...) \
+ __attribute__((guarded_by(__VA_ARGS__)))
+#define __rte_guarded_var \
+ __attribute__((guarded_var))
+
+#define __rte_exclusive_locks_required(...) \
+ __attribute__((exclusive_locks_required(__VA_ARGS__)))
+#define __rte_exclusive_lock_function(...) \
+ __attribute__((exclusive_lock_function(__VA_ARGS__)))
+#define __rte_exclusive_trylock_function(ret, ...) \
+ __attribute__((exclusive_trylock_function(ret, __VA_ARGS__)))
+#define __rte_assert_exclusive_lock(...) \
+ __attribute__((assert_exclusive_lock(__VA_ARGS__)))
+
+#define __rte_shared_locks_required(...) \
+ __attribute__((shared_locks_required(__VA_ARGS__)))
+#define __rte_shared_lock_function(...) \
+ __attribute__((shared_lock_function(__VA_ARGS__)))
+#define __rte_shared_trylock_function(ret, ...) \
+ __attribute__((shared_trylock_function(ret, __VA_ARGS__)))
+#define __rte_assert_shared_lock(...) \
+ __attribute__((assert_shared_lock(__VA_ARGS__)))
+
+#define __rte_unlock_function(...) \
+ __attribute__((unlock_function(__VA_ARGS__)))
+
+#define __rte_no_thread_safety_analysis \
+ __attribute__((no_thread_safety_analysis))
+
+#else /* ! RTE_ANNOTATE_LOCKS */
+
+#define __rte_lockable
+
+#define __rte_guarded_by(...)
+#define __rte_guarded_var
+
+#define __rte_exclusive_locks_required(...)
+#define __rte_exclusive_lock_function(...)
+#define __rte_exclusive_trylock_function(...)
+#define __rte_assert_exclusive_lock(...)
+
+#define __rte_shared_locks_required(...)
+#define __rte_shared_lock_function(...)
+#define __rte_shared_trylock_function(...)
+#define __rte_assert_shared_lock(...)
+
+#define __rte_unlock_function(...)
+
+#define __rte_no_thread_safety_analysis
+
+#endif /* RTE_ANNOTATE_LOCKS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_LOCK_ANNOTATIONS_H */
@@ -215,6 +215,7 @@ rte_seqlock_read_retry(const rte_seqlock_t *seqlock, uint32_t begin_sn)
__rte_experimental
static inline void
rte_seqlock_write_lock(rte_seqlock_t *seqlock)
+ __rte_exclusive_lock_function(&seqlock->lock)
{
/* To synchronize with other writers. */
rte_spinlock_lock(&seqlock->lock);
@@ -240,6 +241,7 @@ rte_seqlock_write_lock(rte_seqlock_t *seqlock)
__rte_experimental
static inline void
rte_seqlock_write_unlock(rte_seqlock_t *seqlock)
+ __rte_unlock_function(&seqlock->lock)
{
rte_seqcount_write_end(&seqlock->count);
@@ -20,6 +20,7 @@ extern "C" {
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
while (__sync_lock_test_and_set(&sl->locked, 1))
while (sl->locked)
@@ -28,12 +29,14 @@ rte_spinlock_lock(rte_spinlock_t *sl)
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
__sync_lock_release(&sl->locked);
}
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
return __sync_lock_test_and_set(&sl->locked, 1) == 0;
}
@@ -14,6 +14,7 @@ extern "C" {
static inline void
rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
+ __rte_no_thread_safety_analysis
{
if (likely(rte_try_tm(&rwl->cnt)))
return;
@@ -22,6 +23,7 @@ rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
static inline void
rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
+ __rte_no_thread_safety_analysis
{
if (unlikely(rwl->cnt))
rte_rwlock_read_unlock(rwl);
@@ -31,6 +33,7 @@ rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
static inline void
rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
+ __rte_no_thread_safety_analysis
{
if (likely(rte_try_tm(&rwl->cnt)))
return;
@@ -39,6 +42,7 @@ rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
static inline void
rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
+ __rte_no_thread_safety_analysis
{
if (unlikely(rwl->cnt))
rte_rwlock_write_unlock(rwl);
@@ -23,6 +23,7 @@ extern "C" {
#ifndef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
int lock_val = 1;
asm volatile (
@@ -43,6 +44,7 @@ rte_spinlock_lock(rte_spinlock_t *sl)
static inline void
rte_spinlock_unlock (rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
int unlock_val = 0;
asm volatile (
@@ -54,6 +56,7 @@ rte_spinlock_unlock (rte_spinlock_t *sl)
static inline int
rte_spinlock_trylock (rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
int lockval = 1;
@@ -121,6 +124,7 @@ rte_try_tm(volatile int *lock)
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
if (likely(rte_try_tm(&sl->locked)))
return;
@@ -130,6 +134,7 @@ rte_spinlock_lock_tm(rte_spinlock_t *sl)
static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
if (likely(rte_try_tm(&sl->locked)))
return 1;
@@ -139,6 +144,7 @@ rte_spinlock_trylock_tm(rte_spinlock_t *sl)
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
+ __rte_no_thread_safety_analysis
{
if (unlikely(sl->locked))
rte_spinlock_unlock(sl);
@@ -148,6 +154,7 @@ rte_spinlock_unlock_tm(rte_spinlock_t *sl)
static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
+ __rte_no_thread_safety_analysis
{
if (likely(rte_try_tm(&slr->sl.locked)))
return;
@@ -157,6 +164,7 @@ rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
+ __rte_no_thread_safety_analysis
{
if (unlikely(slr->sl.locked))
rte_spinlock_recursive_unlock(slr);
@@ -166,6 +174,7 @@ rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
+ __rte_no_thread_safety_analysis
{
if (likely(rte_try_tm(&slr->sl.locked)))
return 1;
@@ -120,6 +120,7 @@ foreach l:libraries
reason = '<unknown reason>' # set if build == false to explain why
name = l
use_function_versioning = false
+ annotate_locks = false
sources = []
headers = []
indirect_headers = [] # public headers not directly included by apps
@@ -202,6 +203,10 @@ foreach l:libraries
cflags += '-DRTE_USE_FUNCTION_VERSIONING'
endif
cflags += '-DRTE_LOG_DEFAULT_LOGTYPE=lib.' + l
+ if annotate_locks and cc.has_argument('-Wthread-safety')
+ cflags += '-DRTE_ANNOTATE_LOCKS'
+ cflags += '-Wthread-safety'
+ endif
# first build static lib
static_lib = static_library(libname,