@@ -87,6 +87,7 @@ foreach subpath:subdirs
build = true # set to false to disable, e.g. missing deps
reason = '<unknown reason>' # set if build == false to explain why
name = drv
+ annotate_locks = false
sources = []
headers = []
objs = []
@@ -152,6 +153,10 @@ foreach subpath:subdirs
enabled_drivers += name
lib_name = '_'.join(['rte', class, name])
cflags += '-DRTE_LOG_DEFAULT_LOGTYPE=' + '.'.join([log_prefix, name])
+ if annotate_locks and cc.has_argument('-Wthread-safety')
+ cflags += '-DRTE_ANNOTATE_LOCKS'
+ cflags += '-Wthread-safety'
+ endif
dpdk_conf.set(lib_name.to_upper(), 1)
dpdk_extra_ldflags += pkgconfig_extra_libs
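
A usage sketch, not part of the patch: with the new `annotate_locks` knob and the `cc.has_argument('-Wthread-safety')` guard above, a driver opts in from its own meson.build, and the build degrades gracefully on compilers lacking the analysis (in practice only clang implements -Wthread-safety). The file and source names below are illustrative.

```meson
# a driver's meson.build (sketch)
annotate_locks = true
sources = files('my_driver.c')  # illustrative source name
```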
@@ -13,24 +13,28 @@ extern "C" {
static inline void
rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_rwlock_read_lock(rwl);
}
static inline void
rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_rwlock_read_unlock(rwl);
}
static inline void
rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_rwlock_write_lock(rwl);
}
static inline void
rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_rwlock_write_unlock(rwl);
}
@@ -23,36 +23,42 @@ static inline int rte_tm_supported(void)
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_spinlock_lock(sl); /* fall-back */
}
static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
return rte_spinlock_trylock(sl);
}
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_spinlock_unlock(sl);
}
static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_spinlock_recursive_lock(slr); /* fall-back */
}
static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_spinlock_recursive_unlock(slr);
}
static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
return rte_spinlock_recursive_trylock(slr);
}
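
Note that the `_tm` (transactional-memory) wrappers above are tagged `RTE_NO_ANNOTATED_LOCK_CHECK` rather than given acquire/release annotations: their fallback paths take or skip the lock in ways the static analysis cannot model. A minimal sketch of the same opt-out applied to ordinary code, with illustrative names:

```c
#include <rte_spinlock.h>

/* Acquires two locks in address order to avoid deadlock. The conditional
 * acquisition pattern confuses clang's analysis, so the function opts out;
 * without the attribute, clang would report both locks still held on return. */
static void
lock_pair(rte_spinlock_t *a, rte_spinlock_t *b)
	RTE_NO_ANNOTATED_LOCK_CHECK
{
	if (a < b) {
		rte_spinlock_lock(a);
		rte_spinlock_lock(b);
	} else {
		rte_spinlock_lock(b);
		rte_spinlock_lock(a);
	}
}
```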
@@ -23,6 +23,7 @@ extern "C" {
#include <rte_common.h>
#include <rte_atomic.h>
+#include <rte_lock_annotations.h>
#include <rte_pause.h>
/**
@@ -30,7 +31,7 @@ extern "C" {
*
* cnt is -1 when write lock is held, and > 0 when read locks are held.
*/
-typedef struct {
+typedef struct RTE_ANNOTATED_LOCK {
volatile int32_t cnt; /**< -1 when W lock held, > 0 when R locks held. */
} rte_rwlock_t;
@@ -58,7 +59,8 @@ rte_rwlock_init(rte_rwlock_t *rwl)
* A pointer to a rwlock structure.
*/
static inline void
-rte_rwlock_read_lock(rte_rwlock_t *rwl)
+rte_rwlock_read_lock(rte_rwlock_t *rwl) RTE_SHARED_LOCK_ACQUIRES(rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
int32_t x;
int success = 0;
@@ -90,7 +92,8 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
*/
__rte_experimental
static inline int
-rte_rwlock_read_trylock(rte_rwlock_t *rwl)
+rte_rwlock_read_trylock(rte_rwlock_t *rwl) RTE_SHARED_LOCK_TRYLOCK(1, rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
int32_t x;
int success = 0;
@@ -114,7 +117,8 @@ rte_rwlock_read_trylock(rte_rwlock_t *rwl)
* A pointer to the rwlock structure.
*/
static inline void
-rte_rwlock_read_unlock(rte_rwlock_t *rwl)
+rte_rwlock_read_unlock(rte_rwlock_t *rwl) RTE_LOCK_RELEASES(rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
__atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE);
}
@@ -134,7 +138,8 @@ rte_rwlock_read_unlock(rte_rwlock_t *rwl)
*/
__rte_experimental
static inline int
-rte_rwlock_write_trylock(rte_rwlock_t *rwl)
+rte_rwlock_write_trylock(rte_rwlock_t *rwl) RTE_EXC_LOCK_TRYLOCK(1, rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
int32_t x;
@@ -153,7 +158,8 @@ rte_rwlock_write_trylock(rte_rwlock_t *rwl)
* A pointer to a rwlock structure.
*/
static inline void
-rte_rwlock_write_lock(rte_rwlock_t *rwl)
+rte_rwlock_write_lock(rte_rwlock_t *rwl) RTE_EXC_LOCK_ACQUIRES(rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
int32_t x;
int success = 0;
@@ -177,7 +183,8 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
* A pointer to a rwlock structure.
*/
static inline void
-rte_rwlock_write_unlock(rte_rwlock_t *rwl)
+rte_rwlock_write_unlock(rte_rwlock_t *rwl) RTE_LOCK_RELEASES(rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
__atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE);
}
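
With the generic rwlock API annotated, compiling under clang with the new `-DRTE_ANNOTATE_LOCKS -Wthread-safety` flags turns unbalanced lock usage into a compile-time diagnostic. A minimal sketch of the class of bug caught, with illustrative names:

```c
#include <rte_rwlock.h>

static rte_rwlock_t cfg_lock = RTE_RWLOCK_INITIALIZER;

static void
broken_reader(void)
{
	rte_rwlock_read_lock(&cfg_lock);
	/* ... read shared configuration ... */
	/* Missing rte_rwlock_read_unlock(&cfg_lock); clang diagnoses that
	 * the lock is still held at the end of the function. */
}
```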
@@ -22,12 +22,13 @@
#ifdef RTE_FORCE_INTRINSICS
#include <rte_common.h>
#endif
+#include <rte_lock_annotations.h>
#include <rte_pause.h>
/**
* The rte_spinlock_t type.
*/
-typedef struct {
+typedef struct RTE_ANNOTATED_LOCK {
volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
} rte_spinlock_t;
@@ -55,11 +56,12 @@ rte_spinlock_init(rte_spinlock_t *sl)
* A pointer to the spinlock.
*/
static inline void
-rte_spinlock_lock(rte_spinlock_t *sl);
+rte_spinlock_lock(rte_spinlock_t *sl) RTE_EXC_LOCK_ACQUIRES(sl);
#ifdef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
int exp = 0;
@@ -79,11 +81,12 @@ rte_spinlock_lock(rte_spinlock_t *sl)
* A pointer to the spinlock.
*/
static inline void
-rte_spinlock_unlock (rte_spinlock_t *sl);
+rte_spinlock_unlock(rte_spinlock_t *sl) RTE_LOCK_RELEASES(sl);
#ifdef RTE_FORCE_INTRINSICS
static inline void
-rte_spinlock_unlock (rte_spinlock_t *sl)
+rte_spinlock_unlock(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
__atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
}
@@ -98,11 +101,12 @@ rte_spinlock_unlock (rte_spinlock_t *sl)
* 1 if the lock is successfully taken; 0 otherwise.
*/
static inline int
-rte_spinlock_trylock (rte_spinlock_t *sl);
+rte_spinlock_trylock(rte_spinlock_t *sl) RTE_EXC_LOCK_TRYLOCK(1, sl);
#ifdef RTE_FORCE_INTRINSICS
static inline int
-rte_spinlock_trylock (rte_spinlock_t *sl)
+rte_spinlock_trylock(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
int exp = 0;
return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
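
The trylock annotations encode the success value: `RTE_EXC_LOCK_TRYLOCK(1, sl)` tells the analysis the lock is held only on the branch where the call returned 1. A minimal sketch of the pattern the checker expects, names illustrative:

```c
#include <rte_spinlock.h>

static rte_spinlock_t sl = RTE_SPINLOCK_INITIALIZER;

static void
try_update(void)
{
	if (rte_spinlock_trylock(&sl)) {
		/* the analysis considers sl held only inside this branch */
		rte_spinlock_unlock(&sl);
	}
	/* calling rte_spinlock_unlock() outside the branch would be diagnosed */
}
```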
@@ -211,6 +215,7 @@ static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
* A pointer to the recursive spinlock.
*/
static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
int id = rte_gettid();
@@ -227,6 +232,7 @@ static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
* A pointer to the recursive spinlock.
*/
static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
if (--(slr->count) == 0) {
slr->user = -1;
@@ -244,6 +250,7 @@ static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
* 1 if the lock is successfully taken; 0 otherwise.
*/
static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
int id = rte_gettid();
@@ -27,6 +27,7 @@ headers += files(
'rte_keepalive.h',
'rte_launch.h',
'rte_lcore.h',
+ 'rte_lock_annotations.h',
'rte_log.h',
'rte_malloc.h',
'rte_memory.h',
new file mode 100644
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Red Hat, Inc.
+ */
+
+#ifndef RTE_LOCK_ANNOTATIONS_H
+#define RTE_LOCK_ANNOTATIONS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef RTE_ANNOTATE_LOCKS
+
+#define RTE_ANNOTATED_LOCK \
+ __attribute__((lockable))
+
+#define RTE_GUARDED_BY(...) \
+ __attribute__((guarded_by(__VA_ARGS__)))
+#define RTE_GUARDED_VAR \
+ __attribute__((guarded_var))
+
+#define RTE_EXC_LOCK_REQUIRES(...) \
+ __attribute__((exclusive_locks_required(__VA_ARGS__)))
+#define RTE_EXC_LOCK_ACQUIRES(...) \
+ __attribute__((exclusive_lock_function(__VA_ARGS__)))
+#define RTE_EXC_LOCK_TRYLOCK(ret, ...) \
+ __attribute__((exclusive_trylock_function(ret, __VA_ARGS__)))
+
+#define RTE_SHARED_LOCK_REQUIRES(...) \
+ __attribute__((shared_locks_required(__VA_ARGS__)))
+#define RTE_SHARED_LOCK_ACQUIRES(...) \
+ __attribute__((shared_lock_function(__VA_ARGS__)))
+#define RTE_SHARED_LOCK_TRYLOCK(ret, ...) \
+ __attribute__((shared_trylock_function(ret, __VA_ARGS__)))
+
+#define RTE_LOCK_RELEASES(...) \
+ __attribute__((unlock_function(__VA_ARGS__)))
+
+#define RTE_NO_ANNOTATED_LOCK_CHECK \
+ __attribute__((no_thread_safety_analysis))
+
+#else /* ! RTE_ANNOTATE_LOCKS */
+
+#define RTE_ANNOTATED_LOCK
+
+#define RTE_GUARDED_BY(...)
+#define RTE_GUARDED_VAR
+
+#define RTE_EXC_LOCK_REQUIRES(...)
+#define RTE_EXC_LOCK_ACQUIRES(...)
+#define RTE_EXC_LOCK_TRYLOCK(...)
+
+#define RTE_SHARED_LOCK_REQUIRES(...)
+#define RTE_SHARED_LOCK_ACQUIRES(...)
+#define RTE_SHARED_LOCK_TRYLOCK(...)
+
+#define RTE_LOCK_RELEASES(...)
+
+#define RTE_NO_ANNOTATED_LOCK_CHECK
+
+#endif /* RTE_ANNOTATE_LOCKS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_LOCK_ANNOTATIONS_H */
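
The new header is not only for annotating DPDK's own lock APIs; callers can document their locking contracts with the same macros. A minimal sketch, assuming a clang build with the flags enabled; all names are illustrative:

```c
#include <rte_lock_annotations.h>
#include <rte_spinlock.h>

static rte_spinlock_t list_lock = RTE_SPINLOCK_INITIALIZER;

/* counter may only be touched while list_lock is held */
static int counter RTE_GUARDED_BY(list_lock);

/* callers must enter with list_lock already held */
static void
counter_inc(void)
	RTE_EXC_LOCK_REQUIRES(list_lock)
{
	counter++;
}

static void
update(void)
{
	rte_spinlock_lock(&list_lock);
	counter_inc();
	rte_spinlock_unlock(&list_lock);
}
```

Calling counter_inc() without the lock, or touching counter directly outside it, is then flagged at compile time.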
@@ -11,24 +11,28 @@ extern "C" {
static inline void
rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_rwlock_read_lock(rwl);
}
static inline void
rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_rwlock_read_unlock(rwl);
}
static inline void
rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_rwlock_write_lock(rwl);
}
static inline void
rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_rwlock_write_unlock(rwl);
}
@@ -20,6 +20,7 @@ extern "C" {
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
while (__sync_lock_test_and_set(&sl->locked, 1))
while (sl->locked)
@@ -28,12 +29,14 @@ rte_spinlock_lock(rte_spinlock_t *sl)
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
__sync_lock_release(&sl->locked);
}
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
return __sync_lock_test_and_set(&sl->locked, 1) == 0;
}
@@ -47,36 +50,42 @@ static inline int rte_tm_supported(void)
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_spinlock_lock(sl); /* fall-back */
}
static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
return rte_spinlock_trylock(sl);
}
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_spinlock_unlock(sl);
}
static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_spinlock_recursive_lock(slr); /* fall-back */
}
static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
rte_spinlock_recursive_unlock(slr);
}
static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
return rte_spinlock_recursive_trylock(slr);
}
@@ -14,6 +14,7 @@ extern "C" {
static inline void
rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
if (likely(rte_try_tm(&rwl->cnt)))
return;
@@ -22,6 +23,7 @@ rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
static inline void
rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
if (unlikely(rwl->cnt))
rte_rwlock_read_unlock(rwl);
@@ -31,6 +33,7 @@ rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
static inline void
rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
if (likely(rte_try_tm(&rwl->cnt)))
return;
@@ -39,6 +42,7 @@ rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
static inline void
rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
if (unlikely(rwl->cnt))
rte_rwlock_write_unlock(rwl);
@@ -23,6 +23,7 @@ extern "C" {
#ifndef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
int lock_val = 1;
asm volatile (
@@ -43,6 +44,7 @@ rte_spinlock_lock(rte_spinlock_t *sl)
static inline void
rte_spinlock_unlock (rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
int unlock_val = 0;
asm volatile (
@@ -54,6 +56,7 @@ rte_spinlock_unlock (rte_spinlock_t *sl)
static inline int
rte_spinlock_trylock (rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
int lockval = 1;
@@ -121,6 +124,7 @@ rte_try_tm(volatile int *lock)
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
if (likely(rte_try_tm(&sl->locked)))
return;
@@ -130,6 +134,7 @@ rte_spinlock_lock_tm(rte_spinlock_t *sl)
static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
if (likely(rte_try_tm(&sl->locked)))
return 1;
@@ -139,6 +144,7 @@ rte_spinlock_trylock_tm(rte_spinlock_t *sl)
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
if (unlikely(sl->locked))
rte_spinlock_unlock(sl);
@@ -148,6 +154,7 @@ rte_spinlock_unlock_tm(rte_spinlock_t *sl)
static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
if (likely(rte_try_tm(&slr->sl.locked)))
return;
@@ -157,6 +164,7 @@ rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
if (unlikely(slr->sl.locked))
rte_spinlock_recursive_unlock(slr);
@@ -166,6 +174,7 @@ rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
+ RTE_NO_ANNOTATED_LOCK_CHECK
{
if (likely(rte_try_tm(&slr->sl.locked)))
return 1;
@@ -112,6 +112,7 @@ foreach l:libraries
reason = '<unknown reason>' # set if build == false to explain why
name = l
use_function_versioning = false
+ annotate_locks = false
sources = []
headers = []
indirect_headers = [] # public headers not directly included by apps
@@ -184,6 +185,10 @@ foreach l:libraries
cflags += '-DRTE_USE_FUNCTION_VERSIONING'
endif
cflags += '-DRTE_LOG_DEFAULT_LOGTYPE=lib.' + l
+ if annotate_locks and cc.has_argument('-Wthread-safety')
+ cflags += '-DRTE_ANNOTATE_LOCKS'
+ cflags += '-Wthread-safety'
+ endif
# first build static lib
static_lib = static_library(libname,