@@ -17,6 +17,9 @@ elif (toolchain == 'icc' and cc.version().version_compare('>=16.0.0'))
endif
dpdk_conf.set('RTE_LIBRTE_VHOST_POSTCOPY', cc.has_header('linux/userfaultfd.h'))
cflags += '-fno-strict-aliasing'
+if cc.has_argument('-Wthread-safety')
+ cflags += '-Wthread-safety'
+endif
sources = files(
'fd_man.c',
'iotlb.c',
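
Only clang implements -Wthread-safety, hence the cc.has_argument() probe above rather than adding the flag unconditionally. As a minimal sketch of what the flag catches (a standalone toy file, not part of the patch; compile with clang -Wthread-safety -c):

    /* tsa_smoke.c -- toy stand-ins, no DPDK headers assumed */
    struct __attribute__((lockable)) toy_lock { int busy; };

    void take(struct toy_lock *l) __attribute__((exclusive_lock_function(l)));
    void drop(struct toy_lock *l) __attribute__((unlock_function(l)));

    void leaky(struct toy_lock *l)
    {
        take(l);
        /* missing drop(l): clang reports that mutex 'l' is still
         * held at the end of the function */
    }
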
@@ -622,7 +622,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
dev->virtqueue[i] = vq;
init_vring_queue(dev, i);
- rte_spinlock_init(&vq->access_lock);
+ vhost_spinlock_init(&vq->access_lock);
vq->avail_wrap_counter = 1;
vq->used_wrap_counter = 1;
vq->signalled_used_valid = false;
@@ -1291,14 +1291,14 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx)
if (!vq)
return -1;
- rte_spinlock_lock(&vq->access_lock);
+ vhost_spinlock_lock(&vq->access_lock);
if (vq_is_packed(dev))
vhost_vring_call_packed(dev, vq);
else
vhost_vring_call_split(dev, vq);
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
return 0;
}
@@ -1321,7 +1321,7 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
if (!vq)
return 0;
- rte_spinlock_lock(&vq->access_lock);
+ vhost_spinlock_lock(&vq->access_lock);
if (unlikely(!vq->enabled || vq->avail == NULL))
goto out;
@@ -1329,7 +1329,7 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
out:
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
return ret;
}
@@ -1413,12 +1413,12 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
if (!vq)
return -1;
- rte_spinlock_lock(&vq->access_lock);
+ vhost_spinlock_lock(&vq->access_lock);
vq->notif_enable = enable;
ret = vhost_enable_guest_notification(dev, vq, enable);
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
return ret;
}
@@ -1475,7 +1475,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
if (vq == NULL)
return 0;
- rte_spinlock_lock(&vq->access_lock);
+ vhost_spinlock_lock(&vq->access_lock);
if (unlikely(!vq->enabled || vq->avail == NULL))
goto out;
@@ -1483,7 +1483,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
out:
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
return ret;
}
@@ -1708,9 +1708,9 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id)
if (unlikely(vq == NULL || !dev->async_copy))
return -1;
- rte_spinlock_lock(&vq->access_lock);
+ vhost_spinlock_lock(&vq->access_lock);
ret = async_channel_register(vid, queue_id);
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
return ret;
}
@@ -1758,7 +1758,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
if (!vq->async)
return ret;
- if (!rte_spinlock_trylock(&vq->access_lock)) {
+ if (vhost_spinlock_trylock(&vq->access_lock) == 0) {
VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel, virtqueue busy.\n",
dev->ifname);
return -1;
@@ -1774,7 +1774,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
vhost_free_async_mem(vq);
out:
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
return ret;
}
@@ -1894,7 +1894,7 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
if (!vq->async)
return ret;
- if (!rte_spinlock_trylock(&vq->access_lock)) {
+ if (vhost_spinlock_trylock(&vq->access_lock) == 0) {
VHOST_LOG_CONFIG(DEBUG,
"(%s) failed to check in-flight packets. virtqueue busy.\n",
dev->ifname);
@@ -1902,7 +1902,7 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
}
ret = vq->async->pkts_inflight_n;
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
return ret;
}
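
All the trylock call sites above switch from !rte_spinlock_trylock(...) to an explicit == 0 test. The two are equivalent, since rte_spinlock_trylock() returns 1 when the lock is taken and 0 when it is busy; the explicit form also mirrors the success value declared by VHOST_SPINLOCK_TRYLOCK(1, l) below. A toy illustration of that contract (stand-in code, no DPDK headers):

    static int toy_trylock(int *busy)
    {
        if (*busy)
            return 0;      /* contended: caller must back off */
        *busy = 1;
        return 1;          /* acquired */
    }

    int poll_once(int *busy)
    {
        if (toy_trylock(busy) == 0)  /* same test as !toy_trylock(busy) */
            return -1;
        /* ... critical section ... */
        *busy = 0;
        return 0;
    }
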
@@ -85,6 +85,71 @@
for (iter = val; iter < num; iter++)
#endif
+#ifndef __has_feature
+#define __vhost_has_feature(x) 0
+#else
+#define __vhost_has_feature __has_feature
+#endif
+
+#if __vhost_has_feature(c_thread_safety_attributes)
+#define VHOST_NO_THREAD_SAFETY_ANALYSIS \
+ __attribute__((no_thread_safety_analysis))
+#define VHOST_LOCKABLE \
+ __attribute__((lockable))
+
+#define VHOST_SPINLOCK_REQUIRES(...) \
+ __attribute__((exclusive_locks_required(__VA_ARGS__)))
+#define VHOST_SPINLOCK_ACQUIRES(...) \
+ __attribute__((exclusive_lock_function(__VA_ARGS__)))
+#define VHOST_SPINLOCK_TRYLOCK(ret, ...) \
+ __attribute__((exclusive_trylock_function(ret, __VA_ARGS__)))
+#define VHOST_SPINLOCK_RELEASES(...) \
+ __attribute__((unlock_function(__VA_ARGS__)))
+
+#else
+#define VHOST_NO_THREAD_SAFETY_ANALYSIS
+#define VHOST_LOCKABLE
+
+#define VHOST_SPINLOCK_REQUIRES(...)
+#define VHOST_SPINLOCK_ACQUIRES(...)
+#define VHOST_SPINLOCK_TRYLOCK(...)
+#define VHOST_SPINLOCK_RELEASES(...)
+#endif
+
+typedef struct VHOST_LOCKABLE {
+ rte_spinlock_t l;
+} vhost_spinlock_t;
+
+static __rte_always_inline void
+vhost_spinlock_init(vhost_spinlock_t *l)
+{
+ rte_spinlock_init(&l->l);
+}
+
+static __rte_always_inline void
+vhost_spinlock_lock(vhost_spinlock_t *l)
+ VHOST_SPINLOCK_ACQUIRES(l)
+ VHOST_NO_THREAD_SAFETY_ANALYSIS
+{
+ rte_spinlock_lock(&l->l);
+}
+
+static __rte_always_inline int
+vhost_spinlock_trylock(vhost_spinlock_t *l)
+ VHOST_SPINLOCK_TRYLOCK(1, l)
+ VHOST_NO_THREAD_SAFETY_ANALYSIS
+{
+ return rte_spinlock_trylock(&l->l);
+}
+
+static __rte_always_inline void
+vhost_spinlock_unlock(vhost_spinlock_t *l)
+ VHOST_SPINLOCK_RELEASES(l)
+ VHOST_NO_THREAD_SAFETY_ANALYSIS
+{
+ rte_spinlock_unlock(&l->l);
+}
+
/**
* Structure contains buffer address, length and descriptor index
* from vring to do scatter RX.
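
These wrappers give clang the full picture: which functions acquire, release, or merely require the lock, with the wrapper bodies themselves opted out of the analysis since they manipulate the raw rte_spinlock_t directly. A self-contained sketch of the same pattern, using toy stand-ins for the DPDK types (compile with clang -Wthread-safety -c):

    #define LOCKABLE          __attribute__((lockable))
    #define ACQUIRES(...)     __attribute__((exclusive_lock_function(__VA_ARGS__)))
    #define RELEASES(...)     __attribute__((unlock_function(__VA_ARGS__)))
    #define REQUIRES(...)     __attribute__((exclusive_locks_required(__VA_ARGS__)))
    #define TRYLOCK(ret, ...) __attribute__((exclusive_trylock_function(ret, __VA_ARGS__)))
    #define NO_TSA            __attribute__((no_thread_safety_analysis))

    typedef struct LOCKABLE { int busy; } toy_lock_t;

    static void toy_lock(toy_lock_t *l) ACQUIRES(l) NO_TSA { l->busy = 1; }
    static void toy_unlock(toy_lock_t *l) RELEASES(l) NO_TSA { l->busy = 0; }
    static int toy_trylock(toy_lock_t *l) TRYLOCK(1, l) NO_TSA
    {
        return l->busy ? 0 : (l->busy = 1);
    }

    static toy_lock_t guard;
    static int counter;

    static void bump(void) REQUIRES(guard) { counter++; }

    void ok_path(void)
    {
        toy_lock(&guard);
        bump();
        toy_unlock(&guard);
    }

    void bad_path(void)
    {
        bump();                       /* warning: requires holding 'guard' */
        if (toy_trylock(&guard) == 0)
            return;                   /* clean: analysis knows the lock
                                       * was not taken on this path */
        /* warning: 'guard' is still held at the end of the function */
    }
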
@@ -255,8 +320,7 @@ struct vhost_virtqueue {
bool access_ok;
bool ready;
- rte_spinlock_t access_lock;
-
+ vhost_spinlock_t access_lock;
union {
struct vring_used_elem *shadow_used_split;
@@ -834,6 +898,7 @@ vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
+ VHOST_SPINLOCK_REQUIRES(vq->access_lock)
{
/* Flush used->idx update before we read avail->flags. */
rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
@@ -872,6 +937,7 @@ vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+ VHOST_SPINLOCK_REQUIRES(vq->access_lock)
{
uint16_t old, new, off, off_wrap;
bool signalled_used_valid, kick = false;
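
With vhost_vring_call_split() and vhost_vring_call_packed() annotated this way, clang verifies every caller — e.g. rte_vhost_vring_call() above, which takes vq->access_lock before calling them. Note that the attribute argument may be a member expression of a parameter. Reusing the toy macros from the previous sketch:

    struct toy_vq {
        toy_lock_t access_lock;
        int used_idx;
    };

    static void vring_call(struct toy_vq *vq) REQUIRES(vq->access_lock)
    {
        vq->used_idx++;   /* safe: every caller proved it holds the lock */
    }

    void ring_doorbell(struct toy_vq *vq)
    {
        toy_lock(&vq->access_lock);
        vring_call(vq);
        toy_unlock(&vq->access_lock);
    }
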
@@ -2571,9 +2571,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
len, imsg->perm);
if (is_vring_iotlb(dev, vq, imsg)) {
- rte_spinlock_lock(&vq->access_lock);
+ vhost_spinlock_lock(&vq->access_lock);
*pdev = dev = translate_ring_addresses(dev, i);
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
}
}
break;
@@ -2588,9 +2588,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
imsg->size);
if (is_vring_iotlb(dev, vq, imsg)) {
- rte_spinlock_lock(&vq->access_lock);
+ vhost_spinlock_lock(&vq->access_lock);
vring_invalidate(dev, vq);
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
}
}
break;
@@ -2909,6 +2909,7 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
static void
vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
+ VHOST_NO_THREAD_SAFETY_ANALYSIS
{
unsigned int i = 0;
unsigned int vq_num = 0;
@@ -2917,7 +2918,7 @@ vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
struct vhost_virtqueue *vq = dev->virtqueue[i];
if (vq) {
- rte_spinlock_lock(&vq->access_lock);
+ vhost_spinlock_lock(&vq->access_lock);
vq_num++;
}
i++;
@@ -2926,6 +2927,7 @@ vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
static void
vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
+ VHOST_NO_THREAD_SAFETY_ANALYSIS
{
unsigned int i = 0;
unsigned int vq_num = 0;
@@ -2934,7 +2936,7 @@ vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
struct vhost_virtqueue *vq = dev->virtqueue[i];
if (vq) {
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
vq_num++;
}
i++;
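
vhost_user_lock_all_queue_pairs() and its unlock counterpart acquire a variable number of locks in a loop and release them in a separate function, a pattern the static analysis cannot pair up, so both opt out via VHOST_NO_THREAD_SAFETY_ANALYSIS. A sketch of the limitation, again with the toy types from above:

    /* without NO_TSA, clang cannot see these acquisitions balanced by a
     * matching loop elsewhere and warns that the locks are still held
     * at the end of the function */
    void lock_all(struct toy_vq **vqs, unsigned int n) NO_TSA
    {
        unsigned int i;

        for (i = 0; i < n; i++)
            if (vqs[i] != NULL)
                toy_lock(&vqs[i]->access_lock);
    }
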
@@ -1246,6 +1246,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
static __rte_noinline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint32_t count)
+ VHOST_SPINLOCK_REQUIRES(vq->access_lock)
{
uint32_t pkt_idx = 0;
uint16_t num_buffers;
@@ -1441,6 +1442,7 @@ virtio_dev_rx_packed(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq,
struct rte_mbuf **__rte_restrict pkts,
uint32_t count)
+ VHOST_SPINLOCK_REQUIRES(vq->access_lock)
{
uint32_t pkt_idx = 0;
@@ -1488,7 +1490,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
vq = dev->virtqueue[queue_id];
- rte_spinlock_lock(&vq->access_lock);
+ vhost_spinlock_lock(&vq->access_lock);
if (unlikely(!vq->enabled))
goto out_access_unlock;
@@ -1514,7 +1516,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
vhost_user_iotlb_rd_unlock(vq);
out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
return nb_tx;
}
@@ -1955,11 +1957,11 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
}
static __rte_always_inline uint16_t
-vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
+vhost_poll_enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
uint16_t vchan_id)
+ VHOST_SPINLOCK_REQUIRES(vq->access_lock)
{
- struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
struct vhost_async *async = vq->async;
struct async_inflight_info *pkts_info = async->pkts_info;
uint16_t nr_cpl_pkts = 0;
@@ -2050,7 +2052,7 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq = dev->virtqueue[queue_id];
- if (!rte_spinlock_trylock(&vq->access_lock)) {
+ if (vhost_spinlock_trylock(&vq->access_lock) == 0) {
VHOST_LOG_DATA(DEBUG, "(%s) %s: virtqueue %u is busy.\n", dev->ifname, __func__,
queue_id);
return 0;
@@ -2062,10 +2064,10 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
goto out;
}
- n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count, dma_id, vchan_id);
out:
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
return n_pkts_cpl;
}
@@ -2074,6 +2076,7 @@ uint16_t
rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
uint16_t vchan_id)
+ VHOST_NO_THREAD_SAFETY_ANALYSIS
{
struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
@@ -2104,7 +2107,7 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
return 0;
}
- n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count, dma_id, vchan_id);
return n_pkts_cpl;
}
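
Two related adjustments here: vhost_poll_enqueue_completed() now takes the vhost_virtqueue pointer instead of a queue index, because VHOST_SPINLOCK_REQUIRES(vq->access_lock) can only name a lock reachable from the function's own parameters; and rte_vhost_clear_queue_thread_unsafe(), which by contract runs without the lock held, opts out of the analysis instead of pretending to satisfy it. In sketch form, with the toy types from above:

    /* annotatable: the capability is named through the parameter */
    int drain(struct toy_vq *vq) REQUIRES(vq->access_lock)
    {
        return vq->used_idx;
    }

    /* hard to annotate: nothing in the signature reaches the lock,
     * the queue is only looked up inside the body */
    int drain_by_index(struct toy_vq **vqs, int idx) NO_TSA
    {
        return vqs[idx]->used_idx;
    }
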
@@ -2132,7 +2135,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
vq = dev->virtqueue[queue_id];
- rte_spinlock_lock(&vq->access_lock);
+ vhost_spinlock_lock(&vq->access_lock);
if (unlikely(!vq->enabled || !vq->async))
goto out_access_unlock;
@@ -2160,7 +2163,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
vhost_user_iotlb_rd_unlock(vq);
out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
return nb_tx;
}
@@ -2679,6 +2682,7 @@ static uint16_t
virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
bool legacy_ol_flags)
+ VHOST_SPINLOCK_REQUIRES(vq->access_lock)
{
uint16_t i;
uint16_t free_entries;
@@ -2774,6 +2778,7 @@ static uint16_t
virtio_dev_tx_split_legacy(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **pkts, uint16_t count)
+ VHOST_SPINLOCK_REQUIRES(vq->access_lock)
{
return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
}
@@ -2783,6 +2788,7 @@ static uint16_t
virtio_dev_tx_split_compliant(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **pkts, uint16_t count)
+ VHOST_SPINLOCK_REQUIRES(vq->access_lock)
{
return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
}
@@ -2982,6 +2988,7 @@ virtio_dev_tx_packed(struct virtio_net *dev,
struct rte_mbuf **__rte_restrict pkts,
uint32_t count,
bool legacy_ol_flags)
+ VHOST_SPINLOCK_REQUIRES(vq->access_lock)
{
uint32_t pkt_idx = 0;
@@ -3025,6 +3032,7 @@ static uint16_t
virtio_dev_tx_packed_legacy(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **__rte_restrict pkts, uint32_t count)
+ VHOST_SPINLOCK_REQUIRES(vq->access_lock)
{
return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
}
@@ -3034,6 +3042,7 @@ static uint16_t
virtio_dev_tx_packed_compliant(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **__rte_restrict pkts, uint32_t count)
+ VHOST_SPINLOCK_REQUIRES(vq->access_lock)
{
return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
}
@@ -3065,7 +3074,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
vq = dev->virtqueue[queue_id];
- if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
+ if (unlikely(vhost_spinlock_trylock(&vq->access_lock) == 0))
return 0;
if (unlikely(!vq->enabled)) {
@@ -3134,7 +3143,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
vhost_user_iotlb_rd_unlock(vq);
out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ vhost_spinlock_unlock(&vq->access_lock);
if (unlikely(rarp_mbuf != NULL))
count += 1;