@@ -334,6 +334,7 @@ cleanup_device(struct virtio_net *dev, int destroy)

static void
vhost_free_async_mem(struct vhost_virtqueue *vq)
+ __rte_exclusive_locks_required(vq->access_lock)
{
if (!vq->async)
return;
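
The __rte_exclusive_locks_required() attribute declares that every caller of
vhost_free_async_mem() must already hold vq->access_lock, letting clang's
-Wthread-safety analysis verify the contract at build time. A minimal sketch
of how such annotation macros are typically wired up; the RTE_ANNOTATE_LOCKS
gate and the no-op fallback are assumptions, not necessarily DPDK's exact
header:

    /* Sketch: map the macros to clang thread-safety attributes and
     * compile them away on other compilers. */
    #if defined(__clang__) && defined(RTE_ANNOTATE_LOCKS)
    #define __rte_exclusive_locks_required(...) \
            __attribute__((exclusive_locks_required(__VA_ARGS__)))
    #define __rte_no_thread_safety_analysis \
            __attribute__((no_thread_safety_analysis))
    #define __rte_guarded_var __attribute__((guarded_var))
    #else
    #define __rte_exclusive_locks_required(...)
    #define __rte_no_thread_safety_analysis
    #define __rte_guarded_var
    #endif
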
@@ -352,6 +353,7 @@ vhost_free_async_mem(struct vhost_virtqueue *vq)

void
free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
+ __rte_no_thread_safety_analysis
{
if (vq_is_packed(dev))
rte_free(vq->shadow_used_packed);
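
free_vq() legitimately runs unlocked: it is only reached during device
teardown, when no other thread can touch the virtqueue, so taking
vq->access_lock there would be pointless. __rte_no_thread_safety_analysis is
the escape hatch that keeps clang from flagging its call into
vhost_free_async_mem(). The same pattern in a self-contained, hypothetical
form, reusing the sketch macros above:

    #include <stdlib.h>

    struct obj {
            char *buf __rte_guarded_var; /* normally only touched under a lock */
    };

    /* Teardown path: the object is no longer shared, so the analysis is
     * switched off for this one function. */
    static void
    obj_destroy(struct obj *o)
            __rte_no_thread_safety_analysis
    {
            free(o->buf); /* would warn without the opt-out */
            free(o);
    }
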
@@ -1622,10 +1624,10 @@ rte_vhost_extern_callback_register(int vid,
}

static __rte_always_inline int
-async_channel_register(int vid, uint16_t queue_id)
+async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint16_t queue_id)
+ __rte_exclusive_locks_required(vq->access_lock)
{
- struct virtio_net *dev = get_device(vid);
- struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
struct vhost_async *async;
int node = vq->numa_node;

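
The signature change is what makes the annotation possible: a capability
expression such as vq->access_lock has to be formed from the function's own
parameters, so the virtqueue pointer must appear in the prototype rather than
being derived from vid inside the body. Passing dev along also drops the now
redundant get_device() lookup. In sketch form:

    /* Sketch: the required lock must be nameable from the parameters. */
    static int do_work(struct vhost_virtqueue *vq)
            __rte_exclusive_locks_required(vq->access_lock); /* checkable */

    /* No way to write "holds dev->virtqueue[queue_id]->access_lock" here. */
    static int do_work_by_id(int vid, uint16_t queue_id);
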
@@ -1709,7 +1711,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id)
return -1;

rte_spinlock_lock(&vq->access_lock);
- ret = async_channel_register(vid, queue_id);
+ ret = async_channel_register(dev, vq, queue_id);
rte_spinlock_unlock(&vq->access_lock);

return ret;
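
For clang to accept this caller, the locking primitives themselves must be
modelled: rte_spinlock_lock()/rte_spinlock_unlock() need acquire/release
annotations so the analysis knows vq->access_lock is held across the
async_channel_register() call. A minimal sketch of such a modelled lock, with
illustrative names; the real wiring lives in DPDK's lock headers:

    /* A capability-annotated spinlock the analysis understands. */
    typedef struct __attribute__((capability("spinlock"))) demo_spinlock {
            volatile int locked;
    } demo_spinlock_t;

    static inline void
    demo_spinlock_lock(demo_spinlock_t *l)
            __attribute__((acquire_capability(l)))
    {
            while (__sync_lock_test_and_set(&l->locked, 1))
                    ; /* spin until the lock is ours */
    }

    static inline void
    demo_spinlock_unlock(demo_spinlock_t *l)
            __attribute__((release_capability(l)))
    {
            __sync_lock_release(&l->locked);
    }
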
@@ -1717,6 +1719,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id)

int
rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)
+ __rte_no_thread_safety_analysis
{
struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
@@ -1732,7 +1735,7 @@ rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)
if (unlikely(vq == NULL || !dev->async_copy))
return -1;

- return async_channel_register(vid, queue_id);
+ return async_channel_register(dev, vq, queue_id);
}

int
@@ -1777,6 +1780,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)

int
rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
+ __rte_no_thread_safety_analysis
{
struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
@@ -299,7 +299,7 @@ struct vhost_virtqueue {
struct rte_vhost_resubmit_info *resubmit_inflight;
uint64_t global_counter;

- struct vhost_async *async;
+ struct vhost_async *async __rte_guarded_var;

int notif_enable;
#define VIRTIO_UNINITIALIZED_NOTIF (-1)
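
Annotating the field itself moves the protection contract into the type: with
__rte_guarded_var, any access to vq->async while no lock is held now draws a
-Wthread-safety warning. A hypothetical offender:

    /* Hypothetical: an unlocked peek at the guarded field. */
    static int
    peek_async(struct vhost_virtqueue *vq)
    {
            /* clang reports reading 'async' without a lock held here. */
            return vq->async != NULL;
    }
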
@@ -2199,6 +2199,8 @@ static int
vhost_user_set_vring_enable(struct virtio_net **pdev,
struct vhu_msg_context *ctx,
int main_fd __rte_unused)
+ /* vq->access_lock is taken in vhost_user_lock_all_queue_pairs() */
+ __rte_no_thread_safety_analysis
{
struct virtio_net *dev = *pdev;
bool enable = !!ctx->msg.payload.state.num;
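
Here the lock is real but invisible: vhost_user_lock_all_queue_pairs() takes
every vq->access_lock before the message handlers run, an indirection the
per-function analysis cannot follow, so the handler documents the fact in a
comment and opts out. The shape of the situation, with hypothetical helper
declarations:

    static void lock_all_queue_pairs(struct virtio_net *dev);   /* acquires */
    static void unlock_all_queue_pairs(struct virtio_net *dev); /* releases */

    /* Hypothetical dispatch loop: locks are taken one level above the
     * handlers, outside what the analysis can see. */
    static void
    dispatch_one(struct virtio_net *dev, struct vhu_msg_context *ctx)
    {
            lock_all_queue_pairs(dev); /* every vq->access_lock now held */
            /* ...handlers such as vhost_user_set_vring_enable() run here,
             * with the locks held but unprovably so... */
            unlock_all_queue_pairs(dev);
    }
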
@@ -51,6 +51,7 @@ static __rte_always_inline int64_t
vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
struct vhost_iov_iter *pkt)
+ __rte_exclusive_locks_required(vq->access_lock)
{
struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
uint16_t ring_mask = dma_info->ring_mask;
@@ -99,6 +100,7 @@ static __rte_always_inline uint16_t
vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
int16_t dma_id, uint16_t vchan_id, uint16_t head_idx,
struct vhost_iov_iter *pkts, uint16_t nr_pkts)
+ __rte_exclusive_locks_required(vq->access_lock)
{
struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
int64_t ret, nr_copies = 0;
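
Lock requirements compose transitively, which is why every helper in the
async datapath below carries the same attribute: an annotated function may
call another with the same requirement without any locking of its own, and
the obligation propagates up to the entry points that actually take
vq->access_lock. Sketch:

    static void
    leaf(struct vhost_virtqueue *vq)
            __rte_exclusive_locks_required(vq->access_lock)
    {
    }

    static void
    mid(struct vhost_virtqueue *vq)
            __rte_exclusive_locks_required(vq->access_lock)
    {
            leaf(vq); /* fine: same lock required, no warning */
    }
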
@@ -1000,6 +1002,7 @@ static __rte_always_inline int
async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, uint32_t mbuf_offset,
uint64_t buf_iova, uint32_t cpy_len)
+ __rte_exclusive_locks_required(vq->access_lock)
{
struct vhost_async *async = vq->async;
uint64_t mapped_len;
@@ -1057,6 +1060,7 @@ static __rte_always_inline int
mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, struct buf_vector *buf_vec,
uint16_t nr_vec, uint16_t num_buffers, bool is_async)
+ __rte_exclusive_locks_required(vq->access_lock)
{
uint32_t vec_idx = 0;
uint32_t mbuf_offset, mbuf_avail;
@@ -1186,6 +1190,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
struct rte_mbuf *pkt,
struct buf_vector *buf_vec,
uint16_t *nr_descs)
+ __rte_exclusive_locks_required(vq->access_lock)
{
uint16_t nr_vec = 0;
uint16_t avail_idx = vq->last_avail_idx;
@@ -1417,6 +1422,7 @@ static __rte_always_inline int16_t
virtio_dev_rx_single_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf *pkt)
+ __rte_exclusive_locks_required(vq->access_lock)
{
struct buf_vector buf_vec[BUF_VECTOR_MAX];
uint16_t nr_descs = 0;
@@ -1541,6 +1547,7 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,

static __rte_always_inline uint16_t
async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
+ __rte_exclusive_locks_required(vq->access_lock)
{
struct vhost_async *async = vq->async;

@@ -1587,6 +1594,7 @@ static __rte_noinline uint32_t
virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
int16_t dma_id, uint16_t vchan_id)
+ __rte_exclusive_locks_required(vq->access_lock)
{
struct buf_vector buf_vec[BUF_VECTOR_MAX];
uint32_t pkt_idx = 0;
@@ -1691,6 +1699,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
struct buf_vector *buf_vec,
uint16_t *nr_descs,
uint16_t *nr_buffers)
+ __rte_exclusive_locks_required(vq->access_lock)
{
uint16_t nr_vec = 0;
uint16_t avail_idx = vq->last_avail_idx;
@@ -1748,6 +1757,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
static __rte_always_inline int16_t
virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
+ __rte_exclusive_locks_required(vq->access_lock)
{
struct buf_vector buf_vec[BUF_VECTOR_MAX];

@@ -1766,6 +1776,7 @@ virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
static __rte_always_inline void
dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
uint32_t nr_err, uint32_t *pkt_idx)
+ __rte_exclusive_locks_required(vq->access_lock)
{
uint16_t descs_err = 0;
uint16_t buffers_err = 0;
@@ -1793,6 +1804,7 @@ static __rte_noinline uint32_t
virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
int16_t dma_id, uint16_t vchan_id)
+ __rte_exclusive_locks_required(vq->access_lock)
{
uint32_t pkt_idx = 0;
uint32_t remained = count;
@@ -1863,6 +1875,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue

static __rte_always_inline void
write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
+ __rte_exclusive_locks_required(vq->access_lock)
{
struct vhost_async *async = vq->async;
uint16_t nr_left = n_descs;
@@ -1895,6 +1908,7 @@ write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
static __rte_always_inline void
write_back_completed_descs_packed(struct vhost_virtqueue *vq,
uint16_t n_buffers)
+ __rte_exclusive_locks_required(vq->access_lock)
{
struct vhost_async *async = vq->async;
uint16_t from = async->last_buffer_idx_packed;
@@ -2076,6 +2090,7 @@ uint16_t
rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
uint16_t vchan_id)
+ __rte_no_thread_safety_analysis
{
struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
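
None of this changes the generated code on a default build; the diagnostics
only materialize when the macros expand and the file is compiled by clang
with the analysis enabled, for instance (the RTE_ANNOTATE_LOCKS define is an
assumption about how the macros are gated):

    clang -Wthread-safety -DRTE_ANNOTATE_LOCKS -c file.c
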