[RFC,v3,2/8] vhost: annotate virtqueue access lock

Message ID 20220411110013.18624-3-david.marchand@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: David Marchand
Series: vhost lock annotations

Commit Message

David Marchand April 11, 2022, 11 a.m. UTC
  This change simply annotates existing paths of the code leading to
manipulations of the vq->access_lock.

One small change is required: vhost_poll_enqueue_completed was taking
a queue_id to look up the vq, while its callers already had the vq in
hand. For the sake of the annotations, the vq is now passed directly.

vhost_user_lock_all_queue_pairs and vhost_user_unlock_all_queue_pairs
are skipped since each vq->access_lock is only conditionally held there.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 lib/vhost/vhost.h      |  2 ++
 lib/vhost/vhost_user.c |  2 ++
 lib/vhost/virtio_net.c | 16 ++++++++++++----
 3 files changed, 16 insertions(+), 4 deletions(-)
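
For readers not familiar with clang's thread safety analysis, the two
annotations used in the diff below (__rte_exclusive_locks_required and
__rte_no_thread_safety_analysis) wrap clang attributes. The following is
only a plausible sketch of their definitions, assuming a build-time guard
such as RTE_ANNOTATE_LOCKS; the actual definitions are presumably
introduced elsewhere in this series:

/* Illustrative sketch only -- not part of this patch. */
#ifdef RTE_ANNOTATE_LOCKS
#define __rte_exclusive_locks_required(...) \
	__attribute__((exclusive_locks_required(__VA_ARGS__)))
#define __rte_no_thread_safety_analysis \
	__attribute__((no_thread_safety_analysis))
#else
#define __rte_exclusive_locks_required(...)
#define __rte_no_thread_safety_analysis
#endif

With definitions along these lines, a clang build with -Wthread-safety
warns whenever an annotated function is called on a path where the named
lock is not known to be held; functions marked
__rte_no_thread_safety_analysis are simply excluded from the check. The
lock type and the spinlock lock/unlock helpers also need their own
annotations for the analysis to track lock state, which is handled
outside this patch.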
  

Comments

Maxime Coquelin April 21, 2022, 3:25 p.m. UTC | #1
On 4/11/22 13:00, David Marchand wrote:
> This change simply annotates existing paths of the code leading to
> manipulations of the vq->access_lock.
> 
> One small change is required: vhost_poll_enqueue_completed was taking
> a queue_id to look up the vq, while its callers already had the vq in
> hand. For the sake of the annotations, the vq is now passed directly.

It is anyway more consistent with the rest of the code to pass the vq
directly in the internal API when the queue ID is not needed.

> vhost_user_lock_all_queue_pairs and vhost_user_unlock_all_queue_pairs
> are skipped since each vq->access_lock is only conditionally held there.

As discussed off-list, I wonder whether it would be possible to rework
the conditional lock holding using a static array and some macros, so
that we could statically specify, for each request, whether the lock is
required.

> Signed-off-by: David Marchand <david.marchand@redhat.com>
> ---
>   lib/vhost/vhost.h      |  2 ++
>   lib/vhost/vhost_user.c |  2 ++
>   lib/vhost/virtio_net.c | 16 ++++++++++++----
>   3 files changed, 16 insertions(+), 4 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime
  
David Marchand April 22, 2022, 9:49 a.m. UTC | #2
On Thu, Apr 21, 2022 at 5:25 PM Maxime Coquelin
<maxime.coquelin@redhat.com> wrote:
> On 4/11/22 13:00, David Marchand wrote:
> > This change simply annotates existing paths of the code leading to
> > manipulations of the vq->access_lock.
> >
> > One small change is required: vhost_poll_enqueue_completed was taking
> > a queue_id to look up the vq, while its callers already had the vq in
> > hand. For the sake of the annotations, the vq is now passed directly.
>
> It is anyway more consistent with the rest of the code to pass the vq
> directly in the internal API when the queue ID is not needed.
>
> > vhost_user_lock_all_queue_pairs and vhost_user_unlock_all_queue_pairs
> > are skipped since each vq->access_lock is only conditionally held there.
>
> As discussed off-list, I wonder whether it would be possible to rework
> the conditional lock holding using a static array and some macros, so
> that we could statically specify, for each request, whether the lock is
> required.

We did discuss some ideas off-list, but in the end, since we have
multiple locks being dynamically taken in
vhost_user_lock_all_queue_pairs, I see no way to statically annotate
the code.

We could rework the code to have the message handlers in a consolidated
static array, but that would not help with the annotations.
I had some patches going in that direction (related to some fd fixes I
sent before), but they need more work.
I'll see if I can send this later in the release cycle; otherwise it
will go to the next release.
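
To make the idea discussed above concrete, here is a purely hypothetical
sketch of such a consolidated handler table with a per-request lock flag
(handler names and signatures are simplified, not taken from the actual
code); as explained, the locks would still be taken dynamically in a
loop, so this would not make the paths statically annotatable:

/* Hypothetical sketch of a consolidated vhost-user handler table. */
struct vhost_msg_handler {
	const char *name;
	int (*handler)(struct virtio_net **pdev, void *msg, int main_fd);
	bool lock_all_qps; /* take every vq->access_lock around the handler */
};

static const struct vhost_msg_handler msg_handlers[] = {
	[VHOST_USER_SET_MEM_TABLE] = {
		"SET_MEM_TABLE", handle_set_mem_table, true },
	[VHOST_USER_SET_VRING_KICK] = {
		"SET_VRING_KICK", handle_set_vring_kick, true },
	/* ... one entry per request type ... */
};

/* Dispatch would then look roughly like: */
if (msg_handlers[request].lock_all_qps)
	vhost_user_lock_all_queue_pairs(dev);
ret = msg_handlers[request].handler(&dev, &msg, fd);
if (msg_handlers[request].lock_all_qps)
	vhost_user_unlock_all_queue_pairs(dev);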
  

Patch

diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index a9edc271aa..4a0aa35306 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -834,6 +834,7 @@  vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
 
 static __rte_always_inline void
 vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_exclusive_locks_required(vq->access_lock)
 {
 	/* Flush used->idx update before we read avail->flags. */
 	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
@@ -872,6 +873,7 @@  vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 
 static __rte_always_inline void
 vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_exclusive_locks_required(vq->access_lock)
 {
 	uint16_t old, new, off, off_wrap;
 	bool signalled_used_valid, kick = false;
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 1d390677fa..dfa24fec09 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -2909,6 +2909,7 @@  vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
 
 static void
 vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
+	__rte_no_thread_safety_analysis
 {
 	unsigned int i = 0;
 	unsigned int vq_num = 0;
@@ -2926,6 +2927,7 @@  vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
 
 static void
 vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
+	__rte_no_thread_safety_analysis
 {
 	unsigned int i = 0;
 	unsigned int vq_num = 0;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 5f432b0d77..a8b91d4b20 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1246,6 +1246,7 @@  vhost_enqueue_single_packed(struct virtio_net *dev,
 static __rte_noinline uint32_t
 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
+	__rte_exclusive_locks_required(vq->access_lock)
 {
 	uint32_t pkt_idx = 0;
 	uint16_t num_buffers;
@@ -1441,6 +1442,7 @@  virtio_dev_rx_packed(struct virtio_net *dev,
 		     struct vhost_virtqueue *__rte_restrict vq,
 		     struct rte_mbuf **__rte_restrict pkts,
 		     uint32_t count)
+	__rte_exclusive_locks_required(vq->access_lock)
 {
 	uint32_t pkt_idx = 0;
 
@@ -1955,11 +1957,11 @@  write_back_completed_descs_packed(struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline uint16_t
-vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
+vhost_poll_enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
 		uint16_t vchan_id)
+	__rte_exclusive_locks_required(vq->access_lock)
 {
-	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
 	struct vhost_async *async = vq->async;
 	struct async_inflight_info *pkts_info = async->pkts_info;
 	uint16_t nr_cpl_pkts = 0;
@@ -2062,7 +2064,7 @@  rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		goto out;
 	}
 
-	n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+	n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count, dma_id, vchan_id);
 
 out:
 	rte_spinlock_unlock(&vq->access_lock);
@@ -2104,7 +2106,7 @@  rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
 		return 0;
 	}
 
-	n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+	n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count, dma_id, vchan_id);
 
 	return n_pkts_cpl;
 }
@@ -2679,6 +2681,7 @@  static uint16_t
 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
 	bool legacy_ol_flags)
+	__rte_exclusive_locks_required(vq->access_lock)
 {
 	uint16_t i;
 	uint16_t free_entries;
@@ -2774,6 +2777,7 @@  static uint16_t
 virtio_dev_tx_split_legacy(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **pkts, uint16_t count)
+	__rte_exclusive_locks_required(vq->access_lock)
 {
 	return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
 }
@@ -2783,6 +2787,7 @@  static uint16_t
 virtio_dev_tx_split_compliant(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **pkts, uint16_t count)
+	__rte_exclusive_locks_required(vq->access_lock)
 {
 	return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
 }
@@ -2982,6 +2987,7 @@  virtio_dev_tx_packed(struct virtio_net *dev,
 		     struct rte_mbuf **__rte_restrict pkts,
 		     uint32_t count,
 		     bool legacy_ol_flags)
+	__rte_exclusive_locks_required(vq->access_lock)
 {
 	uint32_t pkt_idx = 0;
 
@@ -3025,6 +3031,7 @@  static uint16_t
 virtio_dev_tx_packed_legacy(struct virtio_net *dev,
 	struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **__rte_restrict pkts, uint32_t count)
+	__rte_exclusive_locks_required(vq->access_lock)
 {
 	return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
 }
@@ -3034,6 +3041,7 @@  static uint16_t
 virtio_dev_tx_packed_compliant(struct virtio_net *dev,
 	struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
 	struct rte_mbuf **__rte_restrict pkts, uint32_t count)
+	__rte_exclusive_locks_required(vq->access_lock)
 {
 	return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
 }
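
To illustrate how the annotation added to vhost_poll_enqueue_completed is
satisfied, here is a minimal, simplified sketch of the caller pattern
already visible above in rte_vhost_poll_enqueue_completed (names
shortened, not actual code): the external entry point takes
vq->access_lock and only then calls the annotated internal helper. This
assumes the spinlock lock/unlock functions themselves carry
acquire/release annotations, presumably added elsewhere in this series.

/* Simplified sketch of the caller pattern, not taken from the patch. */
static uint16_t
enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq)
	__rte_exclusive_locks_required(vq->access_lock)
{
	/* The analysis guarantees every caller holds vq->access_lock,
	 * so vq->async and the used ring can be touched safely here.
	 */
	return 0;
}

uint16_t
api_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id)
{
	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
	uint16_t n_pkts;

	rte_spinlock_lock(&vq->access_lock);   /* analysis: lock acquired */
	n_pkts = enqueue_completed(dev, vq);   /* lock held, no warning */
	rte_spinlock_unlock(&vq->access_lock); /* analysis: lock released */

	return n_pkts;
}

Calling enqueue_completed() without taking the lock first would trigger a
-Wthread-safety warning under clang, which is what these annotations are
meant to catch.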