[v5,8/9] vhost: annotate vDPA device list accesses

Message ID 20230201111411.1509520-9-david.marchand@redhat.com
State Superseded, archived
Delegated to: Thomas Monjalon
Series: Lock annotations

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

David Marchand Feb. 1, 2023, 11:14 a.m. UTC
  Access to vdpa_device_list must be protected with the vdpa_device_list_lock
spinlock (see the annotation sketch after the diffstat below).

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
Changes since RFC v3:
- rebased,

---
 lib/vhost/vdpa.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
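
For context, a minimal sketch of the annotation pattern this patch applies is
shown below. It relies on clang's -Wthread-safety analysis via the
__rte_guarded_by() and __rte_exclusive_locks_required() macros used in the
diff; the "dummy" list, its helper functions and the include list are
illustrative assumptions, not part of the patch. The idea is to hide the real
TAILQ head behind a const, guarded pointer so that every TAILQ_* access goes
through a pointer dereference the analyser can check against the spinlock.

#include <sys/queue.h>

#include <rte_spinlock.h>

/* Illustrative element type and list head (hypothetical names). */
struct dummy {
	TAILQ_ENTRY(dummy) next;
};
TAILQ_HEAD(dummy_list, dummy);

/* Real head, only reachable through the guarded pointer below. */
static struct dummy_list dummy_list__ = TAILQ_HEAD_INITIALIZER(dummy_list__);
static rte_spinlock_t dummy_list_lock = RTE_SPINLOCK_INITIALIZER;
/* Every access dereferences this pointer, which clang checks against
 * dummy_list_lock when the thread-safety analysis is enabled. */
static struct dummy_list * const dummy_list
	__rte_guarded_by(&dummy_list_lock) = &dummy_list__;

/* The attribute replaces the old "unsafe, call with the lock held" comment:
 * callers that do not hold dummy_list_lock now trigger a warning. */
static struct dummy *
__dummy_first(void)
	__rte_exclusive_locks_required(&dummy_list_lock)
{
	return TAILQ_FIRST(dummy_list);
}

static struct dummy *
dummy_first(void)
{
	struct dummy *d;

	rte_spinlock_lock(&dummy_list_lock);
	d = __dummy_first(); /* ok: lock is held */
	rte_spinlock_unlock(&dummy_list_lock);
	return d;
}
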
  

Patch

diff --git a/lib/vhost/vdpa.c b/lib/vhost/vdpa.c
index a430a66970..6284ea2ed1 100644
--- a/lib/vhost/vdpa.c
+++ b/lib/vhost/vdpa.c
@@ -23,21 +23,22 @@ 
 /** Double linked list of vDPA devices. */
 TAILQ_HEAD(vdpa_device_list, rte_vdpa_device);
 
-static struct vdpa_device_list vdpa_device_list =
-		TAILQ_HEAD_INITIALIZER(vdpa_device_list);
+static struct vdpa_device_list vdpa_device_list__ =
+	TAILQ_HEAD_INITIALIZER(vdpa_device_list__);
 static rte_spinlock_t vdpa_device_list_lock = RTE_SPINLOCK_INITIALIZER;
+static struct vdpa_device_list * const vdpa_device_list
+	__rte_guarded_by(&vdpa_device_list_lock) = &vdpa_device_list__;
 
-
-/* Unsafe, needs to be called with vdpa_device_list_lock held */
 static struct rte_vdpa_device *
 __vdpa_find_device_by_name(const char *name)
+	__rte_exclusive_locks_required(&vdpa_device_list_lock)
 {
 	struct rte_vdpa_device *dev, *ret = NULL;
 
 	if (name == NULL)
 		return NULL;
 
-	TAILQ_FOREACH(dev, &vdpa_device_list, next) {
+	TAILQ_FOREACH(dev, vdpa_device_list, next) {
 		if (!strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN)) {
 			ret = dev;
 			break;
@@ -116,7 +117,7 @@  rte_vdpa_register_device(struct rte_device *rte_dev,
 		dev->type = RTE_VHOST_VDPA_DEVICE_TYPE_NET;
 	}
 
-	TAILQ_INSERT_TAIL(&vdpa_device_list, dev, next);
+	TAILQ_INSERT_TAIL(vdpa_device_list, dev, next);
 out_unlock:
 	rte_spinlock_unlock(&vdpa_device_list_lock);
 
@@ -130,11 +131,11 @@  rte_vdpa_unregister_device(struct rte_vdpa_device *dev)
 	int ret = -1;
 
 	rte_spinlock_lock(&vdpa_device_list_lock);
-	RTE_TAILQ_FOREACH_SAFE(cur_dev, &vdpa_device_list, next, tmp_dev) {
+	RTE_TAILQ_FOREACH_SAFE(cur_dev, vdpa_device_list, next, tmp_dev) {
 		if (dev != cur_dev)
 			continue;
 
-		TAILQ_REMOVE(&vdpa_device_list, dev, next);
+		TAILQ_REMOVE(vdpa_device_list, dev, next);
 		rte_free(dev);
 		ret = 0;
 		break;
@@ -336,7 +337,7 @@  vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
 
 	rte_spinlock_lock(&vdpa_device_list_lock);
 	if (start == NULL)
-		dev = TAILQ_FIRST(&vdpa_device_list);
+		dev = TAILQ_FIRST(vdpa_device_list);
 	else
 		dev = TAILQ_NEXT(start, next);