From patchwork Fri Dec 3 16:34:00 2021
X-Patchwork-Submitter: "Wang, YuanX"
X-Patchwork-Id: 104849
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Yuan Wang
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, xuan.ding@intel.com, wenwux.ma@intel.com,
 weix.ling@intel.com
Subject: [PATCH] vhost: fix data-plane access to released vq
Date: Fri, 3 Dec 2021 16:34:00 +0000
Message-Id: <20211203163400.164545-1-yuanx.wang@intel.com>
X-Mailer: git-send-email 2.25.1
List-Id: DPDK patches and discussions

When NUMA reallocation occurs, numa_realloc() on the control plane frees
the old vq. If rte_vhost_dequeue_burst() on the data plane fetches the vq
just before that release, it will access the freed vq.

Move vq->access_lock into struct virtio_net so that the lock outlives the
virtqueue it protects and this situation cannot occur.
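
The reason the lock has to move out of the virtqueue can be illustrated
with a small standalone sketch. This is not DPDK code: struct dev stands
in for struct virtio_net, struct vq for struct vhost_virtqueue, the
function names are made up, and plain pthread spinlocks stand in for
rte_spinlock_t. The point is only that a lock embedded in the vq is freed
together with it, while a lock stored in the device, indexed by queue id,
stays valid across the reallocation.

/*
 * Standalone illustration of the locking pattern; NOT the DPDK code.
 * All names here are hypothetical.
 */
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

struct vq {
	int last_used_idx;
};

struct dev {
	struct vq *vq[2];
	/* The lock lives in the device, so its storage is never freed
	 * when a queue is reallocated on another NUMA node. */
	pthread_spinlock_t vq_lock[2];
};

/* Control plane: replace a queue, as a NUMA reallocation would. */
static void realloc_vq(struct dev *d, int qid)
{
	pthread_spin_lock(&d->vq_lock[qid]);
	struct vq *old = d->vq[qid];
	struct vq *new_vq = malloc(sizeof(*new_vq));
	memcpy(new_vq, old, sizeof(*new_vq));
	d->vq[qid] = new_vq;
	free(old);	/* safe: the data plane is excluded by the lock */
	pthread_spin_unlock(&d->vq_lock[qid]);
}

/* Data plane: only dereference the vq pointer while holding the lock. */
static int dequeue(struct dev *d, int qid)
{
	if (pthread_spin_trylock(&d->vq_lock[qid]) != 0)
		return 0;	/* queue busy, caller retries later */
	int idx = d->vq[qid]->last_used_idx;
	pthread_spin_unlock(&d->vq_lock[qid]);
	return idx;
}

int main(void)
{
	struct dev d;
	for (int i = 0; i < 2; i++) {
		d.vq[i] = calloc(1, sizeof(*d.vq[i]));
		pthread_spin_init(&d.vq_lock[i], PTHREAD_PROCESS_PRIVATE);
	}
	realloc_vq(&d, 0);
	return dequeue(&d, 0);
}

Had the spinlock stayed inside struct vq, the trylock on the data-plane
path could itself touch freed memory, which is the access this patch
removes.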
Signed-off-by: Yuan Wang
---
 lib/vhost/vhost.c      | 26 +++++++++++++-------------
 lib/vhost/vhost.h      |  4 +---
 lib/vhost/vhost_user.c |  4 ++--
 lib/vhost/virtio_net.c | 16 ++++++++--------
 4 files changed, 24 insertions(+), 26 deletions(-)

diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index 13a9bb9dd1..4259931be9 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -627,7 +627,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
 		dev->virtqueue[i] = vq;
 		init_vring_queue(dev, i);
-		rte_spinlock_init(&vq->access_lock);
+		rte_spinlock_init(&dev->vq_access_lock[i]);
 		vq->avail_wrap_counter = 1;
 		vq->used_wrap_counter = 1;
 		vq->signalled_used_valid = false;
@@ -1325,7 +1325,7 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
 	if (!vq)
 		return 0;
 
-	rte_spinlock_lock(&vq->access_lock);
+	rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
 
 	if (unlikely(!vq->enabled || vq->avail == NULL))
 		goto out;
@@ -1333,7 +1333,7 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
 	ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
 
 out:
-	rte_spinlock_unlock(&vq->access_lock);
+	rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
 	return ret;
 }
 
@@ -1417,12 +1417,12 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
 	if (!vq)
 		return -1;
 
-	rte_spinlock_lock(&vq->access_lock);
+	rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
 
 	vq->notif_enable = enable;
 	ret = vhost_enable_guest_notification(dev, vq, enable);
 
-	rte_spinlock_unlock(&vq->access_lock);
+	rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
 
 	return ret;
 }
@@ -1479,7 +1479,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 	if (vq == NULL)
 		return 0;
 
-	rte_spinlock_lock(&vq->access_lock);
+	rte_spinlock_lock(&dev->vq_access_lock[qid]);
 
 	if (unlikely(!vq->enabled || vq->avail == NULL))
 		goto out;
@@ -1487,7 +1487,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 	ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
 
 out:
-	rte_spinlock_unlock(&vq->access_lock);
+	rte_spinlock_unlock(&dev->vq_access_lock[qid]);
 	return ret;
 }
 
@@ -1721,9 +1721,9 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 			ops->transfer_data == NULL))
 		return -1;
 
-	rte_spinlock_lock(&vq->access_lock);
+	rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
 	ret = async_channel_register(vid, queue_id, ops);
-	rte_spinlock_unlock(&vq->access_lock);
+	rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
 
 	return ret;
 }
@@ -1784,7 +1784,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 	if (!vq->async)
 		return ret;
 
-	if (!rte_spinlock_trylock(&vq->access_lock)) {
+	if (!rte_spinlock_trylock(&dev->vq_access_lock[queue_id])) {
 		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
 			"virt queue busy.\n");
 		return -1;
@@ -1799,7 +1799,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 	vhost_free_async_mem(vq);
 
 out:
-	rte_spinlock_unlock(&vq->access_lock);
+	rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
 
 	return ret;
 }
@@ -1856,14 +1856,14 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
 	if (!vq->async)
 		return ret;
 
-	if (!rte_spinlock_trylock(&vq->access_lock)) {
+	if (!rte_spinlock_trylock(&dev->vq_access_lock[queue_id])) {
 		VHOST_LOG_CONFIG(DEBUG, "Failed to check in-flight packets. "
 			"virt queue busy.\n");
 		return ret;
 	}
 
 	ret = vq->async->pkts_inflight_n;
-	rte_spinlock_unlock(&vq->access_lock);
+	rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
 
 	return ret;
 }
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 7085e0885c..f85ce4fda5 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -185,9 +185,6 @@ struct vhost_virtqueue {
 	bool			access_ok;
 	bool			ready;
 
-	rte_spinlock_t		access_lock;
-
-
 	union {
 		struct vring_used_elem  *shadow_used_split;
 		struct vring_used_elem_packed *shadow_used_packed;
@@ -384,6 +381,7 @@ struct virtio_net {
 	int			extbuf;
 	int			linearbuf;
 	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
+	rte_spinlock_t		vq_access_lock[VHOST_MAX_QUEUE_PAIRS * 2];
 	struct inflight_mem_info *inflight_info;
 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
 	char			ifname[IF_NAME_SZ];
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index a781346c4d..305b4059bb 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -2899,7 +2899,7 @@ vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
 		struct vhost_virtqueue *vq = dev->virtqueue[i];
 
 		if (vq) {
-			rte_spinlock_lock(&vq->access_lock);
+			rte_spinlock_lock(&dev->vq_access_lock[i]);
 			vq_num++;
 		}
 		i++;
@@ -2916,7 +2916,7 @@ vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
 		struct vhost_virtqueue *vq = dev->virtqueue[i];
 
 		if (vq) {
-			rte_spinlock_unlock(&vq->access_lock);
+			rte_spinlock_unlock(&dev->vq_access_lock[i]);
 			vq_num++;
 		}
 		i++;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b3d954aab4..c5a05292ab 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1354,7 +1354,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 
 	vq = dev->virtqueue[queue_id];
 
-	rte_spinlock_lock(&vq->access_lock);
+	rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
 
 	if (unlikely(!vq->enabled))
 		goto out_access_unlock;
@@ -1380,7 +1380,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	vhost_user_iotlb_rd_unlock(vq);
 
 out_access_unlock:
-	rte_spinlock_unlock(&vq->access_lock);
+	rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
 
 	return nb_tx;
 }
@@ -1906,11 +1906,11 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		return 0;
 	}
 
-	rte_spinlock_lock(&vq->access_lock);
+	rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
 
 	n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
 
-	rte_spinlock_unlock(&vq->access_lock);
+	rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
 
 	return n_pkts_cpl;
 }
@@ -1962,7 +1962,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 
 	vq = dev->virtqueue[queue_id];
 
-	rte_spinlock_lock(&vq->access_lock);
+	rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
 
 	if (unlikely(!vq->enabled || !vq->async))
 		goto out_access_unlock;
@@ -1990,7 +1990,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 	vhost_user_iotlb_rd_unlock(vq);
 
 out_access_unlock:
-	rte_spinlock_unlock(&vq->access_lock);
+	rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
 
 	return nb_tx;
 }
@@ -2900,7 +2900,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
 	vq = dev->virtqueue[queue_id];
 
-	if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
+	if (unlikely(rte_spinlock_trylock(&dev->vq_access_lock[queue_id]) == 0))
 		return 0;
 
 	if (unlikely(!vq->enabled)) {
@@ -2969,7 +2969,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	vhost_user_iotlb_rd_unlock(vq);
 
 out_access_unlock:
-	rte_spinlock_unlock(&vq->access_lock);
+	rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
 
 	if (unlikely(rarp_mbuf != NULL))
 		count += 1;