get:
Show a patch.

patch:
Update a patch (partial update).

put:
Update a patch (full update).
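
A minimal client-side sketch of the read-only GET request, assuming the Python requests package and the public patchwork.dpdk.org instance (no authentication is needed for reads; the patch ID matches the example response below):

# Fetch a single patch from the Patchwork REST API and read a few fields.
import requests

resp = requests.get("http://patchwork.dpdk.org/api/patches/104849/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])    # "vhost: fix data-plane access to released vq"
print(patch["state"])   # "changes-requested"
print(patch["mbox"])    # mbox URL for the patch, suitable for git am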

GET /api/patches/104849/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 104849,
    "url": "http://patchwork.dpdk.org/api/patches/104849/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20211203163400.164545-1-yuanx.wang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211203163400.164545-1-yuanx.wang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211203163400.164545-1-yuanx.wang@intel.com",
    "date": "2021-12-03T16:34:00",
    "name": "vhost: fix data-plane access to released vq",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "7e583c36ae97065026973d7aa35ad741638e0da6",
    "submitter": {
        "id": 2087,
        "url": "http://patchwork.dpdk.org/api/people/2087/?format=api",
        "name": "Wang, YuanX",
        "email": "yuanx.wang@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patchwork.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20211203163400.164545-1-yuanx.wang@intel.com/mbox/",
    "series": [
        {
            "id": 20847,
            "url": "http://patchwork.dpdk.org/api/series/20847/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=20847",
            "date": "2021-12-03T16:34:00",
            "name": "vhost: fix data-plane access to released vq",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/20847/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/104849/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/104849/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 955CCA0548;\n\tFri,  3 Dec 2021 09:39:55 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 225FE4014F;\n\tFri,  3 Dec 2021 09:39:55 +0100 (CET)",
            "from mga06.intel.com (mga06.intel.com [134.134.136.31])\n by mails.dpdk.org (Postfix) with ESMTP id AA25940041\n for <dev@dpdk.org>; Fri,  3 Dec 2021 09:39:53 +0100 (CET)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 03 Dec 2021 00:39:52 -0800",
            "from unknown (HELO localhost.localdomain) ([10.240.183.50])\n by orsmga008-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 03 Dec 2021 00:39:47 -0800"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10186\"; a=\"297733899\"",
            "E=Sophos;i=\"5.87,283,1631602800\"; d=\"scan'208\";a=\"297733899\"",
            "E=Sophos;i=\"5.87,283,1631602800\"; d=\"scan'208\";a=\"513627219\""
        ],
        "From": "Yuan Wang <yuanx.wang@intel.com>",
        "To": "maxime.coquelin@redhat.com,\n\tchenbo.xia@intel.com",
        "Cc": "dev@dpdk.org, jiayu.hu@intel.com, xuan.ding@intel.com,\n wenwux.ma@intel.com,\n weix.ling@intel.com",
        "Subject": "[PATCH] vhost: fix data-plane access to released vq",
        "Date": "Fri,  3 Dec 2021 16:34:00 +0000",
        "Message-Id": "<20211203163400.164545-1-yuanx.wang@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: yuan wang <yuanx.wang@intel.com>\n\nWhen numa reallocation occurs, numa_realoc() on the control\nplane will free the old vq. If rte_vhost_dequeue_burst()\non the data plane get the vq just before release, then it\nwill access the released vq. We need to put the\nvq->access_lock into struct virtio_net to ensure that it\ncan prevents this situation.\n\nSigned-off-by: Yuan Wang <yuanx.wang@intel.com>\n---\n lib/vhost/vhost.c      | 26 +++++++++++++-------------\n lib/vhost/vhost.h      |  4 +---\n lib/vhost/vhost_user.c |  4 ++--\n lib/vhost/virtio_net.c | 16 ++++++++--------\n 4 files changed, 24 insertions(+), 26 deletions(-)",
    "diff": "diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c\nindex 13a9bb9dd1..4259931be9 100644\n--- a/lib/vhost/vhost.c\n+++ b/lib/vhost/vhost.c\n@@ -627,7 +627,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)\n \n \t\tdev->virtqueue[i] = vq;\n \t\tinit_vring_queue(dev, i);\n-\t\trte_spinlock_init(&vq->access_lock);\n+\t\trte_spinlock_init(&dev->vq_access_lock[i]);\n \t\tvq->avail_wrap_counter = 1;\n \t\tvq->used_wrap_counter = 1;\n \t\tvq->signalled_used_valid = false;\n@@ -1325,7 +1325,7 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)\n \tif (!vq)\n \t\treturn 0;\n \n-\trte_spinlock_lock(&vq->access_lock);\n+\trte_spinlock_lock(&dev->vq_access_lock[queue_id]);\n \n \tif (unlikely(!vq->enabled || vq->avail == NULL))\n \t\tgoto out;\n@@ -1333,7 +1333,7 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)\n \tret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;\n \n out:\n-\trte_spinlock_unlock(&vq->access_lock);\n+\trte_spinlock_unlock(&dev->vq_access_lock[queue_id]);\n \treturn ret;\n }\n \n@@ -1417,12 +1417,12 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)\n \tif (!vq)\n \t\treturn -1;\n \n-\trte_spinlock_lock(&vq->access_lock);\n+\trte_spinlock_lock(&dev->vq_access_lock[queue_id]);\n \n \tvq->notif_enable = enable;\n \tret = vhost_enable_guest_notification(dev, vq, enable);\n \n-\trte_spinlock_unlock(&vq->access_lock);\n+\trte_spinlock_unlock(&dev->vq_access_lock[queue_id]);\n \n \treturn ret;\n }\n@@ -1479,7 +1479,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)\n \tif (vq == NULL)\n \t\treturn 0;\n \n-\trte_spinlock_lock(&vq->access_lock);\n+\trte_spinlock_lock(&dev->vq_access_lock[qid]);\n \n \tif (unlikely(!vq->enabled || vq->avail == NULL))\n \t\tgoto out;\n@@ -1487,7 +1487,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)\n \tret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;\n \n out:\n-\trte_spinlock_unlock(&vq->access_lock);\n+\trte_spinlock_unlock(&dev->vq_access_lock[qid]);\n \treturn ret;\n }\n \n@@ -1721,9 +1721,9 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,\n \t\tops->transfer_data == NULL))\n \t\treturn -1;\n \n-\trte_spinlock_lock(&vq->access_lock);\n+\trte_spinlock_lock(&dev->vq_access_lock[queue_id]);\n \tret = async_channel_register(vid, queue_id, ops);\n-\trte_spinlock_unlock(&vq->access_lock);\n+\trte_spinlock_unlock(&dev->vq_access_lock[queue_id]);\n \n \treturn ret;\n }\n@@ -1784,7 +1784,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)\n \tif (!vq->async)\n \t\treturn ret;\n \n-\tif (!rte_spinlock_trylock(&vq->access_lock)) {\n+\tif (!rte_spinlock_trylock(&dev->vq_access_lock[queue_id])) {\n \t\tVHOST_LOG_CONFIG(ERR, \"Failed to unregister async channel. \"\n \t\t\t\"virt queue busy.\\n\");\n \t\treturn -1;\n@@ -1799,7 +1799,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)\n \n \tvhost_free_async_mem(vq);\n out:\n-\trte_spinlock_unlock(&vq->access_lock);\n+\trte_spinlock_unlock(&dev->vq_access_lock[queue_id]);\n \n \treturn ret;\n }\n@@ -1856,14 +1856,14 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)\n \tif (!vq->async)\n \t\treturn ret;\n \n-\tif (!rte_spinlock_trylock(&vq->access_lock)) {\n+\tif (!rte_spinlock_trylock(&dev->vq_access_lock[queue_id])) {\n \t\tVHOST_LOG_CONFIG(DEBUG, \"Failed to check in-flight packets. 
\"\n \t\t\t\"virt queue busy.\\n\");\n \t\treturn ret;\n \t}\n \n \tret = vq->async->pkts_inflight_n;\n-\trte_spinlock_unlock(&vq->access_lock);\n+\trte_spinlock_unlock(&dev->vq_access_lock[queue_id]);\n \n \treturn ret;\n }\ndiff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h\nindex 7085e0885c..f85ce4fda5 100644\n--- a/lib/vhost/vhost.h\n+++ b/lib/vhost/vhost.h\n@@ -185,9 +185,6 @@ struct vhost_virtqueue {\n \tbool\t\t\taccess_ok;\n \tbool\t\t\tready;\n \n-\trte_spinlock_t\t\taccess_lock;\n-\n-\n \tunion {\n \t\tstruct vring_used_elem  *shadow_used_split;\n \t\tstruct vring_used_elem_packed *shadow_used_packed;\n@@ -384,6 +381,7 @@ struct virtio_net {\n \tint\t\t\textbuf;\n \tint\t\t\tlinearbuf;\n \tstruct vhost_virtqueue\t*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];\n+\trte_spinlock_t\t\tvq_access_lock[VHOST_MAX_QUEUE_PAIRS * 2];\n \tstruct inflight_mem_info *inflight_info;\n #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)\n \tchar\t\t\tifname[IF_NAME_SZ];\ndiff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c\nindex a781346c4d..305b4059bb 100644\n--- a/lib/vhost/vhost_user.c\n+++ b/lib/vhost/vhost_user.c\n@@ -2899,7 +2899,7 @@ vhost_user_lock_all_queue_pairs(struct virtio_net *dev)\n \t\tstruct vhost_virtqueue *vq = dev->virtqueue[i];\n \n \t\tif (vq) {\n-\t\t\trte_spinlock_lock(&vq->access_lock);\n+\t\t\trte_spinlock_lock(&dev->vq_access_lock[i]);\n \t\t\tvq_num++;\n \t\t}\n \t\ti++;\n@@ -2916,7 +2916,7 @@ vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)\n \t\tstruct vhost_virtqueue *vq = dev->virtqueue[i];\n \n \t\tif (vq) {\n-\t\t\trte_spinlock_unlock(&vq->access_lock);\n+\t\t\trte_spinlock_unlock(&dev->vq_access_lock[i]);\n \t\t\tvq_num++;\n \t\t}\n \t\ti++;\ndiff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c\nindex b3d954aab4..c5a05292ab 100644\n--- a/lib/vhost/virtio_net.c\n+++ b/lib/vhost/virtio_net.c\n@@ -1354,7 +1354,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,\n \n \tvq = dev->virtqueue[queue_id];\n \n-\trte_spinlock_lock(&vq->access_lock);\n+\trte_spinlock_lock(&dev->vq_access_lock[queue_id]);\n \n \tif (unlikely(!vq->enabled))\n \t\tgoto out_access_unlock;\n@@ -1380,7 +1380,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,\n \t\tvhost_user_iotlb_rd_unlock(vq);\n \n out_access_unlock:\n-\trte_spinlock_unlock(&vq->access_lock);\n+\trte_spinlock_unlock(&dev->vq_access_lock[queue_id]);\n \n \treturn nb_tx;\n }\n@@ -1906,11 +1906,11 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \t\treturn 0;\n \t}\n \n-\trte_spinlock_lock(&vq->access_lock);\n+\trte_spinlock_lock(&dev->vq_access_lock[queue_id]);\n \n \tn_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);\n \n-\trte_spinlock_unlock(&vq->access_lock);\n+\trte_spinlock_unlock(&dev->vq_access_lock[queue_id]);\n \n \treturn n_pkts_cpl;\n }\n@@ -1962,7 +1962,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,\n \n \tvq = dev->virtqueue[queue_id];\n \n-\trte_spinlock_lock(&vq->access_lock);\n+\trte_spinlock_lock(&dev->vq_access_lock[queue_id]);\n \n \tif (unlikely(!vq->enabled || !vq->async))\n \t\tgoto out_access_unlock;\n@@ -1990,7 +1990,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,\n \t\tvhost_user_iotlb_rd_unlock(vq);\n \n out_access_unlock:\n-\trte_spinlock_unlock(&vq->access_lock);\n+\trte_spinlock_unlock(&dev->vq_access_lock[queue_id]);\n \n \treturn nb_tx;\n }\n@@ -2900,7 +2900,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,\n \n \tvq = dev->virtqueue[queue_id];\n 
\n-\tif (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))\n+\tif (unlikely(rte_spinlock_trylock(&dev->vq_access_lock[queue_id]) == 0))\n \t\treturn 0;\n \n \tif (unlikely(!vq->enabled)) {\n@@ -2969,7 +2969,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,\n \t\tvhost_user_iotlb_rd_unlock(vq);\n \n out_access_unlock:\n-\trte_spinlock_unlock(&vq->access_lock);\n+\trte_spinlock_unlock(&dev->vq_access_lock[queue_id]);\n \n \tif (unlikely(rarp_mbuf != NULL))\n \t\tcount += 1;\n",
    "prefixes": []
}
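
The PUT and PATCH methods listed in the Allow header update this same resource. Below is a sketch of a partial update via PATCH, assuming a maintainer account with a Patchwork API token (YOUR_API_TOKEN is a placeholder) and writable fields such as state and archived:

# Partially update the patch; write access requires an API token with
# maintainer rights on the project.
import requests

resp = requests.patch(
    "http://patchwork.dpdk.org/api/patches/104849/",
    headers={"Authorization": "Token YOUR_API_TOKEN"},
    json={"state": "accepted", "archived": False},
)
resp.raise_for_status()
print(resp.json()["state"])   # state echoed back after the update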