get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/96230/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 96230,
    "url": "http://patchwork.dpdk.org/api/patches/96230/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210723080937.20256-3-cheng1.jiang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210723080937.20256-3-cheng1.jiang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210723080937.20256-3-cheng1.jiang@intel.com",
    "date": "2021-07-23T08:09:35",
    "name": "[v8,2/4] vhost: add unsafe API to clear packets in async vhost",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "cf84701f92aa652978dd94eebc89291092667fc7",
    "submitter": {
        "id": 1530,
        "url": "http://patchwork.dpdk.org/api/people/1530/?format=api",
        "name": "Jiang, Cheng1",
        "email": "Cheng1.jiang@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patchwork.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210723080937.20256-3-cheng1.jiang@intel.com/mbox/",
    "series": [
        {
            "id": 17963,
            "url": "http://patchwork.dpdk.org/api/series/17963/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=17963",
            "date": "2021-07-23T08:09:33",
            "name": "vhost: handle memory hotplug for async vhost",
            "version": 8,
            "mbox": "http://patchwork.dpdk.org/series/17963/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/96230/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/96230/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D0AA0A0C46;\n\tFri, 23 Jul 2021 10:27:14 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 92708410DD;\n\tFri, 23 Jul 2021 10:27:07 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by mails.dpdk.org (Postfix) with ESMTP id 0D43D4003C\n for <dev@dpdk.org>; Fri, 23 Jul 2021 10:27:05 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 23 Jul 2021 01:27:05 -0700",
            "from dpdk_jiangcheng.sh.intel.com ([10.67.119.149])\n by FMSMGA003.fm.intel.com with ESMTP; 23 Jul 2021 01:27:03 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10053\"; a=\"199042817\"",
            "E=Sophos;i=\"5.84,263,1620716400\"; d=\"scan'208\";a=\"199042817\"",
            "E=Sophos;i=\"5.84,263,1620716400\"; d=\"scan'208\";a=\"502430095\""
        ],
        "X-ExtLoop1": "1",
        "From": "Cheng Jiang <cheng1.jiang@intel.com>",
        "To": "maxime.coquelin@redhat.com,\n\tChenbo.Xia@intel.com",
        "Cc": "dev@dpdk.org, jiayu.hu@intel.com, yvonnex.yang@intel.com,\n Cheng Jiang <cheng1.jiang@intel.com>",
        "Date": "Fri, 23 Jul 2021 08:09:35 +0000",
        "Message-Id": "<20210723080937.20256-3-cheng1.jiang@intel.com>",
        "X-Mailer": "git-send-email 2.29.2",
        "In-Reply-To": "<20210723080937.20256-1-cheng1.jiang@intel.com>",
        "References": "<20210602042802.31943-1-cheng1.jiang@intel.com>\n <20210723080937.20256-1-cheng1.jiang@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v8 2/4] vhost: add unsafe API to clear packets in\n async vhost",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Applications need to stop DMA transfers and finish all the inflight\npackets when in VM memory hot-plug case and async vhost is used. This\npatch is to provide an unsafe API to clear inflight packets which\nare submitted to DMA engine in vhost async data path. Update the\nprogram guide and release notes for virtqueue inflight packets clear\nAPI in vhost lib.\n\nSigned-off-by: Cheng Jiang <cheng1.jiang@intel.com>\nReviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n doc/guides/prog_guide/vhost_lib.rst    |  5 ++\n doc/guides/rel_notes/release_21_08.rst |  5 ++\n lib/vhost/rte_vhost_async.h            | 22 ++++++\n lib/vhost/version.map                  |  1 +\n lib/vhost/virtio_net.c                 | 93 +++++++++++++++++++-------\n 5 files changed, 102 insertions(+), 24 deletions(-)",
    "diff": "diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst\nindex 70ce4974df..8874033165 100644\n--- a/doc/guides/prog_guide/vhost_lib.rst\n+++ b/doc/guides/prog_guide/vhost_lib.rst\n@@ -305,6 +305,11 @@ The following is an overview of some key Vhost API functions:\n   This function returns the amount of in-flight packets for the vhost\n   queue using async acceleration.\n \n+* ``rte_vhost_clear_queue_thread_unsafe(vid, queue_id, **pkts, count)``\n+\n+  Clear inflight packets which are submitted to DMA engine in vhost async data\n+  path. Completed packets are returned to applications through ``pkts``.\n+\n Vhost-user Implementations\n --------------------------\n \ndiff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst\nindex 543e93ff1d..d9c4cc5df0 100644\n--- a/doc/guides/rel_notes/release_21_08.rst\n+++ b/doc/guides/rel_notes/release_21_08.rst\n@@ -155,6 +155,11 @@ New Features\n   The experimental PMD power management API now supports managing\n   multiple Ethernet Rx queues per lcore.\n \n+* **Added inflight packets clear API in vhost library.**\n+\n+  Added an API which can clear the inflight packets submitted to DMA\n+  engine in vhost async data path.\n+\n \n Removed Items\n -------------\ndiff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h\nindex 02d012ae23..b25ff446f7 100644\n--- a/lib/vhost/rte_vhost_async.h\n+++ b/lib/vhost/rte_vhost_async.h\n@@ -246,4 +246,26 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n __rte_experimental\n int rte_vhost_async_get_inflight(int vid, uint16_t queue_id);\n \n+/**\n+ * This function checks async completion status and clear packets for\n+ * a specific vhost device queue. 
Packets which are inflight will be\n+ * returned in an array.\n+ *\n+ * @note This function does not perform any locking\n+ *\n+ * @param vid\n+ *  ID of vhost device to clear data\n+ * @param queue_id\n+ *  Queue id to clear data\n+ * @param pkts\n+ *  Blank array to get return packet pointer\n+ * @param count\n+ *  Size of the packet array\n+ * @return\n+ *  Number of packets returned\n+ */\n+__rte_experimental\n+uint16_t rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,\n+\t\tstruct rte_mbuf **pkts, uint16_t count);\n+\n #endif /* _RTE_VHOST_ASYNC_H_ */\ndiff --git a/lib/vhost/version.map b/lib/vhost/version.map\nindex e0c89646e8..e2504ba657 100644\n--- a/lib/vhost/version.map\n+++ b/lib/vhost/version.map\n@@ -84,4 +84,5 @@ EXPERIMENTAL {\n \trte_vhost_async_get_inflight;\n \trte_vhost_async_channel_register_thread_unsafe;\n \trte_vhost_async_channel_unregister_thread_unsafe;\n+\trte_vhost_clear_queue_thread_unsafe;\n };\ndiff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c\nindex 3ab5229f76..8549afbbe1 100644\n--- a/lib/vhost/virtio_net.c\n+++ b/lib/vhost/virtio_net.c\n@@ -2214,10 +2214,10 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,\n \t} while (nr_left > 0);\n }\n \n-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n+static __rte_always_inline uint16_t\n+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,\n \t\tstruct rte_mbuf **pkts, uint16_t count)\n {\n-\tstruct virtio_net *dev = get_device(vid);\n \tstruct vhost_virtqueue *vq;\n \tuint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;\n \tuint16_t start_idx, pkts_idx, vq_size;\n@@ -2225,26 +2225,8 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \tuint16_t from, i;\n \tint32_t n_cpl;\n \n-\tif (!dev)\n-\t\treturn 0;\n-\n-\tVHOST_LOG_DATA(DEBUG, \"(%d) %s\\n\", dev->vid, __func__);\n-\tif (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {\n-\t\tVHOST_LOG_DATA(ERR, 
\"(%d) %s: invalid virtqueue idx %d.\\n\",\n-\t\t\tdev->vid, __func__, queue_id);\n-\t\treturn 0;\n-\t}\n-\n \tvq = dev->virtqueue[queue_id];\n \n-\tif (unlikely(!vq->async_registered)) {\n-\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: async not registered for queue id %d.\\n\",\n-\t\t\tdev->vid, __func__, queue_id);\n-\t\treturn 0;\n-\t}\n-\n-\trte_spinlock_lock(&vq->access_lock);\n-\n \tpkts_idx = vq->async_pkts_idx % vq->size;\n \tpkts_info = vq->async_pkts_info;\n \tvq_size = vq->size;\n@@ -2252,7 +2234,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \t\tvq_size, vq->async_pkts_inflight_n);\n \n \tif (count > vq->async_last_pkts_n) {\n-\t\tn_cpl = vq->async_ops.check_completed_copies(vid,\n+\t\tn_cpl = vq->async_ops.check_completed_copies(dev->vid,\n \t\t\tqueue_id, 0, count - vq->async_last_pkts_n);\n \t\tif (n_cpl >= 0) {\n \t\t\tn_pkts_cpl = n_cpl;\n@@ -2268,7 +2250,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \tn_pkts_put = RTE_MIN(count, n_pkts_cpl);\n \tif (unlikely(n_pkts_put == 0)) {\n \t\tvq->async_last_pkts_n = n_pkts_cpl;\n-\t\tgoto done;\n+\t\treturn 0;\n \t}\n \n \tif (vq_is_packed(dev)) {\n@@ -2310,10 +2292,73 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \t\t}\n \t}\n \n-done:\n+\treturn n_pkts_put;\n+}\n+\n+uint16_t\n+rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n+\t\tstruct rte_mbuf **pkts, uint16_t count)\n+{\n+\tstruct virtio_net *dev = get_device(vid);\n+\tstruct vhost_virtqueue *vq;\n+\tuint16_t n_pkts_cpl = 0;\n+\n+\tif (!dev)\n+\t\treturn 0;\n+\n+\tVHOST_LOG_DATA(DEBUG, \"(%d) %s\\n\", dev->vid, __func__);\n+\tif (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {\n+\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: invalid virtqueue idx %d.\\n\",\n+\t\t\tdev->vid, __func__, queue_id);\n+\t\treturn 0;\n+\t}\n+\n+\tvq = dev->virtqueue[queue_id];\n+\n+\tif (unlikely(!vq->async_registered)) {\n+\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: async not registered 
for queue id %d.\\n\",\n+\t\t\tdev->vid, __func__, queue_id);\n+\t\treturn 0;\n+\t}\n+\n+\trte_spinlock_lock(&vq->access_lock);\n+\n+\tn_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);\n+\n \trte_spinlock_unlock(&vq->access_lock);\n \n-\treturn n_pkts_put;\n+\treturn n_pkts_cpl;\n+}\n+\n+uint16_t\n+rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,\n+\t\tstruct rte_mbuf **pkts, uint16_t count)\n+{\n+\tstruct virtio_net *dev = get_device(vid);\n+\tstruct vhost_virtqueue *vq;\n+\tuint16_t n_pkts_cpl = 0;\n+\n+\tif (!dev)\n+\t\treturn 0;\n+\n+\tVHOST_LOG_DATA(DEBUG, \"(%d) %s\\n\", dev->vid, __func__);\n+\tif (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {\n+\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: invalid virtqueue idx %d.\\n\",\n+\t\t\tdev->vid, __func__, queue_id);\n+\t\treturn 0;\n+\t}\n+\n+\tvq = dev->virtqueue[queue_id];\n+\n+\tif (unlikely(!vq->async_registered)) {\n+\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: async not registered for queue id %d.\\n\",\n+\t\t\tdev->vid, __func__, queue_id);\n+\t\treturn 0;\n+\t}\n+\n+\tn_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);\n+\n+\treturn n_pkts_cpl;\n }\n \n static __rte_always_inline uint32_t\n",
    "prefixes": [
        "v8",
        "2/4"
    ]
}