get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update (replace) a patch.

GET /api/patches/96045/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 96045,
    "url": "http://patchwork.dpdk.org/api/patches/96045/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210719081022.12949-2-cheng1.jiang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210719081022.12949-2-cheng1.jiang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210719081022.12949-2-cheng1.jiang@intel.com",
    "date": "2021-07-19T08:10:18",
    "name": "[v6,1/5] vhost: fix async vhost ops return type",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "36d6127d5e242b8ce36b66c1f02e7e487282d130",
    "submitter": {
        "id": 1530,
        "url": "http://patchwork.dpdk.org/api/people/1530/?format=api",
        "name": "Jiang, Cheng1",
        "email": "Cheng1.jiang@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patchwork.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210719081022.12949-2-cheng1.jiang@intel.com/mbox/",
    "series": [
        {
            "id": 17889,
            "url": "http://patchwork.dpdk.org/api/series/17889/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=17889",
            "date": "2021-07-19T08:10:17",
            "name": "vhost: handle memory hotplug for async vhost",
            "version": 6,
            "mbox": "http://patchwork.dpdk.org/series/17889/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/96045/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/96045/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C84BDA034F;\n\tMon, 19 Jul 2021 10:27:24 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id BBBE1410EE;\n\tMon, 19 Jul 2021 10:27:20 +0200 (CEST)",
            "from mga07.intel.com (mga07.intel.com [134.134.136.100])\n by mails.dpdk.org (Postfix) with ESMTP id 4CCE4410DD;\n Mon, 19 Jul 2021 10:27:19 +0200 (CEST)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 19 Jul 2021 01:27:18 -0700",
            "from dpdk_jiangcheng.sh.intel.com ([10.67.119.149])\n by fmsmga002.fm.intel.com with ESMTP; 19 Jul 2021 01:27:17 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10049\"; a=\"274841379\"",
            "E=Sophos;i=\"5.84,251,1620716400\"; d=\"scan'208\";a=\"274841379\"",
            "E=Sophos;i=\"5.84,251,1620716400\"; d=\"scan'208\";a=\"509279690\""
        ],
        "X-ExtLoop1": "1",
        "From": "Cheng Jiang <cheng1.jiang@intel.com>",
        "To": "maxime.coquelin@redhat.com,\n\tChenbo.Xia@intel.com",
        "Cc": "dev@dpdk.org, jiayu.hu@intel.com, yvonnex.yang@intel.com,\n Cheng Jiang <cheng1.jiang@intel.com>, stable@dpdk.org",
        "Date": "Mon, 19 Jul 2021 08:10:18 +0000",
        "Message-Id": "<20210719081022.12949-2-cheng1.jiang@intel.com>",
        "X-Mailer": "git-send-email 2.29.2",
        "In-Reply-To": "<20210719081022.12949-1-cheng1.jiang@intel.com>",
        "References": "<20210602042802.31943-1-cheng1.jiang@intel.com>\n <20210719081022.12949-1-cheng1.jiang@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v6 1/5] vhost: fix async vhost ops return type",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The async vhost callback ops should return negative value when there\nare something wrong in the callback, so the return type should be\nchanged into int32_t. The issue in vhost example is also fixed.\n\nFixes: cd6760da1076 (\"vhost: introduce async enqueue for split ring\")\nFixes: 819a71685826 (\"vhost: fix async callback return type\")\nFixes: 6b3c81db8bb7 (\"vhost: simplify async copy completion\")\nFixes: abec60e7115d (\"examples/vhost: support vhost async data path\")\nFixes: 873e8dad6f49 (\"vhost: support packed ring in async datapath\")\nCc: stable@dpdk.org\n\nSigned-off-by: Cheng Jiang <cheng1.jiang@intel.com>\n---\n examples/vhost/ioat.c       |  4 +--\n examples/vhost/ioat.h       |  4 +--\n lib/vhost/rte_vhost_async.h |  8 ++---\n lib/vhost/virtio_net.c      | 61 ++++++++++++++++++++++++++++++++-----\n 4 files changed, 61 insertions(+), 16 deletions(-)",
    "diff": "diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c\nindex 2a2c2d7202..457f8171f0 100644\n--- a/examples/vhost/ioat.c\n+++ b/examples/vhost/ioat.c\n@@ -122,7 +122,7 @@ open_ioat(const char *value)\n \treturn ret;\n }\n \n-uint32_t\n+int32_t\n ioat_transfer_data_cb(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_desc *descs,\n \t\tstruct rte_vhost_async_status *opaque_data, uint16_t count)\n@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,\n \treturn i_desc;\n }\n \n-uint32_t\n+int32_t\n ioat_check_completed_copies_cb(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_status *opaque_data,\n \t\tuint16_t max_packets)\ndiff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h\nindex 1aa28ed6a3..b57b5645b0 100644\n--- a/examples/vhost/ioat.h\n+++ b/examples/vhost/ioat.h\n@@ -27,12 +27,12 @@ struct dma_for_vhost {\n #ifdef RTE_RAW_IOAT\n int open_ioat(const char *value);\n \n-uint32_t\n+int32_t\n ioat_transfer_data_cb(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_desc *descs,\n \t\tstruct rte_vhost_async_status *opaque_data, uint16_t count);\n \n-uint32_t\n+int32_t\n ioat_check_completed_copies_cb(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_status *opaque_data,\n \t\tuint16_t max_packets);\ndiff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h\nindex 6faa31f5ad..e964d83837 100644\n--- a/lib/vhost/rte_vhost_async.h\n+++ b/lib/vhost/rte_vhost_async.h\n@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {\n \t * @param count\n \t *  number of elements in the \"descs\" array\n \t * @return\n-\t *  number of descs processed\n+\t *  number of descs processed, negative value means error\n \t */\n-\tuint32_t (*transfer_data)(int vid, uint16_t queue_id,\n+\tint32_t (*transfer_data)(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_desc *descs,\n \t\tstruct rte_vhost_async_status *opaque_data,\n \t\tuint16_t count);\n@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {\n \t 
* @param max_packets\n \t *  max number of packets could be completed\n \t * @return\n-\t *  number of async descs completed\n+\t *  number of async descs completed, negative value means error\n \t */\n-\tuint32_t (*check_completed_copies)(int vid, uint16_t queue_id,\n+\tint32_t (*check_completed_copies)(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_status *opaque_data,\n \t\tuint16_t max_packets);\n };\ndiff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c\nindex b93482587c..16ae4d9e19 100644\n--- a/lib/vhost/virtio_net.c\n+++ b/lib/vhost/virtio_net.c\n@@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \tstruct async_inflight_info *pkts_info = vq->async_pkts_info;\n \tuint32_t n_pkts = 0, pkt_err = 0;\n \tuint32_t num_async_pkts = 0, num_done_pkts = 0;\n+\tint32_t n_xfer;\n \tstruct {\n \t\tuint16_t pkt_idx;\n \t\tuint16_t last_avail_idx;\n@@ -1608,8 +1609,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \t\tif (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||\n \t\t\t((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <\n \t\t\tBUF_VECTOR_MAX))) {\n-\t\t\tn_pkts = vq->async_ops.transfer_data(dev->vid,\n+\t\t\tn_xfer = vq->async_ops.transfer_data(dev->vid,\n \t\t\t\t\tqueue_id, tdes, 0, pkt_burst_idx);\n+\t\t\tif (n_xfer >= 0) {\n+\t\t\t\tn_pkts = n_xfer;\n+\t\t\t} else {\n+\t\t\t\tVHOST_LOG_DATA(ERR,\n+\t\t\t\t\t\"(%d) %s: failed to transfer data for queue id %d.\\n\",\n+\t\t\t\t\tdev->vid, __func__, queue_id);\n+\t\t\t\tn_pkts = 0;\n+\t\t\t}\n+\n \t\t\tiovec_idx = 0;\n \t\t\tit_idx = 0;\n \n@@ -1632,8 +1642,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \t}\n \n \tif (pkt_burst_idx) {\n-\t\tn_pkts = vq->async_ops.transfer_data(dev->vid,\n-\t\t\t\tqueue_id, tdes, 0, pkt_burst_idx);\n+\t\tn_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);\n+\t\tif (n_xfer >= 0) {\n+\t\t\tn_pkts = n_xfer;\n+\t\t} else {\n+\t\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: failed to 
transfer data for queue id %d.\\n\",\n+\t\t\t\tdev->vid, __func__, queue_id);\n+\t\t\tn_pkts = 0;\n+\t\t}\n+\n \t\tvq->async_pkts_inflight_n += n_pkts;\n \n \t\tif (unlikely(n_pkts < pkt_burst_idx))\n@@ -1903,6 +1920,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,\n \tuint16_t async_descs_idx = 0;\n \tuint16_t num_buffers;\n \tuint16_t num_descs;\n+\tint32_t n_xfer;\n \n \tstruct rte_vhost_iov_iter *it_pool = vq->it_pool;\n \tstruct iovec *vec_pool = vq->vec_pool;\n@@ -1983,8 +2001,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,\n \t\t */\n \t\tif (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||\n \t\t\t((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {\n-\t\t\tn_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,\n-\t\t\t\ttdes, 0, pkt_burst_idx);\n+\t\t\tn_xfer = vq->async_ops.transfer_data(dev->vid,\n+\t\t\t\t\tqueue_id, tdes, 0, pkt_burst_idx);\n+\t\t\tif (n_xfer >= 0) {\n+\t\t\t\tn_pkts = n_xfer;\n+\t\t\t} else {\n+\t\t\t\tVHOST_LOG_DATA(ERR,\n+\t\t\t\t\t\"(%d) %s: failed to transfer data for queue id %d.\\n\",\n+\t\t\t\t\tdev->vid, __func__, queue_id);\n+\t\t\t\tn_pkts = 0;\n+\t\t\t}\n+\n \t\t\tiovec_idx = 0;\n \t\t\tit_idx = 0;\n \t\t\tsegs_await = 0;\n@@ -2006,7 +2033,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,\n \t} while (pkt_idx < count);\n \n \tif (pkt_burst_idx) {\n-\t\tn_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);\n+\t\tn_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);\n+\t\tif (n_xfer >= 0) {\n+\t\t\tn_pkts = n_xfer;\n+\t\t} else {\n+\t\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: failed to transfer data for queue id %d.\\n\",\n+\t\t\t\tdev->vid, __func__, queue_id);\n+\t\t\tn_pkts = 0;\n+\t\t}\n+\n \t\tvq->async_pkts_inflight_n += n_pkts;\n \n \t\tif (unlikely(n_pkts < pkt_burst_idx))\n@@ -2091,6 +2126,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \tuint16_t start_idx, pkts_idx, vq_size;\n 
\tstruct async_inflight_info *pkts_info;\n \tuint16_t from, i;\n+\tint32_t n_cpl;\n \n \tif (!dev)\n \t\treturn 0;\n@@ -2118,9 +2154,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \tstart_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,\n \t\tvq_size, vq->async_pkts_inflight_n);\n \n-\tif (count > vq->async_last_pkts_n)\n-\t\tn_pkts_cpl = vq->async_ops.check_completed_copies(vid,\n+\tif (count > vq->async_last_pkts_n) {\n+\t\tn_cpl = vq->async_ops.check_completed_copies(vid,\n \t\t\tqueue_id, 0, count - vq->async_last_pkts_n);\n+\t\tif (n_cpl >= 0) {\n+\t\t\tn_pkts_cpl = n_cpl;\n+\t\t} else {\n+\t\t\tVHOST_LOG_DATA(ERR,\n+\t\t\t\t\"(%d) %s: failed to check completed copies for queue id %d.\\n\",\n+\t\t\t\tdev->vid, __func__, queue_id);\n+\t\t\tn_pkts_cpl = 0;\n+\t\t}\n+\t}\n \tn_pkts_cpl += vq->async_last_pkts_n;\n \n \tn_pkts_put = RTE_MIN(count, n_pkts_cpl);\n",
    "prefixes": [
        "v6",
        "1/5"
    ]
}