get:
Show a patch.

patch:
Partially update a patch (only the submitted fields are changed).

put:
Fully update a patch (replaces all writable fields).

GET /api/patches/96229/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 96229,
    "url": "http://patchwork.dpdk.org/api/patches/96229/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210723080937.20256-2-cheng1.jiang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210723080937.20256-2-cheng1.jiang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210723080937.20256-2-cheng1.jiang@intel.com",
    "date": "2021-07-23T08:09:34",
    "name": "[v8,1/4] vhost: fix async vhost ops return type",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "8054846ce1785c1a72e4a87ae3837e4d0fa9ff26",
    "submitter": {
        "id": 1530,
        "url": "http://patchwork.dpdk.org/api/people/1530/?format=api",
        "name": "Jiang, Cheng1",
        "email": "Cheng1.jiang@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patchwork.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210723080937.20256-2-cheng1.jiang@intel.com/mbox/",
    "series": [
        {
            "id": 17963,
            "url": "http://patchwork.dpdk.org/api/series/17963/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=17963",
            "date": "2021-07-23T08:09:33",
            "name": "vhost: handle memory hotplug for async vhost",
            "version": 8,
            "mbox": "http://patchwork.dpdk.org/series/17963/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/96229/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/96229/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 4796BA0C46;\n\tFri, 23 Jul 2021 10:27:08 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 43BE340DF5;\n\tFri, 23 Jul 2021 10:27:05 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by mails.dpdk.org (Postfix) with ESMTP id B6912406A2;\n Fri, 23 Jul 2021 10:27:03 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 23 Jul 2021 01:27:03 -0700",
            "from dpdk_jiangcheng.sh.intel.com ([10.67.119.149])\n by FMSMGA003.fm.intel.com with ESMTP; 23 Jul 2021 01:27:01 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10053\"; a=\"199042812\"",
            "E=Sophos;i=\"5.84,263,1620716400\"; d=\"scan'208\";a=\"199042812\"",
            "E=Sophos;i=\"5.84,263,1620716400\"; d=\"scan'208\";a=\"502430071\""
        ],
        "X-ExtLoop1": "1",
        "From": "Cheng Jiang <cheng1.jiang@intel.com>",
        "To": "maxime.coquelin@redhat.com,\n\tChenbo.Xia@intel.com",
        "Cc": "dev@dpdk.org, jiayu.hu@intel.com, yvonnex.yang@intel.com,\n Cheng Jiang <cheng1.jiang@intel.com>, stable@dpdk.org",
        "Date": "Fri, 23 Jul 2021 08:09:34 +0000",
        "Message-Id": "<20210723080937.20256-2-cheng1.jiang@intel.com>",
        "X-Mailer": "git-send-email 2.29.2",
        "In-Reply-To": "<20210723080937.20256-1-cheng1.jiang@intel.com>",
        "References": "<20210602042802.31943-1-cheng1.jiang@intel.com>\n <20210723080937.20256-1-cheng1.jiang@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v8 1/4] vhost: fix async vhost ops return type",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The async vhost callback ops should return negative value when there\nare something wrong in the callback, so the return type should be\nchanged into int32_t. The issue in vhost example is also fixed.\n\nFixes: cd6760da1076 (\"vhost: introduce async enqueue for split ring\")\nFixes: 819a71685826 (\"vhost: fix async callback return type\")\nFixes: 6b3c81db8bb7 (\"vhost: simplify async copy completion\")\nFixes: abec60e7115d (\"examples/vhost: support vhost async data path\")\nFixes: 6e9a9d2a02ae (\"examples/vhost: fix ioat dependency\")\nFixes: 873e8dad6f49 (\"vhost: support packed ring in async datapath\")\nCc: stable@dpdk.org\n\nSigned-off-by: Cheng Jiang <cheng1.jiang@intel.com>\nReviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n examples/vhost/ioat.c       |  4 +--\n examples/vhost/ioat.h       |  8 ++---\n lib/vhost/rte_vhost_async.h |  8 ++---\n lib/vhost/virtio_net.c      | 61 ++++++++++++++++++++++++++++++++-----\n 4 files changed, 63 insertions(+), 18 deletions(-)",
    "diff": "diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c\nindex 2a2c2d7202..457f8171f0 100644\n--- a/examples/vhost/ioat.c\n+++ b/examples/vhost/ioat.c\n@@ -122,7 +122,7 @@ open_ioat(const char *value)\n \treturn ret;\n }\n \n-uint32_t\n+int32_t\n ioat_transfer_data_cb(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_desc *descs,\n \t\tstruct rte_vhost_async_status *opaque_data, uint16_t count)\n@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,\n \treturn i_desc;\n }\n \n-uint32_t\n+int32_t\n ioat_check_completed_copies_cb(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_status *opaque_data,\n \t\tuint16_t max_packets)\ndiff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h\nindex 1aa28ed6a3..62e163c585 100644\n--- a/examples/vhost/ioat.h\n+++ b/examples/vhost/ioat.h\n@@ -27,12 +27,12 @@ struct dma_for_vhost {\n #ifdef RTE_RAW_IOAT\n int open_ioat(const char *value);\n \n-uint32_t\n+int32_t\n ioat_transfer_data_cb(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_desc *descs,\n \t\tstruct rte_vhost_async_status *opaque_data, uint16_t count);\n \n-uint32_t\n+int32_t\n ioat_check_completed_copies_cb(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_status *opaque_data,\n \t\tuint16_t max_packets);\n@@ -42,7 +42,7 @@ static int open_ioat(const char *value __rte_unused)\n \treturn -1;\n }\n \n-static uint32_t\n+static int32_t\n ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,\n \t\tstruct rte_vhost_async_desc *descs __rte_unused,\n \t\tstruct rte_vhost_async_status *opaque_data __rte_unused,\n@@ -51,7 +51,7 @@ ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,\n \treturn -1;\n }\n \n-static uint32_t\n+static int32_t\n ioat_check_completed_copies_cb(int vid __rte_unused,\n \t\tuint16_t queue_id __rte_unused,\n \t\tstruct rte_vhost_async_status *opaque_data __rte_unused,\ndiff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h\nindex 
69ec66bba5..02d012ae23 100644\n--- a/lib/vhost/rte_vhost_async.h\n+++ b/lib/vhost/rte_vhost_async.h\n@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {\n \t * @param count\n \t *  number of elements in the \"descs\" array\n \t * @return\n-\t *  number of descs processed\n+\t *  number of descs processed, negative value means error\n \t */\n-\tuint32_t (*transfer_data)(int vid, uint16_t queue_id,\n+\tint32_t (*transfer_data)(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_desc *descs,\n \t\tstruct rte_vhost_async_status *opaque_data,\n \t\tuint16_t count);\n@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {\n \t * @param max_packets\n \t *  max number of packets could be completed\n \t * @return\n-\t *  number of async descs completed\n+\t *  number of async descs completed, negative value means error\n \t */\n-\tuint32_t (*check_completed_copies)(int vid, uint16_t queue_id,\n+\tint32_t (*check_completed_copies)(int vid, uint16_t queue_id,\n \t\tstruct rte_vhost_async_status *opaque_data,\n \t\tuint16_t max_packets);\n };\ndiff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c\nindex 6e5d82c1a8..3ab5229f76 100644\n--- a/lib/vhost/virtio_net.c\n+++ b/lib/vhost/virtio_net.c\n@@ -1644,6 +1644,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \tstruct async_inflight_info *pkts_info = vq->async_pkts_info;\n \tuint32_t n_pkts = 0, pkt_err = 0;\n \tuint32_t num_async_pkts = 0, num_done_pkts = 0;\n+\tint32_t n_xfer;\n \tstruct {\n \t\tuint16_t pkt_idx;\n \t\tuint16_t last_avail_idx;\n@@ -1724,8 +1725,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \t\tif (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||\n \t\t\t((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <\n \t\t\tBUF_VECTOR_MAX))) {\n-\t\t\tn_pkts = vq->async_ops.transfer_data(dev->vid,\n+\t\t\tn_xfer = vq->async_ops.transfer_data(dev->vid,\n \t\t\t\t\tqueue_id, tdes, 0, pkt_burst_idx);\n+\t\t\tif (n_xfer >= 0) {\n+\t\t\t\tn_pkts = n_xfer;\n+\t\t\t} else 
{\n+\t\t\t\tVHOST_LOG_DATA(ERR,\n+\t\t\t\t\t\"(%d) %s: failed to transfer data for queue id %d.\\n\",\n+\t\t\t\t\tdev->vid, __func__, queue_id);\n+\t\t\t\tn_pkts = 0;\n+\t\t\t}\n+\n \t\t\tiovec_idx = 0;\n \t\t\tit_idx = 0;\n \n@@ -1748,8 +1758,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \t}\n \n \tif (pkt_burst_idx) {\n-\t\tn_pkts = vq->async_ops.transfer_data(dev->vid,\n-\t\t\t\tqueue_id, tdes, 0, pkt_burst_idx);\n+\t\tn_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);\n+\t\tif (n_xfer >= 0) {\n+\t\t\tn_pkts = n_xfer;\n+\t\t} else {\n+\t\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: failed to transfer data for queue id %d.\\n\",\n+\t\t\t\tdev->vid, __func__, queue_id);\n+\t\t\tn_pkts = 0;\n+\t\t}\n+\n \t\tvq->async_pkts_inflight_n += n_pkts;\n \n \t\tif (unlikely(n_pkts < pkt_burst_idx))\n@@ -1996,6 +2013,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,\n \tuint16_t async_descs_idx = 0;\n \tuint16_t num_buffers;\n \tuint16_t num_descs;\n+\tint32_t n_xfer;\n \n \tstruct rte_vhost_iov_iter *it_pool = vq->it_pool;\n \tstruct iovec *vec_pool = vq->vec_pool;\n@@ -2078,8 +2096,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,\n \t\t */\n \t\tif (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||\n \t\t\t((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {\n-\t\t\tn_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,\n-\t\t\t\ttdes, 0, pkt_burst_idx);\n+\t\t\tn_xfer = vq->async_ops.transfer_data(dev->vid,\n+\t\t\t\t\tqueue_id, tdes, 0, pkt_burst_idx);\n+\t\t\tif (n_xfer >= 0) {\n+\t\t\t\tn_pkts = n_xfer;\n+\t\t\t} else {\n+\t\t\t\tVHOST_LOG_DATA(ERR,\n+\t\t\t\t\t\"(%d) %s: failed to transfer data for queue id %d.\\n\",\n+\t\t\t\t\tdev->vid, __func__, queue_id);\n+\t\t\t\tn_pkts = 0;\n+\t\t\t}\n+\n \t\t\tiovec_idx = 0;\n \t\t\tit_idx = 0;\n \t\t\tsegs_await = 0;\n@@ -2101,7 +2128,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,\n \t} while (pkt_idx < count);\n \n \tif 
(pkt_burst_idx) {\n-\t\tn_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);\n+\t\tn_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);\n+\t\tif (n_xfer >= 0) {\n+\t\t\tn_pkts = n_xfer;\n+\t\t} else {\n+\t\t\tVHOST_LOG_DATA(ERR, \"(%d) %s: failed to transfer data for queue id %d.\\n\",\n+\t\t\t\tdev->vid, __func__, queue_id);\n+\t\t\tn_pkts = 0;\n+\t\t}\n+\n \t\tvq->async_pkts_inflight_n += n_pkts;\n \n \t\tif (unlikely(n_pkts < pkt_burst_idx))\n@@ -2188,6 +2223,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \tuint16_t start_idx, pkts_idx, vq_size;\n \tstruct async_inflight_info *pkts_info;\n \tuint16_t from, i;\n+\tint32_t n_cpl;\n \n \tif (!dev)\n \t\treturn 0;\n@@ -2215,9 +2251,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \tstart_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,\n \t\tvq_size, vq->async_pkts_inflight_n);\n \n-\tif (count > vq->async_last_pkts_n)\n-\t\tn_pkts_cpl = vq->async_ops.check_completed_copies(vid,\n+\tif (count > vq->async_last_pkts_n) {\n+\t\tn_cpl = vq->async_ops.check_completed_copies(vid,\n \t\t\tqueue_id, 0, count - vq->async_last_pkts_n);\n+\t\tif (n_cpl >= 0) {\n+\t\t\tn_pkts_cpl = n_cpl;\n+\t\t} else {\n+\t\t\tVHOST_LOG_DATA(ERR,\n+\t\t\t\t\"(%d) %s: failed to check completed copies for queue id %d.\\n\",\n+\t\t\t\tdev->vid, __func__, queue_id);\n+\t\t\tn_pkts_cpl = 0;\n+\t\t}\n+\t}\n \tn_pkts_cpl += vq->async_last_pkts_n;\n \n \tn_pkts_put = RTE_MIN(count, n_pkts_cpl);\n",
    "prefixes": [
        "v8",
        "1/4"
    ]
}