get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied are changed).

put:
Update a patch (full update).
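
A read of this endpoint needs no authentication and can be issued from any HTTP client. The sketch below assumes Python with the requests package installed; the URL and field names are taken from the example response that follows.

    import requests

    # Fetch a single patch as JSON from the Patchwork REST API (read-only).
    resp = requests.get("http://patchwork.dpdk.org/api/patches/92233/")
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])   # "[v9,1/4] vhost: abstract and reorganize async split ring code"
    print(patch["state"])  # "accepted"
    print(patch["mbox"])   # raw mbox URL, suitable for feeding to git am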

GET /api/patches/92233/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 92233,
    "url": "http://patchwork.dpdk.org/api/patches/92233/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210427080335.20246-2-Cheng1.jiang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210427080335.20246-2-Cheng1.jiang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210427080335.20246-2-Cheng1.jiang@intel.com",
    "date": "2021-04-27T08:03:32",
    "name": "[v9,1/4] vhost: abstract and reorganize async split ring code",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "0c3f170bd0a710b062d6dc0b09b15583a330ec2e",
    "submitter": {
        "id": 1530,
        "url": "http://patchwork.dpdk.org/api/people/1530/?format=api",
        "name": "Jiang, Cheng1",
        "email": "Cheng1.jiang@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patchwork.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210427080335.20246-2-Cheng1.jiang@intel.com/mbox/",
    "series": [
        {
            "id": 16691,
            "url": "http://patchwork.dpdk.org/api/series/16691/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=16691",
            "date": "2021-04-27T08:03:31",
            "name": "add support for packed ring in async vhost",
            "version": 9,
            "mbox": "http://patchwork.dpdk.org/series/16691/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/92233/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/92233/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 76EECA0548;\n\tTue, 27 Apr 2021 10:17:52 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8DD79411FA;\n\tTue, 27 Apr 2021 10:17:47 +0200 (CEST)",
            "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n by mails.dpdk.org (Postfix) with ESMTP id A8F0740A4B\n for <dev@dpdk.org>; Tue, 27 Apr 2021 10:17:45 +0200 (CEST)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 27 Apr 2021 01:17:45 -0700",
            "from dpdk_jiangcheng.sh.intel.com ([10.67.119.112])\n by fmsmga002.fm.intel.com with ESMTP; 27 Apr 2021 01:17:43 -0700"
        ],
        "IronPort-SDR": [
            "\n PBfaABgTodTy64joEiPQj1PGX207c7KU2zZeGh/8WV9VURIQ2E3YjFRQBdo/38b38W6i87kZrl\n FMqiGARzmv3A==",
            "\n EZczWnpMqwu4IfTS8UP1u5ylY8hVmMIoBUxre9RSpmeBQGwtVnEYqTimIPo46zAkfXuomCRD8l\n 09Tf/BKgPQhg=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,9966\"; a=\"193285054\"",
            "E=Sophos;i=\"5.82,254,1613462400\"; d=\"scan'208\";a=\"193285054\"",
            "E=Sophos;i=\"5.82,254,1613462400\"; d=\"scan'208\";a=\"457521377\""
        ],
        "X-ExtLoop1": "1",
        "From": "Cheng Jiang <Cheng1.jiang@intel.com>",
        "To": "maxime.coquelin@redhat.com,\n\tchenbo.xia@intel.com",
        "Cc": "dev@dpdk.org, jiayu.hu@intel.com, yvonnex.yang@intel.com,\n yinan.wang@intel.com, yong.liu@intel.com,\n Cheng Jiang <Cheng1.jiang@intel.com>",
        "Date": "Tue, 27 Apr 2021 08:03:32 +0000",
        "Message-Id": "<20210427080335.20246-2-Cheng1.jiang@intel.com>",
        "X-Mailer": "git-send-email 2.29.2",
        "In-Reply-To": "<20210427080335.20246-1-Cheng1.jiang@intel.com>",
        "References": "<20210317085426.10119-1-Cheng1.jiang@intel.com>\n <20210427080335.20246-1-Cheng1.jiang@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v9 1/4] vhost: abstract and reorganize async\n split ring code",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch puts some codes of async vhost split ring into inline\nfunctions to improve the readability of the code. And, it changes\nthe pointer index style of iterator to make the code more concise.\n\nSigned-off-by: Cheng Jiang <Cheng1.jiang@intel.com>\nReviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>\nReviewed-by: Jiayu Hu <jiayu.hu@intel.com>\n---\n lib/vhost/virtio_net.c | 132 ++++++++++++++++++++---------------------\n 1 file changed, 66 insertions(+), 66 deletions(-)",
    "diff": "diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c\nindex ff39878609..438bdafd14 100644\n--- a/lib/vhost/virtio_net.c\n+++ b/lib/vhost/virtio_net.c\n@@ -1458,6 +1458,22 @@ virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,\n \t\t(vq_size - n_inflight + pkts_idx) & (vq_size - 1);\n }\n \n+static __rte_always_inline void\n+store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem *d_ring,\n+\t\tuint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)\n+{\n+\tuint16_t elem_size = sizeof(struct vring_used_elem);\n+\n+\tif (d_idx + count <= ring_size) {\n+\t\trte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);\n+\t} else {\n+\t\tuint16_t size = ring_size - d_idx;\n+\n+\t\trte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);\n+\t\trte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);\n+\t}\n+}\n+\n static __rte_noinline uint32_t\n virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \tstruct vhost_virtqueue *vq, uint16_t queue_id,\n@@ -1474,10 +1490,9 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \tstruct rte_vhost_async_desc tdes[MAX_PKT_BURST];\n \tstruct iovec *src_iovec = vec_pool;\n \tstruct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);\n-\tstruct rte_vhost_iov_iter *src_it = it_pool;\n-\tstruct rte_vhost_iov_iter *dst_it = it_pool + 1;\n \tuint16_t slot_idx = 0;\n \tuint16_t segs_await = 0;\n+\tuint16_t iovec_idx = 0, it_idx = 0;\n \tstruct async_inflight_info *pkts_info = vq->async_pkts_info;\n \tuint32_t n_pkts = 0, pkt_err = 0;\n \tuint32_t num_async_pkts = 0, num_done_pkts = 0;\n@@ -1511,29 +1526,30 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \t\t\tdev->vid, vq->last_avail_idx,\n \t\t\tvq->last_avail_idx + num_buffers);\n \n-\t\tif (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],\n-\t\t\t\tbuf_vec, nr_vec, num_buffers,\n-\t\t\t\tsrc_iovec, dst_iovec, src_it, dst_it) < 0) {\n+\t\tif (async_mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers,\n+\t\t\t\t&src_iovec[iovec_idx], &dst_iovec[iovec_idx],\n+\t\t\t\t&it_pool[it_idx], &it_pool[it_idx + 1]) < 0) {\n \t\t\tvq->shadow_used_idx -= num_buffers;\n \t\t\tbreak;\n \t\t}\n \n \t\tslot_idx = (vq->async_pkts_idx + num_async_pkts) &\n \t\t\t(vq->size - 1);\n-\t\tif (src_it->count) {\n+\t\tif (it_pool[it_idx].count) {\n \t\t\tuint16_t from, to;\n \n-\t\t\tasync_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);\n+\t\t\tasync_fill_desc(&tdes[pkt_burst_idx++],\n+\t\t\t\t&it_pool[it_idx], &it_pool[it_idx + 1]);\n \t\t\tpkts_info[slot_idx].descs = num_buffers;\n \t\t\tpkts_info[slot_idx].mbuf = pkts[pkt_idx];\n \t\t\tasync_pkts_log[num_async_pkts].pkt_idx = pkt_idx;\n \t\t\tasync_pkts_log[num_async_pkts++].last_avail_idx =\n \t\t\t\tvq->last_avail_idx;\n-\t\t\tsrc_iovec += src_it->nr_segs;\n-\t\t\tdst_iovec += dst_it->nr_segs;\n-\t\t\tsrc_it += 2;\n-\t\t\tdst_it += 2;\n-\t\t\tsegs_await += src_it->nr_segs;\n+\n+\t\t\tiovec_idx += it_pool[it_idx].nr_segs;\n+\t\t\tit_idx += 2;\n+\n+\t\t\tsegs_await += it_pool[it_idx].nr_segs;\n \n \t\t\t/**\n \t\t\t * recover shadow used ring and keep DMA-occupied\n@@ -1541,23 +1557,10 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \t\t\t */\n \t\t\tfrom = vq->shadow_used_idx - num_buffers;\n \t\t\tto = vq->async_desc_idx & (vq->size - 1);\n-\t\t\tif (num_buffers + to <= vq->size) {\n-\t\t\t\trte_memcpy(&vq->async_descs_split[to],\n-\t\t\t\t\t\t&vq->shadow_used_split[from],\n-\t\t\t\t\t\tnum_buffers *\n-\t\t\t\t\t\tsizeof(struct 
vring_used_elem));\n-\t\t\t} else {\n-\t\t\t\tint size = vq->size - to;\n-\n-\t\t\t\trte_memcpy(&vq->async_descs_split[to],\n-\t\t\t\t\t\t&vq->shadow_used_split[from],\n-\t\t\t\t\t\tsize *\n-\t\t\t\t\t\tsizeof(struct vring_used_elem));\n-\t\t\t\trte_memcpy(vq->async_descs_split,\n-\t\t\t\t\t\t&vq->shadow_used_split[from +\n-\t\t\t\t\t\tsize], (num_buffers - size) *\n-\t\t\t\t\t   sizeof(struct vring_used_elem));\n-\t\t\t}\n+\n+\t\t\tstore_dma_desc_info_split(vq->shadow_used_split,\n+\t\t\t\t\tvq->async_descs_split, vq->size, from, to, num_buffers);\n+\n \t\t\tvq->async_desc_idx += num_buffers;\n \t\t\tvq->shadow_used_idx -= num_buffers;\n \t\t} else\n@@ -1575,10 +1578,9 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \t\t\tBUF_VECTOR_MAX))) {\n \t\t\tn_pkts = vq->async_ops.transfer_data(dev->vid,\n \t\t\t\t\tqueue_id, tdes, 0, pkt_burst_idx);\n-\t\t\tsrc_iovec = vec_pool;\n-\t\t\tdst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);\n-\t\t\tsrc_it = it_pool;\n-\t\t\tdst_it = it_pool + 1;\n+\t\t\tiovec_idx = 0;\n+\t\t\tit_idx = 0;\n+\n \t\t\tsegs_await = 0;\n \t\t\tvq->async_pkts_inflight_n += n_pkts;\n \n@@ -1639,6 +1641,36 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,\n \treturn pkt_idx;\n }\n \n+static __rte_always_inline void\n+write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)\n+{\n+\tuint16_t nr_left = n_descs;\n+\tuint16_t nr_copy;\n+\tuint16_t to, from;\n+\n+\tdo {\n+\t\tfrom = vq->last_async_desc_idx & (vq->size - 1);\n+\t\tnr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;\n+\t\tto = vq->last_used_idx & (vq->size - 1);\n+\n+\t\tif (to + nr_copy <= vq->size) {\n+\t\t\trte_memcpy(&vq->used->ring[to], &vq->async_descs_split[from],\n+\t\t\t\t\tnr_copy * sizeof(struct vring_used_elem));\n+\t\t} else {\n+\t\t\tuint16_t size = vq->size - to;\n+\n+\t\t\trte_memcpy(&vq->used->ring[to], &vq->async_descs_split[from],\n+\t\t\t\t\tsize * sizeof(struct vring_used_elem));\n+\t\t\trte_memcpy(&vq->used->ring[0], &vq->async_descs_split[from + size],\n+\t\t\t\t\t(nr_copy - size) * sizeof(struct vring_used_elem));\n+\t\t}\n+\n+\t\tvq->last_async_desc_idx += nr_copy;\n+\t\tvq->last_used_idx += nr_copy;\n+\t\tnr_left -= nr_copy;\n+\t} while (nr_left > 0);\n+}\n+\n uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \t\tstruct rte_mbuf **pkts, uint16_t count)\n {\n@@ -1695,39 +1727,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,\n \tvq->async_pkts_inflight_n -= n_pkts_put;\n \n \tif (likely(vq->enabled && vq->access_ok)) {\n-\t\tuint16_t nr_left = n_descs;\n-\t\tuint16_t nr_copy;\n-\t\tuint16_t to;\n-\n-\t\t/* write back completed descriptors to used ring */\n-\t\tdo {\n-\t\t\tfrom = vq->last_async_desc_idx & (vq->size - 1);\n-\t\t\tnr_copy = nr_left + from <= vq->size ? 
nr_left :\n-\t\t\t\tvq->size - from;\n-\t\t\tto = vq->last_used_idx & (vq->size - 1);\n-\n-\t\t\tif (to + nr_copy <= vq->size) {\n-\t\t\t\trte_memcpy(&vq->used->ring[to],\n-\t\t\t\t\t\t&vq->async_descs_split[from],\n-\t\t\t\t\t\tnr_copy *\n-\t\t\t\t\t\tsizeof(struct vring_used_elem));\n-\t\t\t} else {\n-\t\t\t\tuint16_t size = vq->size - to;\n-\n-\t\t\t\trte_memcpy(&vq->used->ring[to],\n-\t\t\t\t\t\t&vq->async_descs_split[from],\n-\t\t\t\t\t\tsize *\n-\t\t\t\t\t\tsizeof(struct vring_used_elem));\n-\t\t\t\trte_memcpy(vq->used->ring,\n-\t\t\t\t\t\t&vq->async_descs_split[from +\n-\t\t\t\t\t\tsize], (nr_copy - size) *\n-\t\t\t\t\t\tsizeof(struct vring_used_elem));\n-\t\t\t}\n-\n-\t\t\tvq->last_async_desc_idx += nr_copy;\n-\t\t\tvq->last_used_idx += nr_copy;\n-\t\t\tnr_left -= nr_copy;\n-\t\t} while (nr_left > 0);\n+\t\twrite_back_completed_descs_split(vq, n_descs);\n \n \t\t__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);\n \t\tvhost_vring_call_split(dev, vq);\n",
    "prefixes": [
        "v9",
        "1/4"
    ]
}
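
The PUT and PATCH operations advertised in the Allow header take the same URL. The sketch below shows a partial update, assuming Python with requests, a placeholder API token, and that the authenticated user is a maintainer of the project; Patchwork normally restricts writes to fields such as state, delegate, archived, and commit_ref to project maintainers.

    import requests

    API_TOKEN = "0123456789abcdef"  # placeholder; a real Patchwork API token is required

    # Partially update the patch: only the fields in the JSON body are changed.
    resp = requests.patch(
        "http://patchwork.dpdk.org/api/patches/92233/",
        headers={"Authorization": f"Token {API_TOKEN}"},
        json={"state": "accepted", "archived": True},
    )
    resp.raise_for_status()
    print(resp.json()["state"])

A PUT to the same URL behaves the same way except that it is treated as a full update of the writable fields rather than a partial one.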