get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Fully update a patch (the supplied representation replaces the existing one).

GET /api/patches/51322/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 51322,
    "url": "http://patchwork.dpdk.org/api/patches/51322/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20190319064312.13743-6-tiwei.bie@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190319064312.13743-6-tiwei.bie@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190319064312.13743-6-tiwei.bie@intel.com",
    "date": "2019-03-19T06:43:07",
    "name": "[05/10] net/virtio: refactor virtqueue structure",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "1040451b6943f858d8da3f0b66ae5875ec3f614b",
    "submitter": {
        "id": 617,
        "url": "http://patchwork.dpdk.org/api/people/617/?format=api",
        "name": "Tiwei Bie",
        "email": "tiwei.bie@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patchwork.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20190319064312.13743-6-tiwei.bie@intel.com/mbox/",
    "series": [
        {
            "id": 3795,
            "url": "http://patchwork.dpdk.org/api/series/3795/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=3795",
            "date": "2019-03-19T06:43:02",
            "name": "net/virtio: cleanups and fixes for packed/split ring",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/3795/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/51322/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/51322/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id C774B4D3A;\n\tTue, 19 Mar 2019 07:43:53 +0100 (CET)",
            "from mga07.intel.com (mga07.intel.com [134.134.136.100])\n\tby dpdk.org (Postfix) with ESMTP id 4FC512C54\n\tfor <dev@dpdk.org>; Tue, 19 Mar 2019 07:43:41 +0100 (CET)",
            "from orsmga007.jf.intel.com ([10.7.209.58])\n\tby orsmga105.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t18 Mar 2019 23:43:40 -0700",
            "from dpdk-tbie.sh.intel.com ([10.67.104.173])\n\tby orsmga007.jf.intel.com with ESMTP; 18 Mar 2019 23:43:39 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.58,496,1544515200\"; d=\"scan'208\";a=\"123847055\"",
        "From": "Tiwei Bie <tiwei.bie@intel.com>",
        "To": "maxime.coquelin@redhat.com,\n\tzhihong.wang@intel.com,\n\tdev@dpdk.org",
        "Date": "Tue, 19 Mar 2019 14:43:07 +0800",
        "Message-Id": "<20190319064312.13743-6-tiwei.bie@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20190319064312.13743-1-tiwei.bie@intel.com>",
        "References": "<20190319064312.13743-1-tiwei.bie@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 05/10] net/virtio: refactor virtqueue structure",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Put split ring and packed ring specific fields into separate\nsub-structures, and also union them as they won't be available\nat the same time.\n\nSigned-off-by: Tiwei Bie <tiwei.bie@intel.com>\n---\n drivers/net/virtio/virtio_ethdev.c           | 71 +++++++++---------\n drivers/net/virtio/virtio_rxtx.c             | 66 ++++++++---------\n drivers/net/virtio/virtio_rxtx_simple.h      |  2 +-\n drivers/net/virtio/virtio_rxtx_simple_neon.c |  2 +-\n drivers/net/virtio/virtio_rxtx_simple_sse.c  |  2 +-\n drivers/net/virtio/virtqueue.c               |  6 +-\n drivers/net/virtio/virtqueue.h               | 77 +++++++++++---------\n 7 files changed, 117 insertions(+), 109 deletions(-)",
    "diff": "diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c\nindex 9060b6b33..bc91ad493 100644\n--- a/drivers/net/virtio/virtio_ethdev.c\n+++ b/drivers/net/virtio/virtio_ethdev.c\n@@ -147,7 +147,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,\n {\n \tstruct virtqueue *vq = cvq->vq;\n \tint head;\n-\tstruct vring_packed_desc *desc = vq->ring_packed.desc_packed;\n+\tstruct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;\n \tstruct virtio_pmd_ctrl *result;\n \tuint16_t flags;\n \tint sum = 0;\n@@ -161,14 +161,14 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,\n \t * One RX packet for ACK.\n \t */\n \thead = vq->vq_avail_idx;\n-\tflags = vq->cached_flags;\n+\tflags = vq->vq_packed.cached_flags;\n \tdesc[head].addr = cvq->virtio_net_hdr_mem;\n \tdesc[head].len = sizeof(struct virtio_net_ctrl_hdr);\n \tvq->vq_free_cnt--;\n \tnb_descs++;\n \tif (++vq->vq_avail_idx >= vq->vq_nentries) {\n \t\tvq->vq_avail_idx -= vq->vq_nentries;\n-\t\tvq->cached_flags ^=\n+\t\tvq->vq_packed.cached_flags ^=\n \t\t\tVRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);\n \t}\n \n@@ -178,13 +178,13 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,\n \t\t\t+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;\n \t\tdesc[vq->vq_avail_idx].len = dlen[k];\n \t\tdesc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |\n-\t\t\tvq->cached_flags;\n+\t\t\tvq->vq_packed.cached_flags;\n \t\tsum += dlen[k];\n \t\tvq->vq_free_cnt--;\n \t\tnb_descs++;\n \t\tif (++vq->vq_avail_idx >= vq->vq_nentries) {\n \t\t\tvq->vq_avail_idx -= vq->vq_nentries;\n-\t\t\tvq->cached_flags ^=\n+\t\t\tvq->vq_packed.cached_flags ^=\n \t\t\t\tVRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);\n \t\t}\n \t}\n@@ -192,12 +192,13 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,\n \tdesc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem\n \t\t+ sizeof(struct virtio_net_ctrl_hdr);\n \tdesc[vq->vq_avail_idx].len = sizeof(ctrl->status);\n-\tdesc[vq->vq_avail_idx].flags = 
VRING_DESC_F_WRITE | vq->cached_flags;\n+\tdesc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |\n+\t\tvq->vq_packed.cached_flags;\n \tvq->vq_free_cnt--;\n \tnb_descs++;\n \tif (++vq->vq_avail_idx >= vq->vq_nentries) {\n \t\tvq->vq_avail_idx -= vq->vq_nentries;\n-\t\tvq->cached_flags ^=\n+\t\tvq->vq_packed.cached_flags ^=\n \t\t\tVRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);\n \t}\n \n@@ -218,19 +219,19 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,\n \tvq->vq_used_cons_idx += nb_descs;\n \tif (vq->vq_used_cons_idx >= vq->vq_nentries) {\n \t\tvq->vq_used_cons_idx -= vq->vq_nentries;\n-\t\tvq->used_wrap_counter ^= 1;\n+\t\tvq->vq_packed.used_wrap_counter ^= 1;\n \t}\n \n \tPMD_INIT_LOG(DEBUG, \"vq->vq_free_cnt=%d\\n\"\n \t\t\t\"vq->vq_avail_idx=%d\\n\"\n \t\t\t\"vq->vq_used_cons_idx=%d\\n\"\n-\t\t\t\"vq->cached_flags=0x%x\\n\"\n-\t\t\t\"vq->used_wrap_counter=%d\\n\",\n+\t\t\t\"vq->vq_packed.cached_flags=0x%x\\n\"\n+\t\t\t\"vq->vq_packed.used_wrap_counter=%d\\n\",\n \t\t\tvq->vq_free_cnt,\n \t\t\tvq->vq_avail_idx,\n \t\t\tvq->vq_used_cons_idx,\n-\t\t\tvq->cached_flags,\n-\t\t\tvq->used_wrap_counter);\n+\t\t\tvq->vq_packed.cached_flags,\n+\t\t\tvq->vq_packed.used_wrap_counter);\n \n \tresult = cvq->virtio_net_hdr_mz->addr;\n \treturn result;\n@@ -280,30 +281,30 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,\n \t * At least one TX packet per argument;\n \t * One RX packet for ACK.\n \t */\n-\tvq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;\n-\tvq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;\n-\tvq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);\n+\tvq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;\n+\tvq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;\n+\tvq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);\n \tvq->vq_free_cnt--;\n-\ti = vq->vq_ring.desc[head].next;\n+\ti = vq->vq_split.ring.desc[head].next;\n \n \tfor (k = 0; k < pkt_num; k++) 
{\n-\t\tvq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;\n-\t\tvq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem\n+\t\tvq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;\n+\t\tvq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem\n \t\t\t+ sizeof(struct virtio_net_ctrl_hdr)\n \t\t\t+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;\n-\t\tvq->vq_ring.desc[i].len = dlen[k];\n+\t\tvq->vq_split.ring.desc[i].len = dlen[k];\n \t\tsum += dlen[k];\n \t\tvq->vq_free_cnt--;\n-\t\ti = vq->vq_ring.desc[i].next;\n+\t\ti = vq->vq_split.ring.desc[i].next;\n \t}\n \n-\tvq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;\n-\tvq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem\n+\tvq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;\n+\tvq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem\n \t\t\t+ sizeof(struct virtio_net_ctrl_hdr);\n-\tvq->vq_ring.desc[i].len = sizeof(ctrl->status);\n+\tvq->vq_split.ring.desc[i].len = sizeof(ctrl->status);\n \tvq->vq_free_cnt--;\n \n-\tvq->vq_desc_head_idx = vq->vq_ring.desc[i].next;\n+\tvq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;\n \n \tvq_update_avail_ring(vq, head);\n \tvq_update_avail_idx(vq);\n@@ -324,16 +325,17 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,\n \n \t\tused_idx = (uint32_t)(vq->vq_used_cons_idx\n \t\t\t\t& (vq->vq_nentries - 1));\n-\t\tuep = &vq->vq_ring.used->ring[used_idx];\n+\t\tuep = &vq->vq_split.ring.used->ring[used_idx];\n \t\tidx = (uint32_t) uep->id;\n \t\tdesc_idx = idx;\n \n-\t\twhile (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {\n-\t\t\tdesc_idx = vq->vq_ring.desc[desc_idx].next;\n+\t\twhile (vq->vq_split.ring.desc[desc_idx].flags &\n+\t\t\t\tVRING_DESC_F_NEXT) {\n+\t\t\tdesc_idx = vq->vq_split.ring.desc[desc_idx].next;\n \t\t\tvq->vq_free_cnt++;\n \t\t}\n \n-\t\tvq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;\n+\t\tvq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;\n \t\tvq->vq_desc_head_idx = idx;\n \n \t\tvq->vq_used_cons_idx++;\n@@ -395,7 
+397,6 @@ static void\n virtio_init_vring(struct virtqueue *vq)\n {\n \tint size = vq->vq_nentries;\n-\tstruct vring *vr = &vq->vq_ring;\n \tuint8_t *ring_mem = vq->vq_ring_virt_mem;\n \n \tPMD_INIT_FUNC_TRACE();\n@@ -409,10 +410,12 @@ virtio_init_vring(struct virtqueue *vq)\n \tvq->vq_free_cnt = vq->vq_nentries;\n \tmemset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);\n \tif (vtpci_packed_queue(vq->hw)) {\n-\t\tvring_init_packed(&vq->ring_packed, ring_mem,\n+\t\tvring_init_packed(&vq->vq_packed.ring, ring_mem,\n \t\t\t\t  VIRTIO_PCI_VRING_ALIGN, size);\n \t\tvring_desc_init_packed(vq, size);\n \t} else {\n+\t\tstruct vring *vr = &vq->vq_split.ring;\n+\n \t\tvring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);\n \t\tvring_desc_init_split(vr->desc, size);\n \t}\n@@ -487,12 +490,12 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)\n \tvq->hw = hw;\n \tvq->vq_queue_index = vtpci_queue_idx;\n \tvq->vq_nentries = vq_size;\n-\tvq->event_flags_shadow = 0;\n \tif (vtpci_packed_queue(hw)) {\n-\t\tvq->used_wrap_counter = 1;\n-\t\tvq->cached_flags = VRING_DESC_F_AVAIL(1);\n+\t\tvq->vq_packed.used_wrap_counter = 1;\n+\t\tvq->vq_packed.cached_flags = VRING_DESC_F_AVAIL(1);\n+\t\tvq->vq_packed.event_flags_shadow = 0;\n \t\tif (queue_type == VTNET_RQ)\n-\t\t\tvq->cached_flags |= VRING_DESC_F_WRITE;\n+\t\t\tvq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;\n \t}\n \n \t/*\ndiff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c\nindex 3c354baef..02f8d9451 100644\n--- a/drivers/net/virtio/virtio_rxtx.c\n+++ b/drivers/net/virtio/virtio_rxtx.c\n@@ -62,13 +62,13 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)\n \tstruct vq_desc_extra *dxp;\n \tuint16_t desc_idx_last = desc_idx;\n \n-\tdp  = &vq->vq_ring.desc[desc_idx];\n+\tdp  = &vq->vq_split.ring.desc[desc_idx];\n \tdxp = &vq->vq_descx[desc_idx];\n \tvq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);\n \tif ((dp->flags & 
VRING_DESC_F_INDIRECT) == 0) {\n \t\twhile (dp->flags & VRING_DESC_F_NEXT) {\n \t\t\tdesc_idx_last = dp->next;\n-\t\t\tdp = &vq->vq_ring.desc[dp->next];\n+\t\t\tdp = &vq->vq_split.ring.desc[dp->next];\n \t\t}\n \t}\n \tdxp->ndescs = 0;\n@@ -81,7 +81,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)\n \tif (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {\n \t\tvq->vq_desc_head_idx = desc_idx;\n \t} else {\n-\t\tdp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];\n+\t\tdp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];\n \t\tdp_tail->next = desc_idx;\n \t}\n \n@@ -118,7 +118,7 @@ virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,\n \tstruct vring_packed_desc *desc;\n \tuint16_t i;\n \n-\tdesc = vq->ring_packed.desc_packed;\n+\tdesc = vq->vq_packed.ring.desc_packed;\n \n \tfor (i = 0; i < num; i++) {\n \t\tused_idx = vq->vq_used_cons_idx;\n@@ -141,7 +141,7 @@ virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,\n \t\tvq->vq_used_cons_idx++;\n \t\tif (vq->vq_used_cons_idx >= vq->vq_nentries) {\n \t\t\tvq->vq_used_cons_idx -= vq->vq_nentries;\n-\t\t\tvq->used_wrap_counter ^= 1;\n+\t\t\tvq->vq_packed.used_wrap_counter ^= 1;\n \t\t}\n \t}\n \n@@ -160,7 +160,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,\n \t/*  Caller does the check */\n \tfor (i = 0; i < num ; i++) {\n \t\tused_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));\n-\t\tuep = &vq->vq_ring.used->ring[used_idx];\n+\t\tuep = &vq->vq_split.ring.used->ring[used_idx];\n \t\tdesc_idx = (uint16_t) uep->id;\n \t\tlen[i] = uep->len;\n \t\tcookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;\n@@ -199,7 +199,7 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq,\n \tfor (i = 0; i < num; i++) {\n \t\tused_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);\n \t\t/* Desc idx same as used idx */\n-\t\tuep = &vq->vq_ring.used->ring[used_idx];\n+\t\tuep = &vq->vq_split.ring.used->ring[used_idx];\n \t\tlen[i] = uep->len;\n \t\tcookie = 
(struct rte_mbuf *)vq->vq_descx[used_idx].cookie;\n \n@@ -229,7 +229,7 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)\n {\n \tuint16_t used_idx, id, curr_id, free_cnt = 0;\n \tuint16_t size = vq->vq_nentries;\n-\tstruct vring_packed_desc *desc = vq->ring_packed.desc_packed;\n+\tstruct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;\n \tstruct vq_desc_extra *dxp;\n \n \tused_idx = vq->vq_used_cons_idx;\n@@ -244,7 +244,7 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)\n \t\t\tnum -= dxp->ndescs;\n \t\t\tif (used_idx >= size) {\n \t\t\t\tused_idx -= size;\n-\t\t\t\tvq->used_wrap_counter ^= 1;\n+\t\t\t\tvq->vq_packed.used_wrap_counter ^= 1;\n \t\t\t}\n \t\t\tif (dxp->cookie != NULL) {\n \t\t\t\trte_pktmbuf_free(dxp->cookie);\n@@ -261,7 +261,7 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)\n {\n \tuint16_t used_idx, id;\n \tuint16_t size = vq->vq_nentries;\n-\tstruct vring_packed_desc *desc = vq->ring_packed.desc_packed;\n+\tstruct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;\n \tstruct vq_desc_extra *dxp;\n \n \tused_idx = vq->vq_used_cons_idx;\n@@ -272,7 +272,7 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)\n \t\tvq->vq_used_cons_idx += dxp->ndescs;\n \t\tif (vq->vq_used_cons_idx >= size) {\n \t\t\tvq->vq_used_cons_idx -= size;\n-\t\t\tvq->used_wrap_counter ^= 1;\n+\t\t\tvq->vq_packed.used_wrap_counter ^= 1;\n \t\t}\n \t\tvq_ring_free_id_packed(vq, id);\n \t\tif (dxp->cookie != NULL) {\n@@ -302,7 +302,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)\n \t\tstruct vq_desc_extra *dxp;\n \n \t\tused_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));\n-\t\tuep = &vq->vq_ring.used->ring[used_idx];\n+\t\tuep = &vq->vq_split.ring.used->ring[used_idx];\n \n \t\tdesc_idx = (uint16_t) uep->id;\n \t\tdxp = &vq->vq_descx[desc_idx];\n@@ -356,7 +356,7 @@ virtqueue_enqueue_refill_inorder(struct virtqueue *vq,\n \t\treturn -EMSGSIZE;\n \n \thead_idx = 
vq->vq_desc_head_idx & (vq->vq_nentries - 1);\n-\tstart_dp = vq->vq_ring.desc;\n+\tstart_dp = vq->vq_split.ring.desc;\n \n \twhile (i < num) {\n \t\tidx = head_idx & (vq->vq_nentries - 1);\n@@ -389,7 +389,7 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,\n {\n \tstruct vq_desc_extra *dxp;\n \tstruct virtio_hw *hw = vq->hw;\n-\tstruct vring_desc *start_dp = vq->vq_ring.desc;\n+\tstruct vring_desc *start_dp = vq->vq_split.ring.desc;\n \tuint16_t idx, i;\n \n \tif (unlikely(vq->vq_free_cnt == 0))\n@@ -430,8 +430,8 @@ static inline int\n virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,\n \t\t\t\t     struct rte_mbuf **cookie, uint16_t num)\n {\n-\tstruct vring_packed_desc *start_dp = vq->ring_packed.desc_packed;\n-\tuint16_t flags = vq->cached_flags;\n+\tstruct vring_packed_desc *start_dp = vq->vq_packed.ring.desc_packed;\n+\tuint16_t flags = vq->vq_packed.cached_flags;\n \tstruct virtio_hw *hw = vq->hw;\n \tstruct vq_desc_extra *dxp;\n \tuint16_t idx;\n@@ -460,9 +460,9 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,\n \t\tstart_dp[idx].flags = flags;\n \t\tif (++vq->vq_avail_idx >= vq->vq_nentries) {\n \t\t\tvq->vq_avail_idx -= vq->vq_nentries;\n-\t\t\tvq->cached_flags ^=\n+\t\t\tvq->vq_packed.cached_flags ^=\n \t\t\t\tVRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);\n-\t\t\tflags = vq->cached_flags;\n+\t\t\tflags = vq->vq_packed.cached_flags;\n \t\t}\n \t}\n \tvq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);\n@@ -589,7 +589,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,\n \tuint16_t i = 0;\n \n \tidx = vq->vq_desc_head_idx;\n-\tstart_dp = vq->vq_ring.desc;\n+\tstart_dp = vq->vq_split.ring.desc;\n \n \twhile (i < num) {\n \t\tidx = idx & (vq->vq_nentries - 1);\n@@ -635,13 +635,13 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,\n \n \tid = in_order ? 
vq->vq_avail_idx : vq->vq_desc_head_idx;\n \tidx = vq->vq_avail_idx;\n-\tdp = &vq->ring_packed.desc_packed[idx];\n+\tdp = &vq->vq_packed.ring.desc_packed[idx];\n \n \tdxp = &vq->vq_descx[id];\n \tdxp->ndescs = 1;\n \tdxp->cookie = cookie;\n \n-\tflags = vq->cached_flags;\n+\tflags = vq->vq_packed.cached_flags;\n \n \t/* prepend cannot fail, checked by caller */\n \thdr = (struct virtio_net_hdr *)\n@@ -660,7 +660,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,\n \n \tif (++vq->vq_avail_idx >= vq->vq_nentries) {\n \t\tvq->vq_avail_idx -= vq->vq_nentries;\n-\t\tvq->cached_flags ^=\n+\t\tvq->vq_packed.cached_flags ^=\n \t\t\tVRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);\n \t}\n \n@@ -698,11 +698,11 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,\n \thead_idx = vq->vq_avail_idx;\n \tidx = head_idx;\n \tprev = head_idx;\n-\tstart_dp = vq->ring_packed.desc_packed;\n+\tstart_dp = vq->vq_packed.ring.desc_packed;\n \n-\thead_dp = &vq->ring_packed.desc_packed[idx];\n+\thead_dp = &vq->vq_packed.ring.desc_packed[idx];\n \thead_flags = cookie->next ? VRING_DESC_F_NEXT : 0;\n-\thead_flags |= vq->cached_flags;\n+\thead_flags |= vq->vq_packed.cached_flags;\n \n \tif (can_push) {\n \t\t/* prepend cannot fail, checked by caller */\n@@ -727,7 +727,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,\n \t\tidx++;\n \t\tif (idx >= vq->vq_nentries) {\n \t\t\tidx -= vq->vq_nentries;\n-\t\t\tvq->cached_flags ^=\n+\t\t\tvq->vq_packed.cached_flags ^=\n \t\t\t\tVRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);\n \t\t}\n \t}\n@@ -741,14 +741,14 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,\n \t\tstart_dp[idx].len  = cookie->data_len;\n \t\tif (likely(idx != head_idx)) {\n \t\t\tflags = cookie->next ? 
VRING_DESC_F_NEXT : 0;\n-\t\t\tflags |= vq->cached_flags;\n+\t\t\tflags |= vq->vq_packed.cached_flags;\n \t\t\tstart_dp[idx].flags = flags;\n \t\t}\n \t\tprev = idx;\n \t\tidx++;\n \t\tif (idx >= vq->vq_nentries) {\n \t\t\tidx -= vq->vq_nentries;\n-\t\t\tvq->cached_flags ^=\n+\t\t\tvq->vq_packed.cached_flags ^=\n \t\t\t\tVRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);\n \t\t}\n \t} while ((cookie = cookie->next) != NULL);\n@@ -791,7 +791,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,\n \tdxp->cookie = (void *)cookie;\n \tdxp->ndescs = needed;\n \n-\tstart_dp = vq->vq_ring.desc;\n+\tstart_dp = vq->vq_split.ring.desc;\n \n \tif (can_push) {\n \t\t/* prepend cannot fail, checked by caller */\n@@ -844,7 +844,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,\n \t} while ((cookie = cookie->next) != NULL);\n \n \tif (use_indirect)\n-\t\tidx = vq->vq_ring.desc[head_idx].next;\n+\t\tidx = vq->vq_split.ring.desc[head_idx].next;\n \n \tvq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);\n \n@@ -919,8 +919,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)\n \tif (hw->use_simple_rx) {\n \t\tfor (desc_idx = 0; desc_idx < vq->vq_nentries;\n \t\t     desc_idx++) {\n-\t\t\tvq->vq_ring.avail->ring[desc_idx] = desc_idx;\n-\t\t\tvq->vq_ring.desc[desc_idx].flags =\n+\t\t\tvq->vq_split.ring.avail->ring[desc_idx] = desc_idx;\n+\t\t\tvq->vq_split.ring.desc[desc_idx].flags =\n \t\t\t\tVRING_DESC_F_WRITE;\n \t\t}\n \n@@ -1050,7 +1050,7 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,\n \n \tif (!vtpci_packed_queue(hw)) {\n \t\tif (hw->use_inorder_tx)\n-\t\t\tvq->vq_ring.desc[vq->vq_nentries - 1].next = 0;\n+\t\t\tvq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;\n \t}\n \n \tVIRTQUEUE_DUMP(vq);\ndiff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h\nindex dc97e4ccf..3d1296a23 100644\n--- a/drivers/net/virtio/virtio_rxtx_simple.h\n+++ 
b/drivers/net/virtio/virtio_rxtx_simple.h\n@@ -27,7 +27,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)\n \n \tdesc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);\n \tsw_ring = &vq->sw_ring[desc_idx];\n-\tstart_dp = &vq->vq_ring.desc[desc_idx];\n+\tstart_dp = &vq->vq_split.ring.desc[desc_idx];\n \n \tret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,\n \t\tRTE_VIRTIO_VPMD_RX_REARM_THRESH);\ndiff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c\nindex d6207d7bb..cdc2a4d28 100644\n--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c\n+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c\n@@ -93,7 +93,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n \tnb_used = RTE_MIN(nb_used, nb_pkts);\n \n \tdesc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));\n-\trused = &vq->vq_ring.used->ring[desc_idx];\n+\trused = &vq->vq_split.ring.used->ring[desc_idx];\n \tsw_ring  = &vq->sw_ring[desc_idx];\n \tsw_ring_end = &vq->sw_ring[vq->vq_nentries];\n \ndiff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c\nindex d768d0757..af76708d6 100644\n--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c\n+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c\n@@ -95,7 +95,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,\n \tnb_used = RTE_MIN(nb_used, nb_pkts);\n \n \tdesc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));\n-\trused = &vq->vq_ring.used->ring[desc_idx];\n+\trused = &vq->vq_split.ring.used->ring[desc_idx];\n \tsw_ring  = &vq->sw_ring[desc_idx];\n \tsw_ring_end = &vq->sw_ring[vq->vq_nentries];\n \ndiff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c\nindex 5b03f7a27..79491db32 100644\n--- a/drivers/net/virtio/virtqueue.c\n+++ b/drivers/net/virtio/virtqueue.c\n@@ -61,7 +61,7 @@ virtqueue_rxvq_flush_packed(struct virtqueue *vq)\n \tstruct vq_desc_extra *dxp;\n \tuint16_t i;\n \n-\tstruct 
vring_packed_desc *descs = vq->ring_packed.desc_packed;\n+\tstruct vring_packed_desc *descs = vq->vq_packed.ring.desc_packed;\n \tint cnt = 0;\n \n \ti = vq->vq_used_cons_idx;\n@@ -75,7 +75,7 @@ virtqueue_rxvq_flush_packed(struct virtqueue *vq)\n \t\tvq->vq_used_cons_idx++;\n \t\tif (vq->vq_used_cons_idx >= vq->vq_nentries) {\n \t\t\tvq->vq_used_cons_idx -= vq->vq_nentries;\n-\t\t\tvq->used_wrap_counter ^= 1;\n+\t\t\tvq->vq_packed.used_wrap_counter ^= 1;\n \t\t}\n \t\ti = vq->vq_used_cons_idx;\n \t}\n@@ -96,7 +96,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)\n \n \tfor (i = 0; i < nb_used; i++) {\n \t\tused_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);\n-\t\tuep = &vq->vq_ring.used->ring[used_idx];\n+\t\tuep = &vq->vq_split.ring.used->ring[used_idx];\n \t\tif (hw->use_simple_rx) {\n \t\t\tdesc_idx = used_idx;\n \t\t\trte_pktmbuf_free(vq->sw_ring[desc_idx]);\ndiff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h\nindex 80c0c43c3..48b3912e6 100644\n--- a/drivers/net/virtio/virtqueue.h\n+++ b/drivers/net/virtio/virtqueue.h\n@@ -191,17 +191,22 @@ struct vq_desc_extra {\n \n struct virtqueue {\n \tstruct virtio_hw  *hw; /**< virtio_hw structure pointer. 
*/\n-\tstruct vring vq_ring;  /**< vring keeping desc, used and avail */\n-\tstruct vring_packed ring_packed;  /**< vring keeping descs */\n-\tbool used_wrap_counter;\n-\tuint16_t cached_flags; /**< cached flags for descs */\n-\tuint16_t event_flags_shadow;\n+\tunion {\n+\t\tstruct {\n+\t\t\t/**< vring keeping desc, used and avail */\n+\t\t\tstruct vring ring;\n+\t\t} vq_split;\n \n-\t/**\n-\t * Last consumed descriptor in the used table,\n-\t * trails vq_ring.used->idx.\n-\t */\n-\tuint16_t vq_used_cons_idx;\n+\t\tstruct {\n+\t\t\t/**< vring keeping descs and events */\n+\t\t\tstruct vring_packed ring;\n+\t\t\tbool used_wrap_counter;\n+\t\t\tuint16_t cached_flags; /**< cached flags for descs */\n+\t\t\tuint16_t event_flags_shadow;\n+\t\t} vq_packed;\n+\t};\n+\n+\tuint16_t vq_used_cons_idx; /**< last consumed descriptor */\n \tuint16_t vq_nentries;  /**< vring desc numbers */\n \tuint16_t vq_free_cnt;  /**< num of desc available */\n \tuint16_t vq_avail_idx; /**< sync until needed */\n@@ -289,7 +294,7 @@ desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)\n \tused = !!(flags & VRING_DESC_F_USED(1));\n \tavail = !!(flags & VRING_DESC_F_AVAIL(1));\n \n-\treturn avail == used && used == vq->used_wrap_counter;\n+\treturn avail == used && used == vq->vq_packed.used_wrap_counter;\n }\n \n static inline void\n@@ -297,10 +302,10 @@ vring_desc_init_packed(struct virtqueue *vq, int n)\n {\n \tint i;\n \tfor (i = 0; i < n - 1; i++) {\n-\t\tvq->ring_packed.desc_packed[i].id = i;\n+\t\tvq->vq_packed.ring.desc_packed[i].id = i;\n \t\tvq->vq_descx[i].next = i + 1;\n \t}\n-\tvq->ring_packed.desc_packed[i].id = i;\n+\tvq->vq_packed.ring.desc_packed[i].id = i;\n \tvq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;\n }\n \n@@ -321,10 +326,10 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n)\n static inline void\n virtqueue_disable_intr_packed(struct virtqueue *vq)\n {\n-\tif (vq->event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {\n-\t\tvq->event_flags_shadow = 
RING_EVENT_FLAGS_DISABLE;\n-\t\tvq->ring_packed.driver_event->desc_event_flags =\n-\t\t\tvq->event_flags_shadow;\n+\tif (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {\n+\t\tvq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;\n+\t\tvq->vq_packed.ring.driver_event->desc_event_flags =\n+\t\t\tvq->vq_packed.event_flags_shadow;\n \t}\n }\n \n@@ -337,7 +342,7 @@ virtqueue_disable_intr(struct virtqueue *vq)\n \tif (vtpci_packed_queue(vq->hw))\n \t\tvirtqueue_disable_intr_packed(vq);\n \telse\n-\t\tvq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;\n+\t\tvq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;\n }\n \n /**\n@@ -346,11 +351,10 @@ virtqueue_disable_intr(struct virtqueue *vq)\n static inline void\n virtqueue_enable_intr_packed(struct virtqueue *vq)\n {\n-\tuint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;\n-\n-\tif (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {\n-\t\tvq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;\n-\t\t*event_flags = vq->event_flags_shadow;\n+\tif (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {\n+\t\tvq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;\n+\t\tvq->vq_packed.ring.driver_event->desc_event_flags =\n+\t\t\tvq->vq_packed.event_flags_shadow;\n \t}\n }\n \n@@ -360,7 +364,7 @@ virtqueue_enable_intr_packed(struct virtqueue *vq)\n static inline void\n virtqueue_enable_intr_split(struct virtqueue *vq)\n {\n-\tvq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);\n+\tvq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);\n }\n \n /**\n@@ -404,7 +408,8 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)\n \t\treturn VTNET_TQ;\n }\n \n-#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))\n+#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_split.ring.used->idx - \\\n+\t\t\t\t\t(vq)->vq_used_cons_idx))\n \n void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);\n void 
vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);\n@@ -415,7 +420,7 @@ static inline void\n vq_update_avail_idx(struct virtqueue *vq)\n {\n \tvirtio_wmb(vq->hw->weak_barriers);\n-\tvq->vq_ring.avail->idx = vq->vq_avail_idx;\n+\tvq->vq_split.ring.avail->idx = vq->vq_avail_idx;\n }\n \n static inline void\n@@ -430,8 +435,8 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)\n \t * descriptor.\n \t */\n \tavail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));\n-\tif (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))\n-\t\tvq->vq_ring.avail->ring[avail_idx] = desc_idx;\n+\tif (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))\n+\t\tvq->vq_split.ring.avail->ring[avail_idx] = desc_idx;\n \tvq->vq_avail_idx++;\n }\n \n@@ -443,7 +448,7 @@ virtqueue_kick_prepare(struct virtqueue *vq)\n \t * the used->flags.\n \t */\n \tvirtio_mb(vq->hw->weak_barriers);\n-\treturn !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);\n+\treturn !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);\n }\n \n static inline int\n@@ -455,7 +460,7 @@ virtqueue_kick_prepare_packed(struct virtqueue *vq)\n \t * Ensure updated data is visible to vhost before reading the flags.\n \t */\n \tvirtio_mb(vq->hw->weak_barriers);\n-\tflags = vq->ring_packed.device_event->desc_event_flags;\n+\tflags = vq->vq_packed.ring.device_event->desc_event_flags;\n \n \treturn flags != RING_EVENT_FLAGS_DISABLE;\n }\n@@ -473,15 +478,15 @@ virtqueue_notify(struct virtqueue *vq)\n #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP\n #define VIRTQUEUE_DUMP(vq) do { \\\n \tuint16_t used_idx, nused; \\\n-\tused_idx = (vq)->vq_ring.used->idx; \\\n+\tused_idx = (vq)->vq_split.ring.used->idx; \\\n \tnused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \\\n \tif (vtpci_packed_queue((vq)->hw)) { \\\n \t\tPMD_INIT_LOG(DEBUG, \\\n \t\t\"VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;\" \\\n \t\t\" cached_flags=0x%x; used_wrap_counter=%d\", \\\n \t\t(vq)->vq_nentries, 
(vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \\\n-\t\t(vq)->vq_avail_idx, (vq)->cached_flags, \\\n-\t\t(vq)->used_wrap_counter); \\\n+\t\t(vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \\\n+\t\t(vq)->vq_packed.used_wrap_counter); \\\n \t\tbreak; \\\n \t} \\\n \tPMD_INIT_LOG(DEBUG, \\\n@@ -489,9 +494,9 @@ virtqueue_notify(struct virtqueue *vq)\n \t  \" avail.idx=%d; used_cons_idx=%d; used.idx=%d;\" \\\n \t  \" avail.flags=0x%x; used.flags=0x%x\", \\\n \t  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \\\n-\t  (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \\\n-\t  (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \\\n-\t  (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \\\n+\t  (vq)->vq_desc_head_idx, (vq)->vq_split.ring.avail->idx, \\\n+\t  (vq)->vq_used_cons_idx, (vq)->vq_split.ring.used->idx, \\\n+\t  (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \\\n } while (0)\n #else\n #define VIRTQUEUE_DUMP(vq) do { } while (0)\n",
    "prefixes": [
        "05/10"
    ]
}