get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request body are changed).

put:
Update a patch (full update).
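
For illustration, a minimal Python sketch (using the third-party requests library) of how a client might drive these methods against this endpoint. The token value and the new state slug are placeholders, not real credentials or a prescribed workflow; reads need no authentication, while PATCH/PUT require an API token from an account with the relevant rights.

import requests

BASE = "http://patchwork.dpdk.org/api"

session = requests.Session()
# Hypothetical token; write requests are rejected without authentication.
session.headers["Authorization"] = "Token 0123456789abcdef0123456789abcdef"

# get: show a patch (no token needed for reads)
patch = session.get(f"{BASE}/patches/119009/").json()
print(patch["name"], "->", patch["state"])

# patch: partial update -- send only the fields to change. The state slug
# used here ("accepted") is illustrative; the "state" field in the response
# below shows the value this patch actually carries.
resp = session.patch(f"{BASE}/patches/119009/", json={"state": "accepted"})
resp.raise_for_status()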

GET /api/patches/119009/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 119009,
    "url": "http://patchwork.dpdk.org/api/patches/119009/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221024131227.1062446-5-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221024131227.1062446-5-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221024131227.1062446-5-junfeng.guo@intel.com",
    "date": "2022-10-24T13:12:13",
    "name": "[v11,04/18] net/idpf: add Rx queue setup",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "07753804d03c1dc3d63404a82fe5771c313d2244",
    "submitter": {
        "id": 1785,
        "url": "http://patchwork.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221024131227.1062446-5-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25386,
            "url": "http://patchwork.dpdk.org/api/series/25386/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25386",
            "date": "2022-10-24T13:12:09",
            "name": "add support for idpf PMD in DPDK",
            "version": 11,
            "mbox": "http://patchwork.dpdk.org/series/25386/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/119009/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/119009/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B17D5A034C;\n\tMon, 24 Oct 2022 15:14:31 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E0A4E42BA7;\n\tMon, 24 Oct 2022 15:14:17 +0200 (CEST)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n by mails.dpdk.org (Postfix) with ESMTP id 6374542B9C\n for <dev@dpdk.org>; Mon, 24 Oct 2022 15:14:14 +0200 (CEST)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 24 Oct 2022 06:14:13 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by orsmga001.jf.intel.com with ESMTP; 24 Oct 2022 06:14:11 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666617254; x=1698153254;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=x5WkIXLnMtobhj14UPdFxs4xGuxYRv5gB/RCzhIadjs=;\n b=PRxIi4SjTVwm+aMHXWdTZ4UrOROtA12/hD1N955JJs5xiAB5rocjZoto\n BefhX9Sd/vJEPXG88oNQNY9NP7yfLYebF+t/v6VM29yKDZGWyUQM3RfgE\n 5Iyr80Ru7VdG/TaDD6uY/1kWl8s0H+2MKl7bZbQbcpImlQPBcTCZjrczF\n qx2SXMLC0WHYZbrOSrMV+uQbOAxnVa8GX+wyKvwCV20oCgoYQJrwgAxM5\n kVdkFfY5RLYDAWSShdouxrqeLIn4nRevdjy/mv4A+OvBk+q/4J2oRkG71\n vcX8Xzp9AUC527VVWL+oUMg7JPM7YtSpWplwCIfsW6jFOKFIDEAS+Y0UD g==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10510\"; a=\"309100014\"",
            "E=Sophos;i=\"5.95,209,1661842800\"; d=\"scan'208\";a=\"309100014\"",
            "E=McAfee;i=\"6500,9779,10510\"; a=\"664539984\"",
            "E=Sophos;i=\"5.95,209,1661842800\"; d=\"scan'208\";a=\"664539984\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,\n jingjing.wu@intel.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,\n Xiaoyun Li <xiaoyun.li@intel.com>",
        "Subject": "[PATCH v11 04/18] net/idpf: add Rx queue setup",
        "Date": "Mon, 24 Oct 2022 21:12:13 +0800",
        "Message-Id": "<20221024131227.1062446-5-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221024131227.1062446-1-junfeng.guo@intel.com>",
        "References": "<20221024130134.1046536-2-junfeng.guo@intel.com>\n <20221024131227.1062446-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add support for rx_queue_setup ops.\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n drivers/net/idpf/idpf_ethdev.c |  16 +-\n drivers/net/idpf/idpf_rxtx.c   | 427 +++++++++++++++++++++++++++++++++\n drivers/net/idpf/idpf_rxtx.h   |  46 ++++\n 3 files changed, 488 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nindex c0307128be..1d2075f466 100644\n--- a/drivers/net/idpf/idpf_ethdev.c\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -37,6 +37,7 @@ static void idpf_adapter_rel(struct idpf_adapter *adapter);\n static const struct eth_dev_ops idpf_eth_dev_ops = {\n \t.dev_configure\t\t\t= idpf_dev_configure,\n \t.dev_close\t\t\t= idpf_dev_close,\n+\t.rx_queue_setup\t\t\t= idpf_rx_queue_setup,\n \t.tx_queue_setup\t\t\t= idpf_tx_queue_setup,\n \t.dev_infos_get\t\t\t= idpf_dev_info_get,\n };\n@@ -56,16 +57,29 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \tdev_info->min_mtu = RTE_ETHER_MIN_MTU;\n \n \tdev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX;\n-\tdev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;\n+\tdev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |\n+\t\tRTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;\n \tdev_info->rx_offload_capa = 0;\n \tdev_info->tx_offload_capa = 0;\n \n+\tdev_info->default_rxconf = (struct rte_eth_rxconf) {\n+\t\t.rx_free_thresh = IDPF_DEFAULT_RX_FREE_THRESH,\n+\t\t.rx_drop_en = 0,\n+\t\t.offloads = 0,\n+\t};\n+\n \tdev_info->default_txconf = (struct rte_eth_txconf) {\n \t\t.tx_free_thresh = IDPF_DEFAULT_TX_FREE_THRESH,\n \t\t.tx_rs_thresh = IDPF_DEFAULT_TX_RS_THRESH,\n \t\t.offloads = 0,\n \t};\n \n+\tdev_info->rx_desc_lim = (struct rte_eth_desc_lim) {\n+\t\t.nb_max = IDPF_MAX_RING_DESC,\n+\t\t.nb_min = IDPF_MIN_RING_DESC,\n+\t\t.nb_align = IDPF_ALIGN_RING_DESC,\n+\t};\n+\n \tdev_info->tx_desc_lim = (struct rte_eth_desc_lim) {\n \t\t.nb_max = IDPF_MAX_RING_DESC,\n \t\t.nb_min = IDPF_MIN_RING_DESC,\ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nindex df0ed772c1..76669504c3 100644\n--- a/drivers/net/idpf/idpf_rxtx.c\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -8,6 +8,21 @@\n #include \"idpf_ethdev.h\"\n #include \"idpf_rxtx.h\"\n \n+static inline int\n+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)\n+{\n+\t/* The following constraints must be satisfied:\n+\t *   thresh < rxq->nb_rx_desc\n+\t */\n+\tif (thresh >= nb_desc) {\n+\t\tPMD_INIT_LOG(ERR, \"rx_free_thresh (%u) must be less than %u\",\n+\t\t\t     thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n static inline int\n check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,\n \t\tuint16_t tx_free_thresh)\n@@ -56,6 +71,95 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,\n \treturn 0;\n }\n \n+static inline void\n+reset_split_rx_descq(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (rxq == NULL)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\trxq->rx_tail = 0;\n+\trxq->expected_gen_id = 1;\n+}\n+\n+static inline void\n+reset_split_rx_bufq(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (rxq == NULL)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\n+\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n+\n+\t/* The next descriptor id which can be received. */\n+\trxq->rx_next_avail = 0;\n+\n+\t/* The next descriptor id which can be refilled. 
*/\n+\trxq->rx_tail = 0;\n+\t/* The number of descriptors which can be refilled. */\n+\trxq->nb_rx_hold = rxq->nb_rx_desc - 1;\n+\n+\trxq->bufq1 = NULL;\n+\trxq->bufq2 = NULL;\n+}\n+\n+static inline void\n+reset_split_rx_queue(struct idpf_rx_queue *rxq)\n+{\n+\treset_split_rx_descq(rxq);\n+\treset_split_rx_bufq(rxq->bufq1);\n+\treset_split_rx_bufq(rxq->bufq2);\n+}\n+\n+static inline void\n+reset_single_rx_queue(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (rxq == NULL)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\n+\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n+\n+\trxq->rx_tail = 0;\n+\trxq->nb_rx_hold = 0;\n+\n+\tif (rxq->pkt_first_seg != NULL)\n+\t\trte_pktmbuf_free(rxq->pkt_first_seg);\n+\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n+}\n+\n static inline void\n reset_split_tx_descq(struct idpf_tx_queue *txq)\n {\n@@ -145,6 +249,329 @@ reset_single_tx_queue(struct idpf_tx_queue *txq)\n \ttxq->next_rs = txq->rs_thresh - 1;\n }\n \n+static int\n+idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,\n+\t\t\t uint16_t queue_idx, uint16_t rx_free_thresh,\n+\t\t\t uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t const struct rte_eth_rxconf *rx_conf,\n+\t\t\t struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tconst struct rte_memzone *mz;\n+\tuint32_t ring_size;\n+\tuint16_t len;\n+\n+\tbufq->mp = mp;\n+\tbufq->nb_rx_desc = nb_desc;\n+\tbufq->rx_free_thresh = rx_free_thresh;\n+\tbufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;\n+\tbufq->port_id = dev->data->port_id;\n+\tbufq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\tbufq->rx_hdr_len = 0;\n+\tbufq->adapter = adapter;\n+\n+\tlen = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;\n+\tbufq->rx_buf_len = len;\n+\n+\t/* Allocate the software ring. */\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tbufq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf rx bufq sw ring\",\n+\t\t\t\t   sizeof(struct rte_mbuf *) * len,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (bufq->sw_ring == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate a liitle more to support bulk allocate. */\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_splitq_rx_buf_desc),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx_buf_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX buffer queue.\");\n+\t\trte_free(bufq->sw_ring);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. 
*/\n+\tmemset(mz->addr, 0, ring_size);\n+\tbufq->rx_ring_phys_addr = mz->iova;\n+\tbufq->rx_ring = mz->addr;\n+\n+\tbufq->mz = mz;\n+\treset_split_rx_bufq(bufq);\n+\tbufq->q_set = true;\n+\tbufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +\n+\t\t\t queue_idx * vport->chunks_info.rx_buf_qtail_spacing);\n+\n+\t/* TODO: allow bulk or vec */\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t  uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t  const struct rte_eth_rxconf *rx_conf,\n+\t\t\t  struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_rx_queue *bufq1, *bufq2;\n+\tconst struct rte_memzone *mz;\n+\tstruct idpf_rx_queue *rxq;\n+\tuint16_t rx_free_thresh;\n+\tuint32_t ring_size;\n+\tuint64_t offloads;\n+\tuint16_t qid;\n+\tuint16_t len;\n+\tint ret;\n+\n+\toffloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;\n+\n+\tif (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > IDPF_MAX_RING_DESC ||\n+\t    nb_desc < IDPF_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of receive descriptors is invalid\", nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Check free threshold */\n+\trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n+\t\tIDPF_DEFAULT_RX_FREE_THRESH :\n+\t\trx_conf->rx_free_thresh;\n+\tif (check_rx_thresh(nb_desc, rx_free_thresh) != 0)\n+\t\treturn -EINVAL;\n+\n+\t/* Setup Rx description queue */\n+\trxq = rte_zmalloc_socket(\"idpf rxq\",\n+\t\t\t\t sizeof(struct idpf_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (rxq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->mp = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = rx_free_thresh;\n+\trxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\trxq->rx_hdr_len = 0;\n+\trxq->adapter = adapter;\n+\trxq->offloads = offloads;\n+\n+\tlen = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;\n+\trxq->rx_buf_len = len;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx_cpmpl_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\n+\tif (mz == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX\");\n+\t\tret = -ENOMEM;\n+\t\tgoto free_rxq;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. 
*/\n+\tmemset(mz->addr, 0, ring_size);\n+\trxq->rx_ring_phys_addr = mz->iova;\n+\trxq->rx_ring = mz->addr;\n+\n+\trxq->mz = mz;\n+\treset_split_rx_descq(rxq);\n+\n+\t/* TODO: allow bulk or vec */\n+\n+\t/* setup Rx buffer queue */\n+\tbufq1 = rte_zmalloc_socket(\"idpf bufq1\",\n+\t\t\t\t   sizeof(struct idpf_rx_queue),\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (bufq1 == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue 1.\");\n+\t\tret = -ENOMEM;\n+\t\tgoto free_mz;\n+\t}\n+\tqid = 2 * queue_idx;\n+\tret = idpf_rx_split_bufq_setup(dev, bufq1, qid, rx_free_thresh,\n+\t\t\t\t       nb_desc, socket_id, rx_conf, mp);\n+\tif (ret != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 1\");\n+\t\tret = -EINVAL;\n+\t\tgoto free_bufq1;\n+\t}\n+\trxq->bufq1 = bufq1;\n+\n+\tbufq2 = rte_zmalloc_socket(\"idpf bufq2\",\n+\t\t\t\t   sizeof(struct idpf_rx_queue),\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (bufq2 == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue 2.\");\n+\t\trte_free(bufq1->sw_ring);\n+\t\trte_memzone_free(bufq1->mz);\n+\t\tret = -ENOMEM;\n+\t\tgoto free_bufq1;\n+\t}\n+\tqid = 2 * queue_idx + 1;\n+\tret = idpf_rx_split_bufq_setup(dev, bufq2, qid, rx_free_thresh,\n+\t\t\t\t       nb_desc, socket_id, rx_conf, mp);\n+\tif (ret != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 2\");\n+\t\trte_free(bufq1->sw_ring);\n+\t\trte_memzone_free(bufq1->mz);\n+\t\tret = -EINVAL;\n+\t\tgoto free_bufq2;\n+\t}\n+\trxq->bufq2 = bufq2;\n+\n+\trxq->q_set = true;\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\n+\treturn 0;\n+\n+free_bufq2:\n+\trte_free(bufq2);\n+free_bufq1:\n+\trte_free(bufq1);\n+free_mz:\n+\trte_memzone_free(mz);\n+free_rxq:\n+\trte_free(rxq);\n+\n+\treturn ret;\n+}\n+\n+static int\n+idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t   uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t   const struct rte_eth_rxconf *rx_conf,\n+\t\t\t   struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tconst struct rte_memzone *mz;\n+\tstruct idpf_rx_queue *rxq;\n+\tuint16_t rx_free_thresh;\n+\tuint32_t ring_size;\n+\tuint64_t offloads;\n+\tuint16_t len;\n+\n+\tif (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > IDPF_MAX_RING_DESC ||\n+\t    nb_desc < IDPF_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of receive descriptors is invalid\",\n+\t\t\t     nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\toffloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;\n+\n+\t/* Check free threshold */\n+\trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n+\t\tIDPF_DEFAULT_RX_FREE_THRESH :\n+\t\trx_conf->rx_free_thresh;\n+\tif (check_rx_thresh(nb_desc, rx_free_thresh) != 0)\n+\t\treturn -EINVAL;\n+\n+\t/* Setup Rx description queue */\n+\trxq = rte_zmalloc_socket(\"idpf rxq\",\n+\t\t\t\t sizeof(struct idpf_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (rxq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->mp = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = rx_free_thresh;\n+\trxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\trxq->rx_hdr_len = 0;\n+\trxq->adapter = 
adapter;\n+\trxq->offloads = offloads;\n+\n+\tlen = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;\n+\trxq->rx_buf_len = len;\n+\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\trxq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf rxq sw ring\",\n+\t\t\t\t   sizeof(struct rte_mbuf *) * len,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (rxq->sw_ring == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate a liitle more to support bulk allocate. */\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_singleq_rx_buf_desc),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX buffer queue.\");\n+\t\trte_free(rxq->sw_ring);\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. */\n+\tmemset(mz->addr, 0, ring_size);\n+\trxq->rx_ring_phys_addr = mz->iova;\n+\trxq->rx_ring = mz->addr;\n+\n+\trxq->mz = mz;\n+\treset_single_rx_queue(rxq);\n+\trxq->q_set = true;\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\trxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +\n+\t\t\tqueue_idx * vport->chunks_info.rx_qtail_spacing);\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t    uint16_t nb_desc, unsigned int socket_id,\n+\t\t    const struct rte_eth_rxconf *rx_conf,\n+\t\t    struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n+\t\treturn idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t  socket_id, rx_conf, mp);\n+\telse\n+\t\treturn idpf_rx_split_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t socket_id, rx_conf, mp);\n+}\n+\n static int\n idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t  uint16_t nb_desc, unsigned int socket_id,\ndiff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h\nindex c5c43c3033..a79e9a0b26 100644\n--- a/drivers/net/idpf/idpf_rxtx.h\n+++ b/drivers/net/idpf/idpf_rxtx.h\n@@ -15,11 +15,53 @@\n /* Base address of the HW descriptor ring should be 128B aligned. 
*/\n #define IDPF_RING_BASE_ALIGN\t128\n \n+#define IDPF_RX_MAX_BURST\t32\n #define IDPF_DEFAULT_RX_FREE_THRESH\t32\n \n #define IDPF_DEFAULT_TX_RS_THRESH\t32\n #define IDPF_DEFAULT_TX_FREE_THRESH\t32\n \n+struct idpf_rx_queue {\n+\tstruct idpf_adapter *adapter;\t/* the adapter this queue belongs to */\n+\tstruct rte_mempool *mp;\t\t/* mbuf pool to populate Rx ring */\n+\tconst struct rte_memzone *mz;\t/* memzone for Rx ring */\n+\tvolatile void *rx_ring;\n+\tstruct rte_mbuf **sw_ring;\t/* address of SW ring */\n+\tuint64_t rx_ring_phys_addr;\t/* Rx ring DMA address */\n+\n+\tuint16_t nb_rx_desc;\t\t/* ring length */\n+\tuint16_t rx_tail;\t\t/* current value of tail */\n+\tvolatile uint8_t *qrx_tail;\t/* register address of tail */\n+\tuint16_t rx_free_thresh;\t/* max free RX desc to hold */\n+\tuint16_t nb_rx_hold;\t\t/* number of held free RX desc */\n+\tstruct rte_mbuf *pkt_first_seg;\t/* first segment of current packet */\n+\tstruct rte_mbuf *pkt_last_seg;\t/* last segment of current packet */\n+\tstruct rte_mbuf fake_mbuf;\t/* dummy mbuf */\n+\n+\tuint16_t rx_nb_avail;\n+\tuint16_t rx_next_avail;\n+\n+\tuint16_t port_id;\t/* device port ID */\n+\tuint16_t queue_id;      /* Rx queue index */\n+\tuint16_t rx_buf_len;    /* The packet buffer size */\n+\tuint16_t rx_hdr_len;    /* The header buffer size */\n+\tuint16_t max_pkt_len;   /* Maximum packet length */\n+\tuint8_t rxdid;\n+\n+\tbool q_set;\t\t/* if rx queue has been configured */\n+\tbool q_started;\t\t/* if rx queue has been started */\n+\tbool rx_deferred_start;\t/* don't start this queue in dev start */\n+\tconst struct idpf_rxq_ops *ops;\n+\n+\t/* only valid for split queue mode */\n+\tuint8_t expected_gen_id;\n+\tstruct idpf_rx_queue *bufq1;\n+\tstruct idpf_rx_queue *bufq2;\n+\n+\tuint64_t offloads;\n+\tuint32_t hw_register_set;\n+};\n+\n struct idpf_tx_entry {\n \tstruct rte_mbuf *mbuf;\n \tuint16_t next_id;\n@@ -69,6 +111,10 @@ struct idpf_tx_queue {\n \tstruct idpf_tx_queue *complq;\n };\n \n+int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\t\tstruct rte_mempool *mp);\n int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tuint16_t nb_desc, unsigned int socket_id,\n \t\t\tconst struct rte_eth_txconf *tx_conf);\n",
    "prefixes": [
        "v11",
        "04/18"
    ]
}
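
A common read-side use of this payload is to follow its "mbox" link and apply the patch to a local tree. A minimal Python sketch; the checkout path is a placeholder and git is assumed to be on PATH:

import subprocess

import requests

# Fetch the patch metadata, then the raw mbox it points at.
patch = requests.get("http://patchwork.dpdk.org/api/patches/119009/").json()
mbox = requests.get(patch["mbox"]).content

# Apply the mbox with git-am in a local DPDK clone (path is hypothetical).
subprocess.run(["git", "-C", "/path/to/dpdk", "am"], input=mbox, check=True)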