get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/119218/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 119218,
    "url": "http://patchwork.dpdk.org/api/patches/119218/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221027074729.1494529-5-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221027074729.1494529-5-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221027074729.1494529-5-junfeng.guo@intel.com",
    "date": "2022-10-27T07:47:15",
    "name": "[v14,04/18] net/idpf: add Rx queue setup",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "860fb656ab926247a2acd95a91fa9631a60c1c8a",
    "submitter": {
        "id": 1785,
        "url": "http://patchwork.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221027074729.1494529-5-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25451,
            "url": "http://patchwork.dpdk.org/api/series/25451/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25451",
            "date": "2022-10-27T07:47:11",
            "name": "add support for idpf PMD in DPDK",
            "version": 14,
            "mbox": "http://patchwork.dpdk.org/series/25451/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/119218/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/119218/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9C0D3A00C5;\n\tThu, 27 Oct 2022 09:49:57 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 1C9C642BB0;\n\tThu, 27 Oct 2022 09:49:29 +0200 (CEST)",
            "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n by mails.dpdk.org (Postfix) with ESMTP id 0972B42B85\n for <dev@dpdk.org>; Thu, 27 Oct 2022 09:49:22 +0200 (CEST)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 27 Oct 2022 00:49:22 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by orsmga004.jf.intel.com with ESMTP; 27 Oct 2022 00:49:19 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666856963; x=1698392963;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=Ywe8WWYo9i14lJ6HsFQL/I+CdFwC6ArumYK1ys5zLQM=;\n b=kfD2Nfuwcml3eZhuWxo4uzlXHxTGyzqv4uOYkOWPdDDaFnd53FCW1OFM\n aS8rnZ4fKUH92nwmRG5etUXcG7TndRYimiuYtdJ8yRIk+b5dbtz3eS+QE\n w/UwTB+K2jA2F9E96gQmZ2rVZi0wjKBgPG9YjwPW4n5Sypmm7H7ezZJr2\n n51uezPOduOj5oH9kJMxJ53PVw+40526pUU+iM9PtRs+DLR13DFiSGmfs\n HU+FtMxG28aExYRonutalj8j7xUQreTgDjVNx70Mx0Q9/5yZMLxsauqud\n WxM/hXO6bk2fuFqZbpFg6mBinTMhbhbBHRcufE07ZfO+N6wvKRhr1yCgy w==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10512\"; a=\"287873145\"",
            "E=Sophos;i=\"5.95,217,1661842800\"; d=\"scan'208\";a=\"287873145\"",
            "E=McAfee;i=\"6500,9779,10512\"; a=\"757607638\"",
            "E=Sophos;i=\"5.95,217,1661842800\"; d=\"scan'208\";a=\"757607638\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,\n jingjing.wu@intel.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,\n Xiaoyun Li <xiaoyun.li@intel.com>",
        "Subject": "[PATCH v14 04/18] net/idpf: add Rx queue setup",
        "Date": "Thu, 27 Oct 2022 15:47:15 +0800",
        "Message-Id": "<20221027074729.1494529-5-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221027074729.1494529-1-junfeng.guo@intel.com>",
        "References": "<20221027054505.1369248-2-junfeng.guo@intel.com>\n <20221027074729.1494529-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add support for rx_queue_setup ops.\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n drivers/net/idpf/idpf_ethdev.c |  11 +\n drivers/net/idpf/idpf_rxtx.c   | 400 +++++++++++++++++++++++++++++++++\n drivers/net/idpf/idpf_rxtx.h   |  46 ++++\n 3 files changed, 457 insertions(+)",
    "diff": "diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nindex 11f8b4ba1c..0585153f69 100644\n--- a/drivers/net/idpf/idpf_ethdev.c\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -38,6 +38,7 @@ static void idpf_adapter_rel(struct idpf_adapter *adapter);\n static const struct eth_dev_ops idpf_eth_dev_ops = {\n \t.dev_configure\t\t\t= idpf_dev_configure,\n \t.dev_close\t\t\t= idpf_dev_close,\n+\t.rx_queue_setup\t\t\t= idpf_rx_queue_setup,\n \t.tx_queue_setup\t\t\t= idpf_tx_queue_setup,\n \t.dev_infos_get\t\t\t= idpf_dev_info_get,\n };\n@@ -63,12 +64,22 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\t.tx_rs_thresh = IDPF_DEFAULT_TX_RS_THRESH,\n \t};\n \n+\tdev_info->default_rxconf = (struct rte_eth_rxconf) {\n+\t\t.rx_free_thresh = IDPF_DEFAULT_RX_FREE_THRESH,\n+\t};\n+\n \tdev_info->tx_desc_lim = (struct rte_eth_desc_lim) {\n \t\t.nb_max = IDPF_MAX_RING_DESC,\n \t\t.nb_min = IDPF_MIN_RING_DESC,\n \t\t.nb_align = IDPF_ALIGN_RING_DESC,\n \t};\n \n+\tdev_info->rx_desc_lim = (struct rte_eth_desc_lim) {\n+\t\t.nb_max = IDPF_MAX_RING_DESC,\n+\t\t.nb_min = IDPF_MIN_RING_DESC,\n+\t\t.nb_align = IDPF_ALIGN_RING_DESC,\n+\t};\n+\n \treturn 0;\n }\n \ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nindex 4afa0a2560..25dd5d85d5 100644\n--- a/drivers/net/idpf/idpf_rxtx.c\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -8,6 +8,21 @@\n #include \"idpf_ethdev.h\"\n #include \"idpf_rxtx.h\"\n \n+static int\n+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)\n+{\n+\t/* The following constraints must be satisfied:\n+\t *   thresh < rxq->nb_rx_desc\n+\t */\n+\tif (thresh >= nb_desc) {\n+\t\tPMD_INIT_LOG(ERR, \"rx_free_thresh (%u) must be less than %u\",\n+\t\t\t     thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n static int\n check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,\n \t\tuint16_t tx_free_thresh)\n@@ -56,6 +71,87 @@ check_tx_thresh(uint16_t nb_desc, uint16_t 
tx_rs_thresh,\n \treturn 0;\n }\n \n+static void\n+reset_split_rx_descq(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (rxq == NULL)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\trxq->rx_tail = 0;\n+\trxq->expected_gen_id = 1;\n+}\n+\n+static void\n+reset_split_rx_bufq(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (rxq == NULL)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\n+\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n+\n+\t/* The next descriptor id which can be received. */\n+\trxq->rx_next_avail = 0;\n+\n+\t/* The next descriptor id which can be refilled. */\n+\trxq->rx_tail = 0;\n+\t/* The number of descriptors which can be refilled. 
*/\n+\trxq->nb_rx_hold = rxq->nb_rx_desc - 1;\n+\n+\trxq->bufq1 = NULL;\n+\trxq->bufq2 = NULL;\n+}\n+\n+static void\n+reset_single_rx_queue(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (rxq == NULL)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\n+\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n+\n+\trxq->rx_tail = 0;\n+\trxq->nb_rx_hold = 0;\n+\n+\tif (rxq->pkt_first_seg != NULL)\n+\t\trte_pktmbuf_free(rxq->pkt_first_seg);\n+\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n+}\n+\n static void\n reset_split_tx_descq(struct idpf_tx_queue *txq)\n {\n@@ -145,6 +241,310 @@ reset_single_tx_queue(struct idpf_tx_queue *txq)\n \ttxq->next_rs = txq->rs_thresh - 1;\n }\n \n+static int\n+idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,\n+\t\t\t uint16_t queue_idx, uint16_t rx_free_thresh,\n+\t\t\t uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tconst struct rte_memzone *mz;\n+\tuint32_t ring_size;\n+\tuint16_t len;\n+\n+\tbufq->mp = mp;\n+\tbufq->nb_rx_desc = nb_desc;\n+\tbufq->rx_free_thresh = rx_free_thresh;\n+\tbufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;\n+\tbufq->port_id = dev->data->port_id;\n+\tbufq->rx_hdr_len = 0;\n+\tbufq->adapter = adapter;\n+\n+\tlen = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;\n+\tbufq->rx_buf_len = len;\n+\n+\t/* Allocate the software ring. 
*/\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tbufq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf rx bufq sw ring\",\n+\t\t\t\t   sizeof(struct rte_mbuf *) * len,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (bufq->sw_ring == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate a liitle more to support bulk allocate. */\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_splitq_rx_buf_desc),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx_buf_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX buffer queue.\");\n+\t\trte_free(bufq->sw_ring);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. */\n+\tmemset(mz->addr, 0, ring_size);\n+\tbufq->rx_ring_phys_addr = mz->iova;\n+\tbufq->rx_ring = mz->addr;\n+\n+\tbufq->mz = mz;\n+\treset_split_rx_bufq(bufq);\n+\tbufq->q_set = true;\n+\tbufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +\n+\t\t\t queue_idx * vport->chunks_info.rx_buf_qtail_spacing);\n+\n+\t/* TODO: allow bulk or vec */\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t  uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t  const struct rte_eth_rxconf *rx_conf,\n+\t\t\t  struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_rx_queue *bufq1, *bufq2;\n+\tconst struct rte_memzone *mz;\n+\tstruct idpf_rx_queue *rxq;\n+\tuint16_t rx_free_thresh;\n+\tuint32_t ring_size;\n+\tuint64_t offloads;\n+\tuint16_t qid;\n+\tuint16_t len;\n+\tint ret;\n+\n+\toffloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;\n+\n+\t/* Check free threshold 
*/\n+\trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n+\t\tIDPF_DEFAULT_RX_FREE_THRESH :\n+\t\trx_conf->rx_free_thresh;\n+\tif (check_rx_thresh(nb_desc, rx_free_thresh) != 0)\n+\t\treturn -EINVAL;\n+\n+\t/* Setup Rx description queue */\n+\trxq = rte_zmalloc_socket(\"idpf rxq\",\n+\t\t\t\t sizeof(struct idpf_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (rxq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->mp = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = rx_free_thresh;\n+\trxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->rx_hdr_len = 0;\n+\trxq->adapter = adapter;\n+\trxq->offloads = offloads;\n+\n+\tlen = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;\n+\trxq->rx_buf_len = len;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx_cpmpl_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\n+\tif (mz == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX\");\n+\t\tret = -ENOMEM;\n+\t\tgoto free_rxq;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. 
*/\n+\tmemset(mz->addr, 0, ring_size);\n+\trxq->rx_ring_phys_addr = mz->iova;\n+\trxq->rx_ring = mz->addr;\n+\n+\trxq->mz = mz;\n+\treset_split_rx_descq(rxq);\n+\n+\t/* TODO: allow bulk or vec */\n+\n+\t/* setup Rx buffer queue */\n+\tbufq1 = rte_zmalloc_socket(\"idpf bufq1\",\n+\t\t\t\t   sizeof(struct idpf_rx_queue),\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (bufq1 == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue 1.\");\n+\t\tret = -ENOMEM;\n+\t\tgoto free_mz;\n+\t}\n+\tqid = 2 * queue_idx;\n+\tret = idpf_rx_split_bufq_setup(dev, bufq1, qid, rx_free_thresh,\n+\t\t\t\t       nb_desc, socket_id, mp);\n+\tif (ret != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 1\");\n+\t\tret = -EINVAL;\n+\t\tgoto free_bufq1;\n+\t}\n+\trxq->bufq1 = bufq1;\n+\n+\tbufq2 = rte_zmalloc_socket(\"idpf bufq2\",\n+\t\t\t\t   sizeof(struct idpf_rx_queue),\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (bufq2 == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue 2.\");\n+\t\trte_free(bufq1->sw_ring);\n+\t\trte_memzone_free(bufq1->mz);\n+\t\tret = -ENOMEM;\n+\t\tgoto free_bufq1;\n+\t}\n+\tqid = 2 * queue_idx + 1;\n+\tret = idpf_rx_split_bufq_setup(dev, bufq2, qid, rx_free_thresh,\n+\t\t\t\t       nb_desc, socket_id, mp);\n+\tif (ret != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 2\");\n+\t\trte_free(bufq1->sw_ring);\n+\t\trte_memzone_free(bufq1->mz);\n+\t\tret = -EINVAL;\n+\t\tgoto free_bufq2;\n+\t}\n+\trxq->bufq2 = bufq2;\n+\n+\trxq->q_set = true;\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\n+\treturn 0;\n+\n+free_bufq2:\n+\trte_free(bufq2);\n+free_bufq1:\n+\trte_free(bufq1);\n+free_mz:\n+\trte_memzone_free(mz);\n+free_rxq:\n+\trte_free(rxq);\n+\n+\treturn ret;\n+}\n+\n+static int\n+idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t   uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t   const struct rte_eth_rxconf 
*rx_conf,\n+\t\t\t   struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tconst struct rte_memzone *mz;\n+\tstruct idpf_rx_queue *rxq;\n+\tuint16_t rx_free_thresh;\n+\tuint32_t ring_size;\n+\tuint64_t offloads;\n+\tuint16_t len;\n+\n+\toffloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;\n+\n+\t/* Check free threshold */\n+\trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n+\t\tIDPF_DEFAULT_RX_FREE_THRESH :\n+\t\trx_conf->rx_free_thresh;\n+\tif (check_rx_thresh(nb_desc, rx_free_thresh) != 0)\n+\t\treturn -EINVAL;\n+\n+\t/* Setup Rx description queue */\n+\trxq = rte_zmalloc_socket(\"idpf rxq\",\n+\t\t\t\t sizeof(struct idpf_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (rxq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->mp = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = rx_free_thresh;\n+\trxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->rx_hdr_len = 0;\n+\trxq->adapter = adapter;\n+\trxq->offloads = offloads;\n+\n+\tlen = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;\n+\trxq->rx_buf_len = len;\n+\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\trxq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf rxq sw ring\",\n+\t\t\t\t   sizeof(struct rte_mbuf *) * len,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (rxq->sw_ring == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate a liitle more to support bulk allocate. 
*/\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_singleq_rx_buf_desc),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX buffer queue.\");\n+\t\trte_free(rxq->sw_ring);\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. */\n+\tmemset(mz->addr, 0, ring_size);\n+\trxq->rx_ring_phys_addr = mz->iova;\n+\trxq->rx_ring = mz->addr;\n+\n+\trxq->mz = mz;\n+\treset_single_rx_queue(rxq);\n+\trxq->q_set = true;\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\trxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +\n+\t\t\tqueue_idx * vport->chunks_info.rx_qtail_spacing);\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t    uint16_t nb_desc, unsigned int socket_id,\n+\t\t    const struct rte_eth_rxconf *rx_conf,\n+\t\t    struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n+\t\treturn idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t  socket_id, rx_conf, mp);\n+\telse\n+\t\treturn idpf_rx_split_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t socket_id, rx_conf, mp);\n+}\n+\n static int\n idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t  uint16_t nb_desc, unsigned int socket_id,\ndiff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h\nindex c7ba15b058..3f3932c3eb 100644\n--- a/drivers/net/idpf/idpf_rxtx.h\n+++ b/drivers/net/idpf/idpf_rxtx.h\n@@ -15,9 +15,51 @@\n /* Base address of the HW descriptor ring should be 128B aligned. 
*/\n #define IDPF_RING_BASE_ALIGN\t128\n \n+#define IDPF_RX_MAX_BURST\t\t32\n+#define IDPF_DEFAULT_RX_FREE_THRESH\t32\n+\n #define IDPF_DEFAULT_TX_RS_THRESH\t32\n #define IDPF_DEFAULT_TX_FREE_THRESH\t32\n \n+struct idpf_rx_queue {\n+\tstruct idpf_adapter *adapter;   /* the adapter this queue belongs to */\n+\tstruct rte_mempool *mp;         /* mbuf pool to populate Rx ring */\n+\tconst struct rte_memzone *mz;   /* memzone for Rx ring */\n+\tvolatile void *rx_ring;\n+\tstruct rte_mbuf **sw_ring;      /* address of SW ring */\n+\tuint64_t rx_ring_phys_addr;     /* Rx ring DMA address */\n+\n+\tuint16_t nb_rx_desc;            /* ring length */\n+\tuint16_t rx_tail;               /* current value of tail */\n+\tvolatile uint8_t *qrx_tail;     /* register address of tail */\n+\tuint16_t rx_free_thresh;        /* max free RX desc to hold */\n+\tuint16_t nb_rx_hold;            /* number of held free RX desc */\n+\tstruct rte_mbuf *pkt_first_seg; /* first segment of current packet */\n+\tstruct rte_mbuf *pkt_last_seg;  /* last segment of current packet */\n+\tstruct rte_mbuf fake_mbuf;      /* dummy mbuf */\n+\n+\tuint16_t rx_nb_avail;\n+\tuint16_t rx_next_avail;\n+\n+\tuint16_t port_id;       /* device port ID */\n+\tuint16_t queue_id;      /* Rx queue index */\n+\tuint16_t rx_buf_len;    /* The packet buffer size */\n+\tuint16_t rx_hdr_len;    /* The header buffer size */\n+\tuint16_t max_pkt_len;   /* Maximum packet length */\n+\tuint8_t rxdid;\n+\n+\tbool q_set;             /* if rx queue has been configured */\n+\tbool q_started;         /* if rx queue has been started */\n+\n+\t/* only valid for split queue mode */\n+\tuint8_t expected_gen_id;\n+\tstruct idpf_rx_queue *bufq1;\n+\tstruct idpf_rx_queue *bufq2;\n+\n+\tuint64_t offloads;\n+\tuint32_t hw_register_set;\n+};\n+\n struct idpf_tx_entry {\n \tstruct rte_mbuf *mbuf;\n \tuint16_t next_id;\n@@ -63,6 +105,10 @@ struct idpf_tx_queue {\n \tstruct idpf_tx_queue *complq;\n };\n \n+int idpf_rx_queue_setup(struct 
rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\t\tstruct rte_mempool *mp);\n int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tuint16_t nb_desc, unsigned int socket_id,\n \t\t\tconst struct rte_eth_txconf *tx_conf);\n",
    "prefixes": [
        "v14",
        "04/18"
    ]
}