get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing all writable fields.
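
Reads require no authentication. A minimal sketch in Python (using the
requests library) that retrieves the same JSON document shown below:

import requests

# Fetch the patch detail as JSON from the public Patchwork instance.
resp = requests.get("http://patchwork.dpdk.org/api/patches/118742/",
                    timeout=30)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])  # "[v8,03/14] net/idpf: ..." "superseded"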

GET /api/patches/118742/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118742,
    "url": "http://patchwork.dpdk.org/api/patches/118742/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221020062951.645121-4-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221020062951.645121-4-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221020062951.645121-4-junfeng.guo@intel.com",
    "date": "2022-10-20T06:29:40",
    "name": "[v8,03/14] net/idpf: add queue setup and release in single queue model",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "0c5b4770e41b83177e4c50fd7df0d43f3a2b1687",
    "submitter": {
        "id": 1785,
        "url": "http://patchwork.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221020062951.645121-4-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25330,
            "url": "http://patchwork.dpdk.org/api/series/25330/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25330",
            "date": "2022-10-20T06:29:37",
            "name": "add support for idpf PMD in DPDK",
            "version": 8,
            "mbox": "http://patchwork.dpdk.org/series/25330/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/118742/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/118742/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CDA50A0A02;\n\tThu, 20 Oct 2022 08:32:19 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 67B8342CE2;\n\tThu, 20 Oct 2022 08:31:57 +0200 (CEST)",
            "from mga06.intel.com (mga06b.intel.com [134.134.136.31])\n by mails.dpdk.org (Postfix) with ESMTP id 0653A42CCC\n for <dev@dpdk.org>; Thu, 20 Oct 2022 08:31:53 +0200 (CEST)",
            "from orsmga006.jf.intel.com ([10.7.209.51])\n by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 19 Oct 2022 23:31:53 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by orsmga006.jf.intel.com with ESMTP; 19 Oct 2022 23:31:50 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666247514; x=1697783514;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=7kkeuY/4OaMWf5InfiHrX2v9bl+Fl1Hu7skgVvbM7n8=;\n b=OtxxeZr2RDyheM18YJLa986vqFaWczHJAm1MLUZ4o7UkRh6Sze6LCCo9\n iymtYk5vwhOje80nc5HfKzQ0/QtbF5JKNHN04ahjOSitc3B01U/leW+WQ\n 4gtJfeh7z197VA5DWQlAN800iVW3yoc6tmXk2uHVq/FIVFHCbDeUANj4o\n xd3TnVr3pJyURxR8aqXES4+IpEuqpf+801rYEe5I/dAmdQ9l7GsGhNcLs\n E7rZS0LpdTOjwPMmzd952rdkYy2c6S1F60XoE9oU3GrgXvpKJO3wtQwRT\n 0FRtaDytuMDuu7ORY8xGe1hXQLHLpiLL4XhQvi4twF2k3OOoS6kt8dr3i Q==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10505\"; a=\"368666877\"",
            "E=Sophos;i=\"5.95,198,1661842800\"; d=\"scan'208\";a=\"368666877\"",
            "E=McAfee;i=\"6500,9779,10505\"; a=\"607481347\"",
            "E=Sophos;i=\"5.95,198,1661842800\"; d=\"scan'208\";a=\"607481347\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,\n jingjing.wu@intel.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,\n Xiaoyun Li <xiaoyun.li@intel.com>",
        "Subject": "[PATCH v8 03/14] net/idpf: add queue setup and release in single\n queue model",
        "Date": "Thu, 20 Oct 2022 14:29:40 +0800",
        "Message-Id": "<20221020062951.645121-4-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221020062951.645121-1-junfeng.guo@intel.com>",
        "References": "<20221020024135.338280-15-junfeng.guo@intel.com>\n <20221020062951.645121-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add support for queue operations in single queue model:\n - rx_queue_setup\n - rx_queue_release\n - tx_queue_setup\n - tx_queue_release\n\nIn the single queue model, the same descriptor queue is used by SW to\npost buffer descriptors to HW and by HW to post completed descriptors\nto SW.\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n doc/guides/nics/features/idpf.ini |   2 +\n doc/guides/nics/idpf.rst          |  22 ++\n drivers/net/idpf/idpf_ethdev.c    |  58 ++++\n drivers/net/idpf/idpf_ethdev.h    |   9 +\n drivers/net/idpf/idpf_rxtx.c      | 465 ++++++++++++++++++++++++++++++\n drivers/net/idpf/idpf_rxtx.h      | 186 ++++++++++++\n drivers/net/idpf/idpf_vchnl.c     | 251 ++++++++++++++++\n drivers/net/idpf/meson.build      |   1 +\n 8 files changed, 994 insertions(+)\n create mode 100644 drivers/net/idpf/idpf_rxtx.c\n create mode 100644 drivers/net/idpf/idpf_rxtx.h",
    "diff": "diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini\nindex f029a279b3..681a908194 100644\n--- a/doc/guides/nics/features/idpf.ini\n+++ b/doc/guides/nics/features/idpf.ini\n@@ -7,6 +7,8 @@\n ; is selected.\n ;\n [Features]\n+Runtime Rx queue setup = Y\n+Runtime Tx queue setup = Y\n Multiprocess aware   = Y\n FreeBSD              = Y\n Linux                = Y\ndiff --git a/doc/guides/nics/idpf.rst b/doc/guides/nics/idpf.rst\nindex 428bf4266a..fbc0f51de6 100644\n--- a/doc/guides/nics/idpf.rst\n+++ b/doc/guides/nics/idpf.rst\n@@ -45,6 +45,28 @@ Runtime Config Options\n   Then idpf PMD will create 3 vports (ethdevs) for device ca:00.0.\n   NOTE: This parameter is MUST, otherwise there'll be no any ethdev created.\n \n+- ``rx_single`` (default ``0``)\n+\n+  There're two queue modes supported by Intel® IPU Ethernet ES2000 Series, single queue\n+  mode and split queue mode for Rx queue. User can choose Rx queue mode by the ``devargs``\n+  parameter ``rx_single``.\n+\n+    -a ca:00.0,rx_single=1\n+\n+  Then idpf PMD will configure Rx queue with single queue mode. Otherwise, split queue\n+  mode is chosen by default.\n+\n+- ``tx_single`` (default ``0``)\n+\n+  There're two queue modes supported by Intel® IPU Ethernet ES2000 Series, single queue\n+  mode and split queue mode for Tx queue. User can choose Tx queue mode by the ``devargs``\n+  parameter ``tx_single``.\n+\n+    -a ca:00.0,tx_single=1\n+\n+  Then idpf PMD will configure Tx queue with single queue mode. Otherwise, split queue\n+  mode is chosen by default.\n+\n Driver compilation and testing\n ------------------------------\n \ndiff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nindex 7806c43668..96af54f47b 100644\n--- a/drivers/net/idpf/idpf_ethdev.c\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -10,13 +10,18 @@\n #include <rte_dev.h>\n \n #include \"idpf_ethdev.h\"\n+#include \"idpf_rxtx.h\"\n \n+#define IDPF_TX_SINGLE_Q\t\"tx_single\"\n+#define IDPF_RX_SINGLE_Q\t\"rx_single\"\n #define IDPF_VPORT\t\t\"vport\"\n \n struct idpf_adapter_list adapter_list;\n bool adapter_list_init;\n \n static const char * const idpf_valid_args[] = {\n+\tIDPF_TX_SINGLE_Q,\n+\tIDPF_RX_SINGLE_Q,\n \tIDPF_VPORT,\n \tNULL\n };\n@@ -52,6 +57,10 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {\n \t.dev_start\t\t\t= idpf_dev_start,\n \t.dev_stop\t\t\t= idpf_dev_stop,\n \t.dev_close\t\t\t= idpf_dev_close,\n+\t.rx_queue_setup\t\t\t= idpf_rx_queue_setup,\n+\t.rx_queue_release\t\t= idpf_dev_rx_queue_release,\n+\t.tx_queue_setup\t\t\t= idpf_tx_queue_setup,\n+\t.tx_queue_release\t\t= idpf_dev_tx_queue_release,\n \t.link_update\t\t\t= idpf_dev_link_update,\n };\n \n@@ -81,6 +90,18 @@ idpf_init_vport_req_info(struct rte_eth_dev *dev)\n \t\t(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];\n \n \tvport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);\n+\tif (adapter->txq_model) {\n+\t\tvport_info->txq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);\n+\t\tvport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;\n+\t\tvport_info->num_tx_complq = 0;\n+\t}\n+\tif (adapter->rxq_model) {\n+\t\tvport_info->rxq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);\n+\t\tvport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;\n+\t\tvport_info->num_rx_bufq = 0;\n+\t}\n \n \treturn 0;\n }\n@@ -110,6 +131,8 @@ idpf_init_vport(struct rte_eth_dev *dev)\n \tint i;\n \n \tvport->vport_id = vport_info->vport_id;\n+\tvport->txq_model = vport_info->txq_model;\n+\tvport->rxq_model = 
vport_info->rxq_model;\n \tvport->num_tx_q = vport_info->num_tx_q;\n \tvport->num_rx_q = vport_info->num_rx_q;\n \tvport->max_mtu = vport_info->max_mtu;\n@@ -149,6 +172,12 @@ idpf_init_vport(struct rte_eth_dev *dev)\n static int\n idpf_dev_configure(__rte_unused struct rte_eth_dev *dev)\n {\n+\tif (dev->data->nb_tx_queues > IDPF_DEFAULT_TXQ_NUM ||\n+\t    dev->data->nb_rx_queues > IDPF_DEFAULT_RXQ_NUM) {\n+\t\tPMD_INIT_LOG(ERR, \"Invalid queue number.\");\n+\t\treturn -EINVAL;\n+\t}\n+\n \treturn 0;\n }\n \n@@ -312,6 +341,25 @@ parse_vport(const char *key, const char *value, void *args)\n \treturn 0;\n }\n \n+static int\n+parse_bool(const char *key, const char *value, void *args)\n+{\n+\tint *i = (int *)args;\n+\tchar *end;\n+\tint num;\n+\n+\tnum = strtoul(value, &end, 10);\n+\n+\tif (num != 0 && num != 1) {\n+\t\tPMD_INIT_LOG(ERR, \"invalid value:\\\"%s\\\" for key:\\\"%s\\\", value must be 0 or 1\",\n+\t\t\tvalue, key);\n+\t\treturn -1;\n+\t}\n+\n+\t*i = num;\n+\treturn 0;\n+}\n+\n static int\n idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)\n {\n@@ -333,6 +381,16 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)\n \tif (ret)\n \t\tgoto bail;\n \n+\tret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,\n+\t\t\t\t &adapter->txq_model);\n+\tif (ret)\n+\t\tgoto bail;\n+\n+\tret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,\n+\t\t\t\t &adapter->rxq_model);\n+\tif (ret)\n+\t\tgoto bail;\n+\n bail:\n \trte_kvargs_free(kvlist);\n \treturn ret;\ndiff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h\nindex 77824d5f7f..8b7170f49e 100644\n--- a/drivers/net/idpf/idpf_ethdev.h\n+++ b/drivers/net/idpf/idpf_ethdev.h\n@@ -83,6 +83,8 @@ struct idpf_chunks_info {\n struct idpf_vport {\n \tstruct idpf_adapter *adapter; /* Backreference to associated adapter */\n \tuint16_t vport_id;\n+\tuint32_t txq_model;\n+\tuint32_t rxq_model;\n \tuint16_t num_tx_q;\n \tuint16_t num_rx_q;\n \n@@ -118,6 +120,9 @@ struct idpf_adapter {\n \tuint32_t cmd_retval; /* return value of the cmd response from ipf */\n \tuint8_t *mbx_resp; /* buffer to store the mailbox response from ipf */\n \n+\tuint32_t txq_model;\n+\tuint32_t rxq_model;\n+\n \t/* Vport info */\n \tuint8_t **vport_req_info;\n \tuint8_t **vport_recv_info;\n@@ -197,6 +202,10 @@ int idpf_vc_check_api_version(struct idpf_adapter *adapter);\n int idpf_vc_get_caps(struct idpf_adapter *adapter);\n int idpf_vc_create_vport(struct rte_eth_dev *dev);\n int idpf_vc_destroy_vport(struct idpf_vport *vport);\n+int idpf_vc_config_rxqs(struct idpf_vport *vport);\n+int idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id);\n+int idpf_vc_config_txqs(struct idpf_vport *vport);\n+int idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id);\n int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);\n int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,\n \t\t      uint16_t buf_len, uint8_t *buf);\ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nnew file mode 100644\nindex 0000000000..bff90dd9c6\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -0,0 +1,465 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#include <ethdev_driver.h>\n+#include <rte_net.h>\n+\n+#include \"idpf_ethdev.h\"\n+#include \"idpf_rxtx.h\"\n+\n+static inline int\n+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)\n+{\n+\t/* The following constraints must be satisfied:\n+\t *   thresh 
< rxq->nb_rx_desc\n+\t */\n+\tif (thresh >= nb_desc) {\n+\t\tPMD_INIT_LOG(ERR, \"rx_free_thresh (%u) must be less than %u\",\n+\t\t\t     thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,\n+\t\tuint16_t tx_free_thresh)\n+{\n+\t/* TX descriptors will have their RS bit set after tx_rs_thresh\n+\t * descriptors have been used. The TX descriptor ring will be cleaned\n+\t * after tx_free_thresh descriptors are used or if the number of\n+\t * descriptors required to transmit a packet is greater than the\n+\t * number of free TX descriptors.\n+\t *\n+\t * The following constraints must be satisfied:\n+\t *  - tx_rs_thresh must be less than the size of the ring minus 2.\n+\t *  - tx_free_thresh must be less than the size of the ring minus 3.\n+\t *  - tx_rs_thresh must be less than or equal to tx_free_thresh.\n+\t *  - tx_rs_thresh must be a divisor of the ring size.\n+\t *\n+\t * One descriptor in the TX ring is used as a sentinel to avoid a H/W\n+\t * race condition, hence the maximum threshold constraints. When set\n+\t * to zero use default values.\n+\t */\n+\tif (tx_rs_thresh >= (nb_desc - 2)) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh (%u) must be less than the \"\n+\t\t\t     \"number of TX descriptors (%u) minus 2\",\n+\t\t\t     tx_rs_thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (tx_free_thresh >= (nb_desc - 3)) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_free_thresh (%u) must be less than the \"\n+\t\t\t     \"number of TX descriptors (%u) minus 3.\",\n+\t\t\t     tx_free_thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (tx_rs_thresh > tx_free_thresh) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh (%u) must be less than or \"\n+\t\t\t     \"equal to tx_free_thresh (%u).\",\n+\t\t\t     tx_rs_thresh, tx_free_thresh);\n+\t\treturn -EINVAL;\n+\t}\n+\tif ((nb_desc % tx_rs_thresh) != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh (%u) must be a divisor of the \"\n+\t\t\t     \"number of TX descriptors (%u).\",\n+\t\t\t     tx_rs_thresh, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline void\n+release_rxq_mbufs(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t i;\n+\n+\tif (!rxq->sw_ring)\n+\t\treturn;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tif (rxq->sw_ring[i]) {\n+\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i]);\n+\t\t\trxq->sw_ring[i] = NULL;\n+\t\t}\n+\t}\n+}\n+\n+static inline void\n+release_txq_mbufs(struct idpf_tx_queue *txq)\n+{\n+\tuint16_t nb_desc, i;\n+\n+\tif (!txq || !txq->sw_ring) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to rxq or sw_ring is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tif (txq->sw_nb_desc) {\n+\t\tnb_desc = 0;\n+\t} else {\n+\t\t/* For single queue model */\n+\t\tnb_desc = txq->nb_tx_desc;\n+\t}\n+\tfor (i = 0; i < nb_desc; i++) {\n+\t\tif (txq->sw_ring[i].mbuf) {\n+\t\t\trte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);\n+\t\t\ttxq->sw_ring[i].mbuf = NULL;\n+\t\t}\n+\t}\n+}\n+\n+static const struct idpf_rxq_ops def_rxq_ops = {\n+\t.release_mbufs = release_rxq_mbufs,\n+};\n+\n+static const struct idpf_txq_ops def_txq_ops = {\n+\t.release_mbufs = release_txq_mbufs,\n+};\n+\n+static void\n+idpf_rx_queue_release(void *rxq)\n+{\n+\tstruct idpf_rx_queue *q = (struct idpf_rx_queue *)rxq;\n+\n+\tif (!q)\n+\t\treturn;\n+\n+\t/* Single queue */\n+\tq->ops->release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->mz);\n+\trte_free(q);\n+}\n+\n+static void\n+idpf_tx_queue_release(void *txq)\n+{\n+\tstruct idpf_tx_queue *q = (struct idpf_tx_queue *)txq;\n+\n+\tif 
(!q)\n+\t\treturn;\n+\n+\trte_free(q->complq);\n+\tq->ops->release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->mz);\n+\trte_free(q);\n+}\n+\n+static inline void\n+reset_single_rx_queue(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (!rxq)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);\n+\t     i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\n+\tfor (i = 0; i < IDPF_RX_MAX_BURST; i++)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;\n+\n+\trxq->rx_tail = 0;\n+\trxq->nb_rx_hold = 0;\n+\n+\tif (rxq->pkt_first_seg != NULL)\n+\t\trte_pktmbuf_free(rxq->pkt_first_seg);\n+\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n+\trxq->rxrearm_start = 0;\n+\trxq->rxrearm_nb = 0;\n+}\n+\n+static inline void\n+reset_single_tx_queue(struct idpf_tx_queue *txq)\n+{\n+\tstruct idpf_tx_entry *txe;\n+\tuint32_t i, size;\n+\tuint16_t prev;\n+\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to txq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\ttxe = txq->sw_ring;\n+\tsize = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)txq->tx_ring)[i] = 0;\n+\n+\tprev = (uint16_t)(txq->nb_tx_desc - 1);\n+\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\ttxq->tx_ring[i].qw1.cmd_dtype =\n+\t\t\trte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);\n+\t\ttxe[i].mbuf =  NULL;\n+\t\ttxe[i].last_id = i;\n+\t\ttxe[prev].next_id = i;\n+\t\tprev = i;\n+\t}\n+\n+\ttxq->tx_tail = 0;\n+\ttxq->nb_used = 0;\n+\n+\ttxq->last_desc_cleaned = txq->nb_tx_desc - 1;\n+\ttxq->nb_free = txq->nb_tx_desc - 1;\n+\n+\ttxq->next_dd = txq->rs_thresh - 1;\n+\ttxq->next_rs = txq->rs_thresh - 1;\n+}\n+\n+static int\n+idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t   uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t   const struct rte_eth_rxconf *rx_conf,\n+\t\t\t   struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tconst struct rte_memzone *mz;\n+\tstruct idpf_rx_queue *rxq;\n+\tuint16_t rx_free_thresh;\n+\tuint32_t ring_size;\n+\tuint64_t offloads;\n+\tuint16_t len;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > IDPF_MAX_RING_DESC ||\n+\t    nb_desc < IDPF_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of receive descriptors is invalid\",\n+\t\t\t     nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\toffloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;\n+\n+\t/* Check free threshold */\n+\trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n+\t\tIDPF_DEFAULT_RX_FREE_THRESH :\n+\t\trx_conf->rx_free_thresh;\n+\tif (check_rx_thresh(nb_desc, rx_free_thresh))\n+\t\treturn -EINVAL;\n+\n+\t/* Free memory if needed */\n+\tif (dev->data->rx_queues[queue_idx]) {\n+\t\tidpf_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Setup Rx description queue */\n+\trxq = rte_zmalloc_socket(\"idpf rxq\",\n+\t\t\t\t sizeof(struct idpf_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (!rxq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->mp = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = 
rx_free_thresh;\n+\trxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\trxq->rx_hdr_len = 0;\n+\trxq->adapter = adapter;\n+\trxq->offloads = offloads;\n+\n+\tlen = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;\n+\trxq->rx_buf_len = len;\n+\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\trxq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf rxq sw ring\",\n+\t\t\t\t   sizeof(struct rte_mbuf *) * len,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!rxq->sw_ring) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate a liitle more to support bulk allocate. */\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tring_size = RTE_ALIGN(len *\n+\t\t\t      sizeof(struct virtchnl2_singleq_rx_buf_desc),\n+\t\t\t      IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX buffer queue.\");\n+\t\trte_free(rxq->sw_ring);\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. */\n+\tmemset(mz->addr, 0, ring_size);\n+\trxq->rx_ring_phys_addr = mz->iova;\n+\trxq->rx_ring = mz->addr;\n+\n+\trxq->mz = mz;\n+\treset_single_rx_queue(rxq);\n+\trxq->q_set = true;\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\trxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +\n+\t\t\tqueue_idx * vport->chunks_info.rx_qtail_spacing);\n+\trxq->ops = &def_rxq_ops;\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t    uint16_t nb_desc, unsigned int socket_id,\n+\t\t    const struct rte_eth_rxconf *rx_conf,\n+\t\t    struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n+\t\treturn idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t  socket_id, rx_conf, mp);\n+\telse\n+\t\treturn -1;\n+}\n+\n+static int\n+idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t   uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t   const struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tuint16_t tx_rs_thresh, tx_free_thresh;\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tconst struct rte_memzone *mz;\n+\tstruct idpf_tx_queue *txq;\n+\tuint32_t ring_size;\n+\tuint64_t offloads;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\toffloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;\n+\n+\tif (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > IDPF_MAX_RING_DESC ||\n+\t    nb_desc < IDPF_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of transmit descriptors is invalid\",\n+\t\t\t     nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?\n+\t\ttx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);\n+\ttx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?\n+\t\ttx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);\n+\tif (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh))\n+\t\treturn -EINVAL;\n+\n+\t/* Free memory if needed. 
*/\n+\tif (dev->data->tx_queues[queue_idx]) {\n+\t\tidpf_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tdev->data->tx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Allocate the TX queue data structure. */\n+\ttxq = rte_zmalloc_socket(\"idpf txq\",\n+\t\t\t\t sizeof(struct idpf_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (!txq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* TODO: vlan offload */\n+\n+\ttxq->nb_tx_desc = nb_desc;\n+\ttxq->rs_thresh = tx_rs_thresh;\n+\ttxq->free_thresh = tx_free_thresh;\n+\ttxq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;\n+\ttxq->port_id = dev->data->port_id;\n+\ttxq->offloads = offloads;\n+\ttxq->tx_deferred_start = tx_conf->tx_deferred_start;\n+\n+\t/* Allocate software ring */\n+\ttxq->sw_ring =\n+\t\trte_zmalloc_socket(\"idpf tx sw ring\",\n+\t\t\t\t   sizeof(struct idpf_tx_entry) * nb_desc,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!txq->sw_ring) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n+\t\trte_free(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate TX hardware ring descriptors. */\n+\tring_size = sizeof(struct idpf_flex_tx_desc) * nb_desc;\n+\tring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);\n+\tmz = rte_eth_dma_zone_reserve(dev, \"tx_ring\", queue_idx,\n+\t\t\t\t      ring_size, IDPF_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!mz) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX\");\n+\t\trte_free(txq->sw_ring);\n+\t\trte_free(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->tx_ring_phys_addr = mz->iova;\n+\ttxq->tx_ring = (struct idpf_flex_tx_desc *)mz->addr;\n+\n+\ttxq->mz = mz;\n+\treset_single_tx_queue(txq);\n+\ttxq->q_set = true;\n+\tdev->data->tx_queues[queue_idx] = txq;\n+\ttxq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +\n+\t\t\tqueue_idx * vport->chunks_info.tx_qtail_spacing);\n+\ttxq->ops = &def_txq_ops;\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t    uint16_t nb_desc, unsigned int socket_id,\n+\t\t    const struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n+\t\treturn idpf_tx_single_queue_setup(dev, queue_idx, nb_desc,\n+\t\t\t\t\t\t  socket_id, tx_conf);\n+\telse\n+\t\treturn -1;\n+}\n+\n+void\n+idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\tidpf_rx_queue_release(dev->data->rx_queues[qid]);\n+}\n+\n+void\n+idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\tidpf_tx_queue_release(dev->data->tx_queues[qid]);\n+}\ndiff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h\nnew file mode 100644\nindex 0000000000..69a1fa6348\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_rxtx.h\n@@ -0,0 +1,186 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_RXTX_H_\n+#define _IDPF_RXTX_H_\n+\n+#include \"idpf_osdep.h\"\n+#include \"idpf_type.h\"\n+#include \"idpf_devids.h\"\n+#include \"idpf_lan_txrx.h\"\n+#include \"idpf_lan_pf_regs.h\"\n+#include \"virtchnl.h\"\n+#include \"virtchnl2.h\"\n+#include \"virtchnl2_lan_desc.h\"\n+\n+#include \"idpf_ethdev.h\"\n+\n+/* In QLEN must be whole number of 32 descriptors. 
*/\n+#define IDPF_ALIGN_RING_DESC\t32\n+#define IDPF_MIN_RING_DESC\t32\n+#define IDPF_MAX_RING_DESC\t4096\n+#define IDPF_DMA_MEM_ALIGN\t4096\n+/* Base address of the HW descriptor ring should be 128B aligned. */\n+#define IDPF_RING_BASE_ALIGN\t128\n+\n+/* used for Rx Bulk Allocate */\n+#define IDPF_RX_MAX_BURST\t32\n+#define IDPF_TX_MAX_BURST\t32\n+\n+#define IDPF_DEFAULT_RX_FREE_THRESH\t32\n+\n+/* used for Vector PMD */\n+#define IDPF_VPMD_RX_MAX_BURST\t32\n+#define IDPF_VPMD_TX_MAX_BURST\t32\n+#define IDPF_VPMD_DESCS_PER_LOOP\t4\n+#define IDPF_RXQ_REARM_THRESH\t64\n+\n+#define IDPF_DEFAULT_TX_RS_THRESH\t32\n+#define IDPF_DEFAULT_TX_FREE_THRESH\t32\n+\n+#define IDPF_MIN_TSO_MSS\t88\n+#define IDPF_MAX_TSO_MSS\t9728\n+#define IDPF_MAX_TSO_FRAME_SIZE\t262143\n+#define IDPF_TX_MAX_MTU_SEG     10\n+\n+#define IDPF_TX_OFFLOAD_NOTSUP_MASK \\\n+\t\t(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)\n+\n+struct idpf_rx_queue {\n+\tstruct idpf_adapter *adapter;\t/* the adapter this queue belongs to */\n+\tstruct rte_mempool *mp;\t\t/* mbuf pool to populate Rx ring */\n+\tconst struct rte_memzone *mz;\t/* memzone for Rx ring */\n+\tvolatile void *rx_ring;\n+\tstruct rte_mbuf **sw_ring;\t/* address of SW ring */\n+\tuint64_t rx_ring_phys_addr;\t/* Rx ring DMA address */\n+\n+\tuint16_t nb_rx_desc;\t\t/* ring length */\n+\tuint16_t rx_tail;\t\t/* current value of tail */\n+\tvolatile uint8_t *qrx_tail;\t/* register address of tail */\n+\tuint16_t rx_free_thresh;\t/* max free RX desc to hold */\n+\tuint16_t nb_rx_hold;\t\t/* number of held free RX desc */\n+\tstruct rte_mbuf *pkt_first_seg;\t/* first segment of current packet */\n+\tstruct rte_mbuf *pkt_last_seg;\t/* last segment of current packet */\n+\tstruct rte_mbuf fake_mbuf;\t/* dummy mbuf */\n+\n+\t/* used for VPMD */\n+\tuint16_t rxrearm_nb;       /* number of remaining to be re-armed */\n+\tuint16_t rxrearm_start;    /* the idx we start the re-arming from */\n+\tuint64_t mbuf_initializer; /* value to init mbufs */\n+\n+\t/* for rx bulk */\n+\tuint16_t rx_nb_avail;      /* number of staged packets ready */\n+\tuint16_t rx_next_avail;    /* index of next staged packets */\n+\tuint16_t rx_free_trigger;  /* triggers rx buffer allocation */\n+\tstruct rte_mbuf *rx_stage[IDPF_RX_MAX_BURST * 2]; /* store mbuf */\n+\n+\tuint16_t port_id;\t/* device port ID */\n+\tuint16_t queue_id;      /* Rx queue index */\n+\tuint16_t rx_buf_len;    /* The packet buffer size */\n+\tuint16_t rx_hdr_len;    /* The header buffer size */\n+\tuint16_t max_pkt_len;   /* Maximum packet length */\n+\tuint8_t rxdid;\n+\n+\tbool q_set;\t\t/* if rx queue has been configured */\n+\tbool q_started;\t\t/* if rx queue has been started */\n+\tbool rx_deferred_start;\t/* don't start this queue in dev start */\n+\tconst struct idpf_rxq_ops *ops;\n+\n+\t/* only valid for split queue mode */\n+\tuint8_t expected_gen_id;\n+\tstruct idpf_rx_queue *bufq1;\n+\tstruct idpf_rx_queue *bufq2;\n+\n+\tuint64_t offloads;\n+\tuint32_t hw_register_set;\n+};\n+\n+struct idpf_tx_entry {\n+\tstruct rte_mbuf *mbuf;\n+\tuint16_t next_id;\n+\tuint16_t last_id;\n+};\n+\n+struct idpf_tx_vec_entry {\n+\tstruct rte_mbuf *mbuf;\n+};\n+\n+/* Structure associated with each TX queue. 
*/\n+struct idpf_tx_queue {\n+\tconst struct rte_memzone *mz;\t\t/* memzone for Tx ring */\n+\tvolatile struct idpf_flex_tx_desc *tx_ring;\t/* Tx ring virtual address */\n+\tvolatile union {\n+\t\tstruct idpf_flex_tx_sched_desc *desc_ring;\n+\t\tstruct idpf_splitq_tx_compl_desc *compl_ring;\n+\t};\n+\tuint64_t tx_ring_phys_addr;\t\t/* Tx ring DMA address */\n+\tstruct idpf_tx_entry *sw_ring;\t\t/* address array of SW ring */\n+\n+\tuint16_t nb_tx_desc;\t\t/* ring length */\n+\tuint16_t tx_tail;\t\t/* current value of tail */\n+\tvolatile uint8_t *qtx_tail;\t/* register address of tail */\n+\t/* number of used desc since RS bit set */\n+\tuint16_t nb_used;\n+\tuint16_t nb_free;\n+\tuint16_t last_desc_cleaned;\t/* last desc have been cleaned*/\n+\tuint16_t free_thresh;\n+\tuint16_t rs_thresh;\n+\n+\tuint16_t port_id;\n+\tuint16_t queue_id;\n+\tuint64_t offloads;\n+\tuint16_t next_dd;\t/* next to set RS, for VPMD */\n+\tuint16_t next_rs;\t/* next to check DD,  for VPMD */\n+\n+\tbool q_set;\t\t/* if tx queue has been configured */\n+\tbool q_started;\t\t/* if tx queue has been started */\n+\tbool tx_deferred_start;\t/* don't start this queue in dev start */\n+\tconst struct idpf_txq_ops *ops;\n+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1       BIT(0)\n+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2       BIT(1)\n+\tuint8_t vlan_flag;\n+\n+\t/* only valid for split queue mode */\n+\tuint16_t sw_nb_desc;\n+\tuint16_t sw_tail;\n+\tvoid **txqs;\n+\tuint32_t tx_start_qid;\n+\tuint8_t expected_gen_id;\n+\tstruct idpf_tx_queue *complq;\n+};\n+\n+/* Offload features */\n+union idpf_tx_offload {\n+\tuint64_t data;\n+\tstruct {\n+\t\tuint64_t l2_len:7; /* L2 (MAC) Header Length. */\n+\t\tuint64_t l3_len:9; /* L3 (IP) Header Length. */\n+\t\tuint64_t l4_len:8; /* L4 Header Length. 
*/\n+\t\tuint64_t tso_segsz:16; /* TCP TSO segment size */\n+\t\t/* uint64_t unused : 24; */\n+\t};\n+};\n+\n+struct idpf_rxq_ops {\n+\tvoid (*release_mbufs)(struct idpf_rx_queue *rxq);\n+};\n+\n+struct idpf_txq_ops {\n+\tvoid (*release_mbufs)(struct idpf_tx_queue *txq);\n+};\n+\n+int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\t\tstruct rte_mempool *mp);\n+int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+\n+int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\t\tconst struct rte_eth_txconf *tx_conf);\n+int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+\n+#endif /* _IDPF_RXTX_H_ */\ndiff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c\nindex ef0288ff45..88cda54a26 100644\n--- a/drivers/net/idpf/idpf_vchnl.c\n+++ b/drivers/net/idpf/idpf_vchnl.c\n@@ -21,6 +21,7 @@\n #include <rte_dev.h>\n \n #include \"idpf_ethdev.h\"\n+#include \"idpf_rxtx.h\"\n \n #include \"idpf_prototype.h\"\n \n@@ -469,6 +470,256 @@ idpf_vc_destroy_vport(struct idpf_vport *vport)\n \treturn err;\n }\n \n+#define IDPF_RX_BUF_STRIDE\t\t64\n+int\n+idpf_vc_config_rxqs(struct idpf_vport *vport)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_rx_queue **rxq =\n+\t\t(struct idpf_rx_queue **)vport->dev_data->rx_queues;\n+\tstruct virtchnl2_config_rx_queues *vc_rxqs = NULL;\n+\tstruct virtchnl2_rxq_info *rxq_info;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t total_qs, num_qs;\n+\tint size, i;\n+\tint err = 0;\n+\tint k = 0;\n+\n+\ttotal_qs = vport->num_rx_q;\n+\twhile (total_qs) {\n+\t\tif (total_qs > adapter->max_rxq_per_msg) {\n+\t\t\tnum_qs = adapter->max_rxq_per_msg;\n+\t\t\ttotal_qs -= adapter->max_rxq_per_msg;\n+\t\t} else {\n+\t\t\tnum_qs = total_qs;\n+\t\t\ttotal_qs = 0;\n+\t\t}\n+\n+\t\tsize = sizeof(*vc_rxqs) + (num_qs - 1) *\n+\t\t\tsizeof(struct virtchnl2_rxq_info);\n+\t\tvc_rxqs = rte_zmalloc(\"cfg_rxqs\", size, 0);\n+\t\tif (vc_rxqs == NULL) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate virtchnl2_config_rx_queues\");\n+\t\t\terr = -ENOMEM;\n+\t\t\tbreak;\n+\t\t}\n+\t\tvc_rxqs->vport_id = vport->vport_id;\n+\t\tvc_rxqs->num_qinfo = num_qs;\n+\t\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\t\tfor (i = 0; i < num_qs; i++, k++) {\n+\t\t\t\trxq_info = &vc_rxqs->qinfo[i];\n+\t\t\t\trxq_info->dma_ring_addr = rxq[k]->rx_ring_phys_addr;\n+\t\t\t\trxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;\n+\t\t\t\trxq_info->queue_id = rxq[k]->queue_id;\n+\t\t\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;\n+\t\t\t\trxq_info->data_buffer_size = rxq[k]->rx_buf_len;\n+\t\t\t\trxq_info->max_pkt_size = vport->max_pkt_len;\n+\n+\t\t\t\trxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;\n+\t\t\t\trxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;\n+\n+\t\t\t\trxq_info->ring_len = rxq[k]->nb_rx_desc;\n+\t\t\t}\n+\t\t} else {\n+\t\t\treturn -1;\n+\t\t}\n+\t\tmemset(&args, 0, sizeof(args));\n+\t\targs.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;\n+\t\targs.in_args = (uint8_t *)vc_rxqs;\n+\t\targs.in_args_size = size;\n+\t\targs.out_buffer = adapter->mbx_resp;\n+\t\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\t\terr = idpf_execute_vc_cmd(adapter, &args);\n+\t\trte_free(vc_rxqs);\n+\t\tif (err) 
{\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES\");\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_rx_queue **rxq =\n+\t\t(struct idpf_rx_queue **)vport->dev_data->rx_queues;\n+\tstruct virtchnl2_config_rx_queues *vc_rxqs = NULL;\n+\tstruct virtchnl2_rxq_info *rxq_info;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t num_qs;\n+\tint size, err;\n+\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n+\t\tnum_qs = IDPF_RXQ_PER_GRP;\n+\telse\n+\t\tnum_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;\n+\n+\tsize = sizeof(*vc_rxqs) + (num_qs - 1) *\n+\t\tsizeof(struct virtchnl2_rxq_info);\n+\tvc_rxqs = rte_zmalloc(\"cfg_rxqs\", size, 0);\n+\tif (vc_rxqs == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate virtchnl2_config_rx_queues\");\n+\t\terr = -ENOMEM;\n+\t\treturn err;\n+\t}\n+\tvc_rxqs->vport_id = vport->vport_id;\n+\tvc_rxqs->num_qinfo = num_qs;\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\trxq_info = &vc_rxqs->qinfo[0];\n+\t\trxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;\n+\t\trxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;\n+\t\trxq_info->queue_id = rxq[rxq_id]->queue_id;\n+\t\trxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;\n+\t\trxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;\n+\t\trxq_info->max_pkt_size = vport->max_pkt_len;\n+\n+\t\trxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;\n+\t\trxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;\n+\n+\t\trxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;\n+\t}  else {\n+\t\treturn -1;\n+\t}\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;\n+\targs.in_args = (uint8_t *)vc_rxqs;\n+\targs.in_args_size = size;\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\trte_free(vc_rxqs);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES\");\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_vc_config_txqs(struct idpf_vport *vport)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_tx_queue **txq =\n+\t\t(struct idpf_tx_queue **)vport->dev_data->tx_queues;\n+\tstruct virtchnl2_config_tx_queues *vc_txqs = NULL;\n+\tstruct virtchnl2_txq_info *txq_info;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t total_qs, num_qs;\n+\tint size, i;\n+\tint err = 0;\n+\tint k = 0;\n+\n+\ttotal_qs = vport->num_tx_q;\n+\twhile (total_qs) {\n+\t\tif (total_qs > adapter->max_txq_per_msg) {\n+\t\t\tnum_qs = adapter->max_txq_per_msg;\n+\t\t\ttotal_qs -= adapter->max_txq_per_msg;\n+\t\t} else {\n+\t\t\tnum_qs = total_qs;\n+\t\t\ttotal_qs = 0;\n+\t\t}\n+\t\tsize = sizeof(*vc_txqs) + (num_qs - 1) *\n+\t\t\tsizeof(struct virtchnl2_txq_info);\n+\t\tvc_txqs = rte_zmalloc(\"cfg_txqs\", size, 0);\n+\t\tif (vc_txqs == NULL) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate virtchnl2_config_tx_queues\");\n+\t\t\terr = -ENOMEM;\n+\t\t\tbreak;\n+\t\t}\n+\t\tvc_txqs->vport_id = vport->vport_id;\n+\t\tvc_txqs->num_qinfo = num_qs;\n+\t\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\t\tfor (i = 0; i < num_qs; i++, k++) {\n+\t\t\t\ttxq_info = &vc_txqs->qinfo[i];\n+\t\t\t\ttxq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;\n+\t\t\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;\n+\t\t\t\ttxq_info->queue_id = txq[k]->queue_id;\n+\t\t\t\ttxq_info->model = 
VIRTCHNL2_QUEUE_MODEL_SINGLE;\n+\t\t\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;\n+\t\t\t\ttxq_info->ring_len = txq[k]->nb_tx_desc;\n+\t\t\t}\n+\t\t} else {\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tmemset(&args, 0, sizeof(args));\n+\t\targs.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;\n+\t\targs.in_args = (uint8_t *)vc_txqs;\n+\t\targs.in_args_size = size;\n+\t\targs.out_buffer = adapter->mbx_resp;\n+\t\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\t\terr = idpf_execute_vc_cmd(adapter, &args);\n+\t\trte_free(vc_txqs);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES\");\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_tx_queue **txq =\n+\t\t(struct idpf_tx_queue **)vport->dev_data->tx_queues;\n+\tstruct virtchnl2_config_tx_queues *vc_txqs = NULL;\n+\tstruct virtchnl2_txq_info *txq_info;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t num_qs;\n+\tint size, err;\n+\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)\n+\t\tnum_qs = IDPF_TXQ_PER_GRP;\n+\telse\n+\t\tnum_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;\n+\n+\tsize = sizeof(*vc_txqs) + (num_qs - 1) *\n+\t\tsizeof(struct virtchnl2_txq_info);\n+\tvc_txqs = rte_zmalloc(\"cfg_txqs\", size, 0);\n+\tif (vc_txqs == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate virtchnl2_config_tx_queues\");\n+\t\terr = -ENOMEM;\n+\t\treturn err;\n+\t}\n+\tvc_txqs->vport_id = vport->vport_id;\n+\tvc_txqs->num_qinfo = num_qs;\n+\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\ttxq_info = &vc_txqs->qinfo[0];\n+\t\ttxq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;\n+\t\ttxq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;\n+\t\ttxq_info->queue_id = txq[txq_id]->queue_id;\n+\t\ttxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;\n+\t\ttxq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;\n+\t\ttxq_info->ring_len = txq[txq_id]->nb_tx_desc;\n+\t} else {\n+\t\treturn -1;\n+\t}\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;\n+\targs.in_args = (uint8_t *)vc_txqs;\n+\targs.in_args_size = size;\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\trte_free(vc_txqs);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES\");\n+\n+\treturn err;\n+}\n+\n int\n idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)\n {\ndiff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build\nindex bf8bf58ef5..832f887296 100644\n--- a/drivers/net/idpf/meson.build\n+++ b/drivers/net/idpf/meson.build\n@@ -12,5 +12,6 @@ deps += ['common_idpf', 'security', 'cryptodev']\n \n sources = files(\n     'idpf_ethdev.c',\n+    'idpf_rxtx.c',\n     'idpf_vchnl.c',\n )\n",
    "prefixes": [
        "v8",
        "03/14"
    ]
}
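
As the Allow header above indicates, the same endpoint also accepts PUT and
PATCH. A hedged sketch of a partial update via PATCH, assuming a Patchwork API
token with maintainer rights on the project (the token and the new state below
are placeholders, not taken from this response):

import requests

TOKEN = "0123456789abcdef"  # hypothetical token, shown for illustration only

resp = requests.patch(
    "http://patchwork.dpdk.org/api/patches/118742/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "accepted"},  # PATCH is partial: send only changed fields
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])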