get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
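A minimal sketch of driving these operations from a client, assuming the third-party Python "requests" library and a Patchwork API token with write permission for updates; the endpoint URL and the field names used here are taken from the sample response below, and everything else (token, state value) is purely illustrative.

import requests

BASE = "http://patchwork.dpdk.org/api"
PATCH_ID = 119197  # the patch shown below

# get: show a patch (read access needs no authentication).
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
data = resp.json()
print(data["name"], data["state"])

# patch: update a patch (partial update; requires an API token with
# maintainer rights). The "state" value is illustrative only -- the set of
# valid state names depends on the Patchwork instance.
# requests.patch(
#     f"{BASE}/patches/{PATCH_ID}/",
#     json={"state": "superseded"},
#     headers={"Authorization": "Token <your-token>"},
# ).raise_for_status()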

GET /api/patches/119197/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 119197,
    "url": "http://patchwork.dpdk.org/api/patches/119197/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221027054505.1369248-3-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221027054505.1369248-3-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221027054505.1369248-3-junfeng.guo@intel.com",
    "date": "2022-10-27T05:44:49",
    "name": "[v13,02/18] net/idpf: add support for device initialization",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f39d5188b9bdae308d366f718029c3ee6d196014",
    "submitter": {
        "id": 1785,
        "url": "http://patchwork.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221027054505.1369248-3-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25450,
            "url": "http://patchwork.dpdk.org/api/series/25450/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25450",
            "date": "2022-10-27T05:44:47",
            "name": "add support for idpf PMD in DPDK",
            "version": 13,
            "mbox": "http://patchwork.dpdk.org/series/25450/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/119197/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/119197/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 98D3EA00C5;\n\tThu, 27 Oct 2022 07:47:08 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 61FA642BBF;\n\tThu, 27 Oct 2022 07:46:58 +0200 (CEST)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n by mails.dpdk.org (Postfix) with ESMTP id 66821400D5\n for <dev@dpdk.org>; Thu, 27 Oct 2022 07:46:55 +0200 (CEST)",
            "from orsmga003.jf.intel.com ([10.7.209.27])\n by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 26 Oct 2022 22:46:55 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by orsmga003.jf.intel.com with ESMTP; 26 Oct 2022 22:46:52 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666849615; x=1698385615;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=HYSOS6nd2CHQjC0BAYdGQ4+YAa3xjRhtm2btz70aOSQ=;\n b=Cib8f6eT5Oh9RQS7w40TSDAP/9UDNsylJ2qhXJiv5kmTxAZV8xhxkhwd\n mHGLNMZzOpMUgApwq7Bt+0jkS+BppFFPlcsonUFmugyqsOu2JITBWYvXg\n Cvs1rGi0hxg0XRy4wEOMAJVsYaCNx5zrjQ5GstrM24ZP7c/oWbwFxlAnd\n ks5ETq1rdvUJonjfZSl9zWcdH1UjESOAcTsGVFxSCt474lCimmFxJ+BOF\n DtZYAzx9IlgnAlCMe9TQUclzHSoxZD9F9rEOtVSw+TvPEAmj21gS9MZzf\n 3GPZtW5N7HfseVvG4Wj2OOlXS4b1pfm4P0hlZavTrCiCkHBOS3fU3k32p A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10512\"; a=\"309831045\"",
            "E=Sophos;i=\"5.95,215,1661842800\"; d=\"scan'208\";a=\"309831045\"",
            "E=McAfee;i=\"6500,9779,10512\"; a=\"583429202\"",
            "E=Sophos;i=\"5.95,215,1661842800\"; d=\"scan'208\";a=\"583429202\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,\n jingjing.wu@intel.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,\n Xiaoyun Li <xiaoyun.li@intel.com>, Xiao Wang <xiao.w.wang@intel.com>,\n Wenjun Wu <wenjun1.wu@intel.com>",
        "Subject": "[PATCH v13 02/18] net/idpf: add support for device initialization",
        "Date": "Thu, 27 Oct 2022 13:44:49 +0800",
        "Message-Id": "<20221027054505.1369248-3-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221027054505.1369248-1-junfeng.guo@intel.com>",
        "References": "<20221026101027.240583-2-junfeng.guo@intel.com>\n <20221027054505.1369248-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Support device init and add the following dev ops:\n - dev_configure\n - dev_close\n - dev_infos_get\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Xiao Wang <xiao.w.wang@intel.com>\nSigned-off-by: Wenjun Wu <wenjun1.wu@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n MAINTAINERS                            |   9 +\n doc/guides/nics/features/idpf.ini      |   9 +\n doc/guides/nics/idpf.rst               |  66 ++\n doc/guides/nics/index.rst              |   1 +\n doc/guides/rel_notes/release_22_11.rst |   6 +\n drivers/net/idpf/idpf_ethdev.c         | 886 +++++++++++++++++++++++++\n drivers/net/idpf/idpf_ethdev.h         | 189 ++++++\n drivers/net/idpf/idpf_logs.h           |  56 ++\n drivers/net/idpf/idpf_vchnl.c          | 468 +++++++++++++\n drivers/net/idpf/meson.build           |  15 +\n drivers/net/idpf/version.map           |   3 +\n drivers/net/meson.build                |   1 +\n 12 files changed, 1709 insertions(+)\n create mode 100644 doc/guides/nics/features/idpf.ini\n create mode 100644 doc/guides/nics/idpf.rst\n create mode 100644 drivers/net/idpf/idpf_ethdev.c\n create mode 100644 drivers/net/idpf/idpf_ethdev.h\n create mode 100644 drivers/net/idpf/idpf_logs.h\n create mode 100644 drivers/net/idpf/idpf_vchnl.c\n create mode 100644 drivers/net/idpf/meson.build\n create mode 100644 drivers/net/idpf/version.map",
    "diff": "diff --git a/MAINTAINERS b/MAINTAINERS\nindex 92b381bc30..34f8b9cc61 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -764,6 +764,15 @@ F: drivers/net/ice/\n F: doc/guides/nics/ice.rst\n F: doc/guides/nics/features/ice.ini\n \n+Intel idpf\n+M: Jingjing Wu <jingjing.wu@intel.com>\n+M: Beilei Xing <beilei.xing@intel.com>\n+T: git://dpdk.org/next/dpdk-next-net-intel\n+F: drivers/net/idpf/\n+F: drivers/common/idpf/\n+F: doc/guides/nics/idpf.rst\n+F: doc/guides/nics/features/idpf.ini\n+\n Intel igc\n M: Junfeng Guo <junfeng.guo@intel.com>\n M: Simei Su <simei.su@intel.com>\ndiff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini\nnew file mode 100644\nindex 0000000000..46aab2eb61\n--- /dev/null\n+++ b/doc/guides/nics/features/idpf.ini\n@@ -0,0 +1,9 @@\n+;\n+; Supported features of the 'idpf' network poll mode driver.\n+;\n+; Refer to default.ini for the full list of available PMD features.\n+;\n+[Features]\n+Linux                = Y\n+x86-32               = Y\n+x86-64               = Y\ndiff --git a/doc/guides/nics/idpf.rst b/doc/guides/nics/idpf.rst\nnew file mode 100644\nindex 0000000000..c1001d5d0c\n--- /dev/null\n+++ b/doc/guides/nics/idpf.rst\n@@ -0,0 +1,66 @@\n+..  SPDX-License-Identifier: BSD-3-Clause\n+    Copyright(c) 2022 Intel Corporation.\n+\n+IDPF Poll Mode Driver\n+======================\n+\n+The [*EXPERIMENTAL*] idpf PMD (**librte_net_idpf**) provides poll mode driver support for\n+Intel® Infrastructure Processing Unit (Intel® IPU) E2000.\n+\n+\n+Linux Prerequisites\n+-------------------\n+\n+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.\n+\n+- To get better performance on Intel platforms, please follow the \"How to get best performance with NICs on Intel platforms\"\n+  section of the :ref:`Getting Started Guide for Linux <linux_gsg>`.\n+\n+\n+Pre-Installation Configuration\n+------------------------------\n+\n+Runtime Config Options\n+~~~~~~~~~~~~~~~~~~~~~~\n+\n+- ``vport`` (default ``0``)\n+\n+  The IDPF PMD supports creation of multiple vports for one PCI device, each vport\n+  corresponds to a single ethdev. Using the ``devargs`` parameter ``vport`` the user\n+  can specify the vports with specific ID to be created, for example::\n+\n+    -a ca:00.0,vport=[0,2,3]\n+\n+  Then idpf PMD will create 3 vports (ethdevs) for device ca:00.0.\n+  NOTE: If the parameter is not provided, the vport 0 will be created by default.\n+\n+- ``rx_single`` (default ``0``)\n+\n+  There're two queue modes supported by Intel® IPU Ethernet ES2000 Series, single queue\n+  mode and split queue mode for Rx queue. User can choose Rx queue mode by the ``devargs``\n+  parameter ``rx_single``.\n+\n+    -a ca:00.0,rx_single=1\n+\n+  Then idpf PMD will configure Rx queue with single queue mode. Otherwise, split queue\n+  mode is chosen by default.\n+\n+- ``tx_single`` (default ``0``)\n+\n+  There're two queue modes supported by Intel® IPU Ethernet ES2000 Series, single queue\n+  mode and split queue mode for Tx queue. User can choose Tx queue mode by the ``devargs``\n+  parameter ``tx_single``.\n+\n+    -a ca:00.0,tx_single=1\n+\n+  Then idpf PMD will configure Tx queue with single queue mode. 
Otherwise, split queue\n+  mode is chosen by default.\n+\n+\n+Driver compilation and testing\n+------------------------------\n+\n+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`\n+for details.\n+\n+\ndiff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst\nindex 32c7544968..b4dd5ea522 100644\n--- a/doc/guides/nics/index.rst\n+++ b/doc/guides/nics/index.rst\n@@ -33,6 +33,7 @@ Network Interface Controller Drivers\n     hns3\n     i40e\n     ice\n+    idpf\n     igb\n     igc\n     ionic\ndiff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst\nindex 1c3daf141d..4af790de37 100644\n--- a/doc/guides/rel_notes/release_22_11.rst\n+++ b/doc/guides/rel_notes/release_22_11.rst\n@@ -160,6 +160,12 @@ New Features\n \n   * Added protocol based buffer split support in scalar path.\n \n+* **Added IDPF PMD [*EXPERIMENTAL*].**\n+\n+  Added the new ``idpf`` net driver for Intel® Infrastructure Processing Unit\n+  (Intel® IPU) E2000.\n+  See the :doc:`../nics/idpf` NIC guide for more details on this new driver.\n+\n * **Updated Marvell cnxk driver.**\n \n   * Added support for flow action REPRESENTED_PORT.\ndiff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nnew file mode 100644\nindex 0000000000..112dccf5c9\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -0,0 +1,886 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#include <rte_atomic.h>\n+#include <rte_eal.h>\n+#include <rte_ether.h>\n+#include <rte_malloc.h>\n+#include <rte_memzone.h>\n+#include <rte_dev.h>\n+#include <errno.h>\n+\n+#include \"idpf_ethdev.h\"\n+\n+#define IDPF_TX_SINGLE_Q\t\"tx_single\"\n+#define IDPF_RX_SINGLE_Q\t\"rx_single\"\n+#define IDPF_VPORT\t\t\"vport\"\n+\n+rte_spinlock_t idpf_adapter_lock;\n+/* A list for all adapters, one adapter matches one PCI device */\n+struct idpf_adapter_list idpf_adapter_list;\n+bool idpf_adapter_list_init;\n+\n+static const char * const idpf_valid_args[] = {\n+\tIDPF_TX_SINGLE_Q,\n+\tIDPF_RX_SINGLE_Q,\n+\tIDPF_VPORT,\n+\tNULL\n+};\n+\n+static int idpf_dev_configure(struct rte_eth_dev *dev);\n+static int idpf_dev_close(struct rte_eth_dev *dev);\n+static int idpf_dev_info_get(struct rte_eth_dev *dev,\n+\t\t\t     struct rte_eth_dev_info *dev_info);\n+static void idpf_adapter_rel(struct idpf_adapter *adapter);\n+\n+static const struct eth_dev_ops idpf_eth_dev_ops = {\n+\t.dev_configure\t\t\t= idpf_dev_configure,\n+\t.dev_close\t\t\t= idpf_dev_close,\n+\t.dev_infos_get\t\t\t= idpf_dev_info_get,\n+};\n+\n+static int\n+idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\n+\tdev_info->max_rx_queues = adapter->caps->max_rx_q;\n+\tdev_info->max_tx_queues = adapter->caps->max_tx_q;\n+\tdev_info->min_rx_bufsize = IDPF_MIN_BUF_SIZE;\n+\tdev_info->max_rx_pktlen = IDPF_MAX_FRAME_SIZE;\n+\n+\tdev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;\n+\tdev_info->min_mtu = RTE_ETHER_MIN_MTU;\n+\n+\tdev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX;\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_init_vport_req_info(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_create_vport *vport_info;\n+\tuint16_t idx = adapter->cur_vport_idx;\n+\n+\tif (idx == IDPF_INVALID_VPORT_IDX) {\n+\t\tPMD_INIT_LOG(ERR, \"Invalid 
vport index.\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (adapter->vport_req_info[idx] == NULL) {\n+\t\tadapter->vport_req_info[idx] = rte_zmalloc(NULL,\n+\t\t\t\tsizeof(struct virtchnl2_create_vport), 0);\n+\t\tif (adapter->vport_req_info[idx] == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vport_req_info\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tvport_info =\n+\t\t(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];\n+\n+\tvport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);\n+\tif (adapter->txq_model == 0) {\n+\t\tvport_info->txq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);\n+\t\tvport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;\n+\t\tvport_info->num_tx_complq =\n+\t\t\tIDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP;\n+\t} else {\n+\t\tvport_info->txq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);\n+\t\tvport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;\n+\t\tvport_info->num_tx_complq = 0;\n+\t}\n+\tif (adapter->rxq_model == 0) {\n+\t\tvport_info->rxq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);\n+\t\tvport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;\n+\t\tvport_info->num_rx_bufq =\n+\t\t\tIDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP;\n+\t} else {\n+\t\tvport_info->rxq_model =\n+\t\t\trte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);\n+\t\tvport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;\n+\t\tvport_info->num_rx_bufq = 0;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_parse_devarg_id(char *name)\n+{\n+\tuint16_t val;\n+\tchar *p;\n+\n+\tp = strstr(name, \"vport_\");\n+\n+\tif (p == NULL)\n+\t\treturn -1;\n+\n+\tp += sizeof(\"vport_\") - 1;\n+\n+\tval = strtoul(p, NULL, 10);\n+\n+\treturn val;\n+}\n+\n+static int\n+idpf_init_vport(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tuint16_t idx = adapter->cur_vport_idx;\n+\tstruct virtchnl2_create_vport *vport_info =\n+\t\t(struct virtchnl2_create_vport *)adapter->vport_recv_info[idx];\n+\tint i, type, ret;\n+\n+\tvport->vport_id = vport_info->vport_id;\n+\tvport->txq_model = vport_info->txq_model;\n+\tvport->rxq_model = vport_info->rxq_model;\n+\tvport->num_tx_q = vport_info->num_tx_q;\n+\tvport->num_tx_complq = vport_info->num_tx_complq;\n+\tvport->num_rx_q = vport_info->num_rx_q;\n+\tvport->num_rx_bufq = vport_info->num_rx_bufq;\n+\tvport->max_mtu = vport_info->max_mtu;\n+\trte_memcpy(vport->default_mac_addr,\n+\t\t   vport_info->default_mac_addr, ETH_ALEN);\n+\tvport->sw_idx = idx;\n+\n+\tfor (i = 0; i < vport_info->chunks.num_chunks; i++) {\n+\t\ttype = vport_info->chunks.chunks[i].type;\n+\t\tswitch (type) {\n+\t\tcase VIRTCHNL2_QUEUE_TYPE_TX:\n+\t\t\tvport->chunks_info.tx_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.tx_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.tx_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t\tbreak;\n+\t\tcase VIRTCHNL2_QUEUE_TYPE_RX:\n+\t\t\tvport->chunks_info.rx_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.rx_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.rx_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t\tbreak;\n+\t\tcase VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:\n+\t\t\tvport->chunks_info.tx_compl_start_qid 
=\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.tx_compl_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.tx_compl_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t\tbreak;\n+\t\tcase VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:\n+\t\t\tvport->chunks_info.rx_buf_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.rx_buf_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.rx_buf_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tPMD_INIT_LOG(ERR, \"Unsupported queue type\");\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tret = idpf_parse_devarg_id(dev->data->name);\n+\tif (ret < 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to parse devarg id.\");\n+\t\treturn -1;\n+\t}\n+\tvport->devarg_id = ret;\n+\n+\tvport->dev_data = dev->data;\n+\n+\tadapter->vports[idx] = vport;\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_dev_configure(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_eth_conf *conf = &dev->data->dev_conf;\n+\n+\tif (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {\n+\t\tPMD_INIT_LOG(ERR, \"Setting link speed is not supported\");\n+\t\treturn -1;\n+\t}\n+\n+\tif ((dev->data->nb_rx_queues == 1 && conf->rxmode.mq_mode != RTE_ETH_MQ_RX_NONE) ||\n+\t    (dev->data->nb_rx_queues > 1 && conf->rxmode.mq_mode != RTE_ETH_MQ_RX_RSS)) {\n+\t\tPMD_INIT_LOG(ERR, \"Multi-queue packet distribution mode %d is not supported\",\n+\t\t\t     conf->rxmode.mq_mode);\n+\t\treturn -1;\n+\t}\n+\n+\tif (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {\n+\t\tPMD_INIT_LOG(ERR, \"Multi-queue TX mode %d is not supported\",\n+\t\t\t     conf->txmode.mq_mode);\n+\t\treturn -1;\n+\t}\n+\n+\tif (conf->lpbk_mode != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Loopback operation mode %d is not supported\",\n+\t\t\t     conf->lpbk_mode);\n+\t\treturn -1;\n+\t}\n+\n+\tif (conf->dcb_capability_en != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Priority Flow Control(PFC) if not supported\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (conf->intr_conf.lsc != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"LSC interrupt is not supported\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (conf->intr_conf.rxq != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"RXQ interrupt is not supported\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (conf->intr_conf.rmv != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"RMV interrupt is not supported\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_dev_close(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\n+\tidpf_vc_destroy_vport(vport);\n+\n+\n+\tadapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);\n+\n+\trte_free(vport);\n+\tdev->data->dev_private = NULL;\n+\n+\treturn 0;\n+}\n+\n+static int\n+insert_value(struct idpf_adapter *adapter, uint16_t id)\n+{\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < adapter->req_vport_nb; i++) {\n+\t\tif (adapter->req_vports[i] == id)\n+\t\t\treturn 0;\n+\t}\n+\n+\tif (adapter->req_vport_nb >= RTE_DIM(adapter->req_vports)) {\n+\t\tPMD_INIT_LOG(ERR, \"Total vport number can't be > %d\",\n+\t\t\t     IDPF_MAX_VPORT_NUM);\n+\t\treturn -1;\n+\t}\n+\n+\tadapter->req_vports[adapter->req_vport_nb] = id;\n+\tadapter->req_vport_nb++;\n+\n+\treturn 0;\n+}\n+\n+static const char *\n+parse_range(const char *value, struct idpf_adapter *adapter)\n+{\n+\tuint16_t lo, hi, i;\n+\tint n = 0;\n+\tint result;\n+\tconst char *pos = value;\n+\n+\tresult = 
sscanf(value, \"%hu%n-%hu%n\", &lo, &n, &hi, &n);\n+\tif (result == 1) {\n+\t\tif (lo >= IDPF_MAX_VPORT_NUM)\n+\t\t\treturn NULL;\n+\t\tif (insert_value(adapter, lo) != 0)\n+\t\t\treturn NULL;\n+\t} else if (result == 2) {\n+\t\tif (lo > hi || hi >= IDPF_MAX_VPORT_NUM)\n+\t\t\treturn NULL;\n+\t\tfor (i = lo; i <= hi; i++) {\n+\t\t\tif (insert_value(adapter, i) != 0)\n+\t\t\t\treturn NULL;\n+\t\t}\n+\t} else {\n+\t\treturn NULL;\n+\t}\n+\n+\treturn pos + n;\n+}\n+\n+static int\n+parse_vport(const char *key, const char *value, void *args)\n+{\n+\tstruct idpf_adapter *adapter = args;\n+\tconst char *pos = value;\n+\tint i;\n+\n+\tadapter->req_vport_nb = 0;\n+\n+\tif (*pos == '[')\n+\t\tpos++;\n+\n+\twhile (1) {\n+\t\tpos = parse_range(pos, adapter);\n+\t\tif (pos == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"invalid value:\\\"%s\\\" for key:\\\"%s\\\", \",\n+\t\t\t\t     value, key);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (*pos != ',')\n+\t\t\tbreak;\n+\t\tpos++;\n+\t}\n+\n+\tif (*value == '[' && *pos != ']') {\n+\t\tPMD_INIT_LOG(ERR, \"invalid value:\\\"%s\\\" for key:\\\"%s\\\", \",\n+\t\t\t     value, key);\n+\t\treturn -1;\n+\t}\n+\n+\tif (adapter->cur_vport_nb + adapter->req_vport_nb >\n+\t    IDPF_MAX_VPORT_NUM) {\n+\t\tPMD_INIT_LOG(ERR, \"Total vport number can't be > %d\",\n+\t\t\t     IDPF_MAX_VPORT_NUM);\n+\t\treturn -1;\n+\t}\n+\n+\tfor (i = 0; i < adapter->req_vport_nb; i++) {\n+\t\tif ((adapter->cur_vports & RTE_BIT32(adapter->req_vports[i])) == 0) {\n+\t\t\tadapter->cur_vports |= RTE_BIT32(adapter->req_vports[i]);\n+\t\t\tadapter->cur_vport_nb++;\n+\t\t} else {\n+\t\t\tPMD_INIT_LOG(ERR, \"Vport %d has been created\",\n+\t\t\t\t     adapter->req_vports[i]);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+parse_bool(const char *key, const char *value, void *args)\n+{\n+\tint *i = args;\n+\tchar *end;\n+\tint num;\n+\n+\terrno = 0;\n+\n+\tnum = strtoul(value, &end, 10);\n+\n+\tif (errno == ERANGE || (num != 0 && num != 1)) {\n+\t\tPMD_INIT_LOG(ERR, \"invalid value:\\\"%s\\\" for key:\\\"%s\\\", value must be 0 or 1\",\n+\t\t\tvalue, key);\n+\t\treturn -1;\n+\t}\n+\n+\t*i = num;\n+\treturn 0;\n+}\n+\n+static int\n+idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)\n+{\n+\tstruct rte_devargs *devargs = pci_dev->device.devargs;\n+\tstruct rte_kvargs *kvlist;\n+\tint ret;\n+\n+\tif (devargs == NULL)\n+\t\treturn 0;\n+\n+\tkvlist = rte_kvargs_parse(devargs->args, idpf_valid_args);\n+\tif (kvlist == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"invalid kvargs key\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,\n+\t\t\t\t adapter);\n+\tif (ret != 0)\n+\t\tgoto bail;\n+\n+\tret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,\n+\t\t\t\t &adapter->txq_model);\n+\tif (ret != 0)\n+\t\tgoto bail;\n+\n+\tret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,\n+\t\t\t\t &adapter->rxq_model);\n+\tif (ret != 0)\n+\t\tgoto bail;\n+\n+bail:\n+\trte_kvargs_free(kvlist);\n+\treturn ret;\n+}\n+\n+static void\n+idpf_reset_pf(struct idpf_hw *hw)\n+{\n+\tuint32_t reg;\n+\n+\treg = IDPF_READ_REG(hw, PFGEN_CTRL);\n+\tIDPF_WRITE_REG(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));\n+}\n+\n+#define IDPF_RESET_WAIT_CNT 100\n+static int\n+idpf_check_pf_reset_done(struct idpf_hw *hw)\n+{\n+\tuint32_t reg;\n+\tint i;\n+\n+\tfor (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {\n+\t\treg = IDPF_READ_REG(hw, PFGEN_RSTAT);\n+\t\tif (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))\n+\t\t\treturn 
0;\n+\t\trte_delay_ms(1000);\n+\t}\n+\n+\tPMD_INIT_LOG(ERR, \"IDPF reset timeout\");\n+\treturn -EBUSY;\n+}\n+\n+#define CTLQ_NUM 2\n+static int\n+idpf_init_mbx(struct idpf_hw *hw)\n+{\n+\tstruct idpf_ctlq_create_info ctlq_info[CTLQ_NUM] = {\n+\t\t{\n+\t\t\t.type = IDPF_CTLQ_TYPE_MAILBOX_TX,\n+\t\t\t.id = IDPF_CTLQ_ID,\n+\t\t\t.len = IDPF_CTLQ_LEN,\n+\t\t\t.buf_size = IDPF_DFLT_MBX_BUF_SIZE,\n+\t\t\t.reg = {\n+\t\t\t\t.head = PF_FW_ATQH,\n+\t\t\t\t.tail = PF_FW_ATQT,\n+\t\t\t\t.len = PF_FW_ATQLEN,\n+\t\t\t\t.bah = PF_FW_ATQBAH,\n+\t\t\t\t.bal = PF_FW_ATQBAL,\n+\t\t\t\t.len_mask = PF_FW_ATQLEN_ATQLEN_M,\n+\t\t\t\t.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,\n+\t\t\t\t.head_mask = PF_FW_ATQH_ATQH_M,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type = IDPF_CTLQ_TYPE_MAILBOX_RX,\n+\t\t\t.id = IDPF_CTLQ_ID,\n+\t\t\t.len = IDPF_CTLQ_LEN,\n+\t\t\t.buf_size = IDPF_DFLT_MBX_BUF_SIZE,\n+\t\t\t.reg = {\n+\t\t\t\t.head = PF_FW_ARQH,\n+\t\t\t\t.tail = PF_FW_ARQT,\n+\t\t\t\t.len = PF_FW_ARQLEN,\n+\t\t\t\t.bah = PF_FW_ARQBAH,\n+\t\t\t\t.bal = PF_FW_ARQBAL,\n+\t\t\t\t.len_mask = PF_FW_ARQLEN_ARQLEN_M,\n+\t\t\t\t.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,\n+\t\t\t\t.head_mask = PF_FW_ARQH_ARQH_M,\n+\t\t\t}\n+\t\t}\n+\t};\n+\tstruct idpf_ctlq_info *ctlq;\n+\tint ret;\n+\n+\tret = idpf_ctlq_init(hw, CTLQ_NUM, ctlq_info);\n+\tif (ret != 0)\n+\t\treturn ret;\n+\n+\tLIST_FOR_EACH_ENTRY_SAFE(ctlq, NULL, &hw->cq_list_head,\n+\t\t\t\t struct idpf_ctlq_info, cq_list) {\n+\t\tif (ctlq->q_id == IDPF_CTLQ_ID &&\n+\t\t    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)\n+\t\t\thw->asq = ctlq;\n+\t\tif (ctlq->q_id == IDPF_CTLQ_ID &&\n+\t\t    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)\n+\t\t\thw->arq = ctlq;\n+\t}\n+\n+\tif (hw->asq == NULL || hw->arq == NULL) {\n+\t\tidpf_ctlq_deinit(hw);\n+\t\tret = -ENOENT;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n+idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)\n+{\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tint ret = 0;\n+\n+\thw->hw_addr = (void *)pci_dev->mem_resource[0].addr;\n+\thw->hw_addr_len = pci_dev->mem_resource[0].len;\n+\thw->back = adapter;\n+\thw->vendor_id = pci_dev->id.vendor_id;\n+\thw->device_id = pci_dev->id.device_id;\n+\thw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;\n+\n+\tstrncpy(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE);\n+\n+\tidpf_reset_pf(hw);\n+\tret = idpf_check_pf_reset_done(hw);\n+\tif (ret != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"IDPF is still resetting\");\n+\t\tgoto err;\n+\t}\n+\n+\tret = idpf_init_mbx(hw);\n+\tif (ret != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init mailbox\");\n+\t\tgoto err;\n+\t}\n+\n+\tadapter->mbx_resp = rte_zmalloc(\"idpf_adapter_mbx_resp\",\n+\t\t\t\t\tIDPF_DFLT_MBX_BUF_SIZE, 0);\n+\tif (adapter->mbx_resp == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate idpf_adapter_mbx_resp memory\");\n+\t\tgoto err_mbx;\n+\t}\n+\n+\tif (idpf_vc_check_api_version(adapter) != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to check api version\");\n+\t\tgoto err_api;\n+\t}\n+\n+\tadapter->caps = rte_zmalloc(\"idpf_caps\",\n+\t\t\t\tsizeof(struct virtchnl2_get_capabilities), 0);\n+\tif (adapter->caps == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate idpf_caps memory\");\n+\t\tgoto err_api;\n+\t}\n+\n+\tif (idpf_vc_get_caps(adapter) != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to get capabilities\");\n+\t\tgoto err_caps;\n+\t}\n+\n+\tadapter->max_vport_nb = adapter->caps->max_vports;\n+\n+\tadapter->vport_req_info = rte_zmalloc(\"vport_req_info\",\n+\t\t\t\t\t      adapter->max_vport_nb *\n+\t\t\t\t\t      
sizeof(*adapter->vport_req_info),\n+\t\t\t\t\t      0);\n+\tif (adapter->vport_req_info == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vport_req_info memory\");\n+\t\tgoto err_caps;\n+\t}\n+\n+\tadapter->vport_recv_info = rte_zmalloc(\"vport_recv_info\",\n+\t\t\t\t\t       adapter->max_vport_nb *\n+\t\t\t\t\t       sizeof(*adapter->vport_recv_info),\n+\t\t\t\t\t       0);\n+\tif (adapter->vport_recv_info == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vport_recv_info memory\");\n+\t\tgoto err_vport_recv_info;\n+\t}\n+\n+\tadapter->vports = rte_zmalloc(\"vports\",\n+\t\t\t\t      adapter->max_vport_nb *\n+\t\t\t\t      sizeof(*adapter->vports),\n+\t\t\t\t      0);\n+\tif (adapter->vports == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vports memory\");\n+\t\tgoto err_vports;\n+\t}\n+\n+\tadapter->max_rxq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -\n+\t\t\t\tsizeof(struct virtchnl2_config_rx_queues)) /\n+\t\t\t\tsizeof(struct virtchnl2_rxq_info);\n+\tadapter->max_txq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -\n+\t\t\t\tsizeof(struct virtchnl2_config_tx_queues)) /\n+\t\t\t\tsizeof(struct virtchnl2_txq_info);\n+\n+\tadapter->cur_vports = 0;\n+\tadapter->cur_vport_nb = 0;\n+\n+\treturn ret;\n+\n+err_vports:\n+\trte_free(adapter->vport_recv_info);\n+\tadapter->vport_recv_info = NULL;\n+err_vport_recv_info:\n+\trte_free(adapter->vport_req_info);\n+\tadapter->vport_req_info = NULL;\n+err_caps:\n+\trte_free(adapter->caps);\n+\tadapter->caps = NULL;\n+err_api:\n+\trte_free(adapter->mbx_resp);\n+\tadapter->mbx_resp = NULL;\n+err_mbx:\n+\tidpf_ctlq_deinit(hw);\n+err:\n+\treturn -1;\n+}\n+\n+static uint16_t\n+idpf_get_vport_idx(struct idpf_vport **vports, uint16_t max_vport_nb)\n+{\n+\tuint16_t vport_idx;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < max_vport_nb; i++) {\n+\t\tif (vports[i] == NULL)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (i == max_vport_nb)\n+\t\tvport_idx = IDPF_INVALID_VPORT_IDX;\n+\telse\n+\t\tvport_idx = i;\n+\n+\treturn vport_idx;\n+}\n+\n+static int\n+idpf_dev_init(struct rte_eth_dev *dev, void *init_params)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = init_params;\n+\tint ret = 0;\n+\n+\tdev->dev_ops = &idpf_eth_dev_ops;\n+\tvport->adapter = adapter;\n+\n+\tret = idpf_init_vport_req_info(dev);\n+\tif (ret != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init vport req_info.\");\n+\t\tgoto err;\n+\t}\n+\n+\tret = idpf_vc_create_vport(adapter);\n+\tif (ret != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to create vport.\");\n+\t\tgoto err_create_vport;\n+\t}\n+\n+\tret = idpf_init_vport(dev);\n+\tif (ret != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init vports.\");\n+\t\tgoto err_init_vport;\n+\t}\n+\n+\tadapter->cur_vport_idx = idpf_get_vport_idx(adapter->vports,\n+\t\t\t\t\t\t    adapter->max_vport_nb);\n+\n+\tdev->data->mac_addrs = rte_zmalloc(NULL, RTE_ETHER_ADDR_LEN, 0);\n+\tif (dev->data->mac_addrs == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Cannot allocate mac_addr memory.\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err_init_vport;\n+\t}\n+\n+\trte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,\n+\t\t\t    &dev->data->mac_addrs[0]);\n+\n+\treturn 0;\n+\n+err_init_vport:\n+\tidpf_vc_destroy_vport(vport);\n+err_create_vport:\n+\trte_free(vport->adapter->vport_req_info[vport->adapter->cur_vport_idx]);\n+err:\n+\treturn ret;\n+}\n+\n+static const struct rte_pci_id pci_id_idpf_map[] = {\n+\t{ RTE_PCI_DEVICE(IDPF_INTEL_VENDOR_ID, IDPF_DEV_ID_PF) },\n+\t{ .vendor_id = 0, /* sentinel */ },\n+};\n+\n+struct idpf_adapter *\n+idpf_find_adapter(struct 
rte_pci_device *pci_dev)\n+{\n+\tstruct idpf_adapter *adapter;\n+\n+\trte_spinlock_lock(&idpf_adapter_lock);\n+\tTAILQ_FOREACH(adapter, &idpf_adapter_list, next) {\n+\t\tif (strncmp(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE) == 0) {\n+\t\t\trte_spinlock_unlock(&idpf_adapter_lock);\n+\t\t\treturn adapter;\n+\t\t}\n+\t}\n+\trte_spinlock_unlock(&idpf_adapter_lock);\n+\n+\treturn NULL;\n+}\n+\n+static int\n+idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n+\t       struct rte_pci_device *pci_dev)\n+{\n+\tstruct idpf_adapter *adapter;\n+\tchar name[RTE_ETH_NAME_MAX_LEN];\n+\tint i, retval;\n+\tbool first_probe = false;\n+\n+\tif (!idpf_adapter_list_init) {\n+\t\trte_spinlock_init(&idpf_adapter_lock);\n+\t\tTAILQ_INIT(&idpf_adapter_list);\n+\t\tidpf_adapter_list_init = true;\n+\t}\n+\n+\tadapter = idpf_find_adapter(pci_dev);\n+\tif (adapter == NULL) {\n+\t\tfirst_probe = true;\n+\t\tadapter = rte_zmalloc(\"idpf_adapter\",\n+\t\t\t\t\t\tsizeof(struct idpf_adapter), 0);\n+\t\tif (adapter == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to allocate adapter.\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tretval = idpf_adapter_init(pci_dev, adapter);\n+\t\tif (retval != 0) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to init adapter.\");\n+\t\t\treturn retval;\n+\t\t}\n+\n+\t\trte_spinlock_lock(&idpf_adapter_lock);\n+\t\tTAILQ_INSERT_TAIL(&idpf_adapter_list, adapter, next);\n+\t\trte_spinlock_unlock(&idpf_adapter_lock);\n+\t}\n+\n+\tretval = idpf_parse_devargs(pci_dev, adapter);\n+\tif (retval != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to parse private devargs\");\n+\t\tgoto err;\n+\t}\n+\n+\tif (adapter->req_vport_nb == 0) {\n+\t\t/* If no vport devarg, create vport 0 by default. */\n+\t\tsnprintf(name, sizeof(name), \"idpf_%s_vport_0\",\n+\t\t\t pci_dev->device.name);\n+\t\tretval = rte_eth_dev_create(&pci_dev->device, name,\n+\t\t\t\t\t    sizeof(struct idpf_vport),\n+\t\t\t\t\t    NULL, NULL, idpf_dev_init,\n+\t\t\t\t\t    adapter);\n+\t\tif (retval != 0)\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to create default vport 0\");\n+\t\tadapter->cur_vports |= RTE_BIT32(0);\n+\t\tadapter->cur_vport_nb++;\n+\t} else {\n+\t\tfor (i = 0; i < adapter->req_vport_nb; i++) {\n+\t\t\tsnprintf(name, sizeof(name), \"idpf_%s_vport_%d\",\n+\t\t\t\t pci_dev->device.name,\n+\t\t\t\t adapter->req_vports[i]);\n+\t\t\tretval = rte_eth_dev_create(&pci_dev->device, name,\n+\t\t\t\t\t\t    sizeof(struct idpf_vport),\n+\t\t\t\t\t\t    NULL, NULL, idpf_dev_init,\n+\t\t\t\t\t\t    adapter);\n+\t\t\tif (retval != 0)\n+\t\t\t\tPMD_DRV_LOG(ERR, \"Failed to create vport %d\",\n+\t\t\t\t\t    adapter->req_vports[i]);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+\n+err:\n+\tif (first_probe) {\n+\t\trte_spinlock_lock(&idpf_adapter_lock);\n+\t\tTAILQ_REMOVE(&idpf_adapter_list, adapter, next);\n+\t\trte_spinlock_unlock(&idpf_adapter_lock);\n+\t\tidpf_adapter_rel(adapter);\n+\t\trte_free(adapter);\n+\t}\n+\treturn retval;\n+}\n+\n+static void\n+idpf_adapter_rel(struct idpf_adapter *adapter)\n+{\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tint i;\n+\n+\tidpf_ctlq_deinit(hw);\n+\n+\trte_free(adapter->caps);\n+\tadapter->caps = NULL;\n+\n+\trte_free(adapter->mbx_resp);\n+\tadapter->mbx_resp = NULL;\n+\n+\tif (adapter->vport_req_info != NULL) {\n+\t\tfor (i = 0; i < adapter->max_vport_nb; i++) {\n+\t\t\trte_free(adapter->vport_req_info[i]);\n+\t\t\tadapter->vport_req_info[i] = NULL;\n+\t\t}\n+\t\trte_free(adapter->vport_req_info);\n+\t\tadapter->vport_req_info = NULL;\n+\t}\n+\n+\tif (adapter->vport_recv_info != NULL) {\n+\t\tfor (i = 0; i < 
adapter->max_vport_nb; i++) {\n+\t\t\trte_free(adapter->vport_recv_info[i]);\n+\t\t\tadapter->vport_recv_info[i] = NULL;\n+\t\t}\n+\t\trte_free(adapter->vport_recv_info);\n+\t\tadapter->vport_recv_info = NULL;\n+\t}\n+\n+\trte_free(adapter->vports);\n+\tadapter->vports = NULL;\n+}\n+\n+static int\n+idpf_pci_remove(struct rte_pci_device *pci_dev)\n+{\n+\tstruct idpf_adapter *adapter = idpf_find_adapter(pci_dev);\n+\tuint16_t port_id;\n+\n+\t/* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */\n+\tRTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {\n+\t\t\trte_eth_dev_close(port_id);\n+\t}\n+\n+\trte_spinlock_lock(&idpf_adapter_lock);\n+\tTAILQ_REMOVE(&idpf_adapter_list, adapter, next);\n+\trte_spinlock_unlock(&idpf_adapter_lock);\n+\tidpf_adapter_rel(adapter);\n+\trte_free(adapter);\n+\n+\treturn 0;\n+}\n+\n+static struct rte_pci_driver rte_idpf_pmd = {\n+\t.id_table\t= pci_id_idpf_map,\n+\t.drv_flags\t= RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_PROBE_AGAIN,\n+\t.probe\t\t= idpf_pci_probe,\n+\t.remove\t\t= idpf_pci_remove,\n+};\n+\n+/**\n+ * Driver initialization routine.\n+ * Invoked once at EAL init time.\n+ * Register itself as the [Poll Mode] Driver of PCI devices.\n+ */\n+RTE_PMD_REGISTER_PCI(net_idpf, rte_idpf_pmd);\n+RTE_PMD_REGISTER_PCI_TABLE(net_idpf, pci_id_idpf_map);\n+RTE_PMD_REGISTER_KMOD_DEP(net_ice, \"* igb_uio | uio_pci_generic | vfio-pci\");\n+\n+RTE_LOG_REGISTER_SUFFIX(idpf_logtype_init, init, NOTICE);\n+RTE_LOG_REGISTER_SUFFIX(idpf_logtype_driver, driver, NOTICE);\ndiff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h\nnew file mode 100644\nindex 0000000000..84ae6641e2\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_ethdev.h\n@@ -0,0 +1,189 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_ETHDEV_H_\n+#define _IDPF_ETHDEV_H_\n+\n+#include <stdint.h>\n+#include <rte_malloc.h>\n+#include <rte_spinlock.h>\n+#include <rte_ethdev.h>\n+#include <rte_kvargs.h>\n+#include <ethdev_driver.h>\n+#include <ethdev_pci.h>\n+\n+#include \"idpf_logs.h\"\n+\n+#include <base/idpf_prototype.h>\n+#include <base/virtchnl2.h>\n+\n+#define IDPF_MAX_VPORT_NUM\t8\n+\n+#define IDPF_DEFAULT_RXQ_NUM\t16\n+#define IDPF_DEFAULT_TXQ_NUM\t16\n+\n+#define IDPF_INVALID_VPORT_IDX\t0xffff\n+#define IDPF_TX_COMPLQ_PER_GRP\t1\n+#define IDPF_RX_BUFQ_PER_GRP\t2\n+\n+#define IDPF_CTLQ_ID\t\t-1\n+#define IDPF_CTLQ_LEN\t\t64\n+#define IDPF_DFLT_MBX_BUF_SIZE\t4096\n+\n+#define IDPF_MIN_BUF_SIZE\t1024\n+#define IDPF_MAX_FRAME_SIZE\t9728\n+\n+#define IDPF_NUM_MACADDR_MAX\t64\n+\n+#define IDPF_VLAN_TAG_SIZE\t4\n+#define IDPF_ETH_OVERHEAD \\\n+\t(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2)\n+\n+#define IDPF_ADAPTER_NAME_LEN\t(PCI_PRI_STR_SIZE + 1)\n+\n+/* Message type read in virtual channel from PF */\n+enum idpf_vc_result {\n+\tIDPF_MSG_ERR = -1, /* Meet error when accessing admin queue */\n+\tIDPF_MSG_NON,      /* Read nothing from admin queue */\n+\tIDPF_MSG_SYS,      /* Read system msg from admin queue */\n+\tIDPF_MSG_CMD,      /* Read async command result */\n+};\n+\n+struct idpf_chunks_info {\n+\tuint32_t tx_start_qid;\n+\tuint32_t rx_start_qid;\n+\t/* Valid only if split queue model */\n+\tuint32_t tx_compl_start_qid;\n+\tuint32_t rx_buf_start_qid;\n+\n+\tuint64_t tx_qtail_start;\n+\tuint32_t tx_qtail_spacing;\n+\tuint64_t rx_qtail_start;\n+\tuint32_t rx_qtail_spacing;\n+\tuint64_t tx_compl_qtail_start;\n+\tuint32_t tx_compl_qtail_spacing;\n+\tuint64_t rx_buf_qtail_start;\n+\tuint32_t 
rx_buf_qtail_spacing;\n+};\n+\n+struct idpf_vport {\n+\tstruct idpf_adapter *adapter; /* Backreference to associated adapter */\n+\tuint16_t vport_id;\n+\tuint32_t txq_model;\n+\tuint32_t rxq_model;\n+\tuint16_t num_tx_q;\n+\t/* valid only if txq_model is split Q */\n+\tuint16_t num_tx_complq;\n+\tuint16_t num_rx_q;\n+\t/* valid only if rxq_model is split Q */\n+\tuint16_t num_rx_bufq;\n+\n+\tuint16_t max_mtu;\n+\tuint8_t default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];\n+\n+\tuint16_t sw_idx; /* SW idx */\n+\n+\tstruct rte_eth_dev_data *dev_data; /* Pointer to the device data */\n+\tuint16_t max_pkt_len; /* Maximum packet length */\n+\n+\t/* Chunk info */\n+\tstruct idpf_chunks_info chunks_info;\n+\n+\tuint16_t devarg_id;\n+};\n+\n+struct idpf_adapter {\n+\tTAILQ_ENTRY(idpf_adapter) next;\n+\tstruct idpf_hw hw;\n+\tchar name[IDPF_ADAPTER_NAME_LEN];\n+\n+\tstruct virtchnl2_version_info virtchnl_version;\n+\tstruct virtchnl2_get_capabilities *caps;\n+\n+\tvolatile enum virtchnl_ops pend_cmd; /* pending command not finished */\n+\tuint32_t cmd_retval; /* return value of the cmd response from ipf */\n+\tuint8_t *mbx_resp; /* buffer to store the mailbox response from ipf */\n+\n+\tuint32_t txq_model; /* 0 - split queue model, non-0 - single queue model */\n+\tuint32_t rxq_model; /* 0 - split queue model, non-0 - single queue model */\n+\n+\t/* Vport info */\n+\tuint8_t **vport_req_info;\n+\tuint8_t **vport_recv_info;\n+\tstruct idpf_vport **vports;\n+\tuint16_t max_vport_nb;\n+\tuint16_t req_vports[IDPF_MAX_VPORT_NUM];\n+\tuint16_t req_vport_nb;\n+\tuint16_t cur_vports;\n+\tuint16_t cur_vport_nb;\n+\tuint16_t cur_vport_idx;\n+\n+\t/* Max config queue number per VC message */\n+\tuint32_t max_rxq_per_msg;\n+\tuint32_t max_txq_per_msg;\n+};\n+\n+TAILQ_HEAD(idpf_adapter_list, idpf_adapter);\n+\n+#define IDPF_DEV_TO_PCI(eth_dev)\t\t\\\n+\tRTE_DEV_TO_PCI((eth_dev)->device)\n+\n+/* structure used for sending and checking response of virtchnl ops */\n+struct idpf_cmd_info {\n+\tuint32_t ops;\n+\tuint8_t *in_args;       /* buffer for sending */\n+\tuint32_t in_args_size;  /* buffer size for sending */\n+\tuint8_t *out_buffer;    /* buffer for response */\n+\tuint32_t out_size;      /* buffer size for response */\n+};\n+\n+/* notify current command done. Only call in case execute\n+ * _atomic_set_cmd successfully.\n+ */\n+static inline void\n+notify_cmd(struct idpf_adapter *adapter, int msg_ret)\n+{\n+\tadapter->cmd_retval = msg_ret;\n+\t/* Return value may be checked in anither thread, need to ensure the coherence. */\n+\trte_wmb();\n+\tadapter->pend_cmd = VIRTCHNL_OP_UNKNOWN;\n+}\n+\n+/* clear current command. Only call in case execute\n+ * _atomic_set_cmd successfully.\n+ */\n+static inline void\n+clear_cmd(struct idpf_adapter *adapter)\n+{\n+\t/* Return value may be checked in anither thread, need to ensure the coherence. */\n+\trte_wmb();\n+\tadapter->pend_cmd = VIRTCHNL_OP_UNKNOWN;\n+\tadapter->cmd_retval = VIRTCHNL_STATUS_SUCCESS;\n+}\n+\n+/* Check there is pending cmd in execution. If none, set new command. 
*/\n+static inline bool\n+atomic_set_cmd(struct idpf_adapter *adapter, enum virtchnl_ops ops)\n+{\n+\tenum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;\n+\tbool ret = __atomic_compare_exchange(&adapter->pend_cmd, &op_unk, &ops,\n+\t\t\t\t\t    0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);\n+\n+\tif (!ret)\n+\t\tPMD_DRV_LOG(ERR, \"There is incomplete cmd %d\", adapter->pend_cmd);\n+\n+\treturn !ret;\n+}\n+\n+struct idpf_adapter *idpf_find_adapter(struct rte_pci_device *pci_dev);\n+void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);\n+int idpf_vc_check_api_version(struct idpf_adapter *adapter);\n+int idpf_vc_get_caps(struct idpf_adapter *adapter);\n+int idpf_vc_create_vport(struct idpf_adapter *adapter);\n+int idpf_vc_destroy_vport(struct idpf_vport *vport);\n+int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);\n+int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,\n+\t\t      uint16_t buf_len, uint8_t *buf);\n+\n+#endif /* _IDPF_ETHDEV_H_ */\ndiff --git a/drivers/net/idpf/idpf_logs.h b/drivers/net/idpf/idpf_logs.h\nnew file mode 100644\nindex 0000000000..d5f778fefe\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_logs.h\n@@ -0,0 +1,56 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_LOGS_H_\n+#define _IDPF_LOGS_H_\n+\n+#include <rte_log.h>\n+\n+extern int idpf_logtype_init;\n+extern int idpf_logtype_driver;\n+\n+#define PMD_INIT_LOG(level, ...) \\\n+\trte_log(RTE_LOG_ ## level, \\\n+\t\tidpf_logtype_init, \\\n+\t\tRTE_FMT(\"%s(): \" \\\n+\t\t\tRTE_FMT_HEAD(__VA_ARGS__,) \"\\n\", \\\n+\t\t\t__func__, \\\n+\t\t\tRTE_FMT_TAIL(__VA_ARGS__,)))\n+\n+#define PMD_DRV_LOG_RAW(level, ...) \\\n+\trte_log(RTE_LOG_ ## level, \\\n+\t\tidpf_logtype_driver, \\\n+\t\tRTE_FMT(\"%s(): \" \\\n+\t\t\tRTE_FMT_HEAD(__VA_ARGS__,) \"\\n\", \\\n+\t\t\t__func__, \\\n+\t\t\tRTE_FMT_TAIL(__VA_ARGS__,)))\n+\n+#define PMD_DRV_LOG(level, fmt, args...) \\\n+\tPMD_DRV_LOG_RAW(level, fmt \"\\n\", ## args)\n+\n+#ifdef RTE_LIBRTE_IDPF_DEBUG_RX\n+#define PMD_RX_LOG(level, ...) \\\n+\tRTE_LOG(level, \\\n+\t\tPMD, \\\n+\t\tRTE_FMT(\"%s(): \" \\\n+\t\t\tRTE_FMT_HEAD(__VA_ARGS__,) \"\\n\", \\\n+\t\t\t__func__, \\\n+\t\t\tRTE_FMT_TAIL(__VA_ARGS__,)))\n+#else\n+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_IDPF_DEBUG_TX\n+#define PMD_TX_LOG(level, ...) \\\n+\tRTE_LOG(level, \\\n+\t\tPMD, \\\n+\t\tRTE_FMT(\"%s(): \" \\\n+\t\t\tRTE_FMT_HEAD(__VA_ARGS__,) \"\\n\", \\\n+\t\t\t__func__, \\\n+\t\t\tRTE_FMT_TAIL(__VA_ARGS__,)))\n+#else\n+#define PMD_TX_LOG(level, fmt, args...) 
do { } while (0)\n+#endif\n+\n+#endif /* _IDPF_LOGS_H_ */\ndiff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c\nnew file mode 100644\nindex 0000000000..9392711b61\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_vchnl.c\n@@ -0,0 +1,468 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#include <stdio.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <string.h>\n+#include <unistd.h>\n+#include <stdarg.h>\n+#include <inttypes.h>\n+#include <rte_byteorder.h>\n+#include <rte_common.h>\n+\n+#include <rte_debug.h>\n+#include <rte_atomic.h>\n+#include <rte_eal.h>\n+#include <rte_ether.h>\n+#include <ethdev_driver.h>\n+#include <ethdev_pci.h>\n+#include <rte_dev.h>\n+\n+#include \"idpf_ethdev.h\"\n+\n+#define IDPF_CTLQ_LEN\t64\n+\n+static int\n+idpf_vc_clean(struct idpf_adapter *adapter)\n+{\n+\tstruct idpf_ctlq_msg *q_msg[IDPF_CTLQ_LEN];\n+\tuint16_t num_q_msg = IDPF_CTLQ_LEN;\n+\tstruct idpf_dma_mem *dma_mem;\n+\tint err;\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < 10; i++) {\n+\t\terr = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);\n+\t\tmsleep(20);\n+\t\tif (num_q_msg > 0)\n+\t\t\tbreak;\n+\t}\n+\tif (err != 0)\n+\t\treturn err;\n+\n+\t/* Empty queue is not an error */\n+\tfor (i = 0; i < num_q_msg; i++) {\n+\t\tdma_mem = q_msg[i]->ctx.indirect.payload;\n+\t\tif (dma_mem != NULL) {\n+\t\t\tidpf_free_dma_mem(&adapter->hw, dma_mem);\n+\t\t\trte_free(dma_mem);\n+\t\t}\n+\t\trte_free(q_msg[i]);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_send_vc_msg(struct idpf_adapter *adapter, enum virtchnl_ops op,\n+\t\t uint16_t msg_size, uint8_t *msg)\n+{\n+\tstruct idpf_ctlq_msg *ctlq_msg;\n+\tstruct idpf_dma_mem *dma_mem;\n+\tint err;\n+\n+\terr = idpf_vc_clean(adapter);\n+\tif (err != 0)\n+\t\tgoto err;\n+\n+\tctlq_msg = rte_zmalloc(NULL, sizeof(struct idpf_ctlq_msg), 0);\n+\tif (ctlq_msg == NULL) {\n+\t\terr = -ENOMEM;\n+\t\tgoto err;\n+\t}\n+\n+\tdma_mem = rte_zmalloc(NULL, sizeof(struct idpf_dma_mem), 0);\n+\tif (dma_mem == NULL) {\n+\t\terr = -ENOMEM;\n+\t\tgoto dma_mem_error;\n+\t}\n+\n+\tdma_mem->size = IDPF_DFLT_MBX_BUF_SIZE;\n+\tidpf_alloc_dma_mem(&adapter->hw, dma_mem, dma_mem->size);\n+\tif (dma_mem->va == NULL) {\n+\t\terr = -ENOMEM;\n+\t\tgoto dma_alloc_error;\n+\t}\n+\n+\tmemcpy(dma_mem->va, msg, msg_size);\n+\n+\tctlq_msg->opcode = idpf_mbq_opc_send_msg_to_pf;\n+\tctlq_msg->func_id = 0;\n+\tctlq_msg->data_len = msg_size;\n+\tctlq_msg->cookie.mbx.chnl_opcode = op;\n+\tctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL_STATUS_SUCCESS;\n+\tctlq_msg->ctx.indirect.payload = dma_mem;\n+\n+\terr = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);\n+\tif (err != 0)\n+\t\tgoto send_error;\n+\n+\treturn 0;\n+\n+send_error:\n+\tidpf_free_dma_mem(&adapter->hw, dma_mem);\n+dma_alloc_error:\n+\trte_free(dma_mem);\n+dma_mem_error:\n+\trte_free(ctlq_msg);\n+err:\n+\treturn err;\n+}\n+\n+static enum idpf_vc_result\n+idpf_read_msg_from_cp(struct idpf_adapter *adapter, uint16_t buf_len,\n+\t\t      uint8_t *buf)\n+{\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tstruct idpf_ctlq_msg ctlq_msg;\n+\tstruct idpf_dma_mem *dma_mem = NULL;\n+\tenum idpf_vc_result result = IDPF_MSG_NON;\n+\tenum virtchnl_ops opcode;\n+\tuint16_t pending = 1;\n+\tint ret;\n+\n+\tret = idpf_ctlq_recv(hw->arq, &pending, &ctlq_msg);\n+\tif (ret != 0) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Can't read msg from AQ\");\n+\t\tif (ret != IDPF_ERR_CTLQ_NO_WORK)\n+\t\t\tresult = IDPF_MSG_ERR;\n+\t\treturn result;\n+\t}\n+\n+\trte_memcpy(buf, ctlq_msg.ctx.indirect.payload->va, 
buf_len);\n+\n+\topcode = (enum virtchnl_ops)rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);\n+\tadapter->cmd_retval =\n+\t\t(enum virtchnl_status_code)rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);\n+\n+\tPMD_DRV_LOG(DEBUG, \"CQ from CP carries opcode %u, retval %d\",\n+\t\t    opcode, adapter->cmd_retval);\n+\n+\tif (opcode == VIRTCHNL2_OP_EVENT) {\n+\t\tstruct virtchnl2_event *ve =\n+\t\t\t(struct virtchnl2_event *)ctlq_msg.ctx.indirect.payload->va;\n+\n+\t\tresult = IDPF_MSG_SYS;\n+\t\tswitch (ve->event) {\n+\t\tcase VIRTCHNL2_EVENT_LINK_CHANGE:\n+\t\t\t/* TBD */\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tPMD_DRV_LOG(ERR, \"%s: Unknown event %d from CP\",\n+\t\t\t\t    __func__, ve->event);\n+\t\t\tbreak;\n+\t\t}\n+\t} else {\n+\t\t/* async reply msg on command issued by pf previously */\n+\t\tresult = IDPF_MSG_CMD;\n+\t\tif (opcode != adapter->pend_cmd) {\n+\t\t\tPMD_DRV_LOG(WARNING, \"command mismatch, expect %u, get %u\",\n+\t\t\t\t    adapter->pend_cmd, opcode);\n+\t\t\tresult = IDPF_MSG_ERR;\n+\t\t}\n+\t}\n+\n+\tif (ctlq_msg.data_len != 0)\n+\t\tdma_mem = ctlq_msg.ctx.indirect.payload;\n+\telse\n+\t\tpending = 0;\n+\n+\tret = idpf_ctlq_post_rx_buffs(hw, hw->arq, &pending, &dma_mem);\n+\tif (ret != 0 && dma_mem != NULL)\n+\t\tidpf_free_dma_mem(hw, dma_mem);\n+\n+\treturn result;\n+}\n+\n+#define MAX_TRY_TIMES 200\n+#define ASQ_DELAY_MS  10\n+\n+int\n+idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,\n+\t\t  uint8_t *buf)\n+{\n+\tint err = 0;\n+\tint i = 0;\n+\tint ret;\n+\n+\tdo {\n+\t\tret = idpf_read_msg_from_cp(adapter, buf_len, buf);\n+\t\tif (ret == IDPF_MSG_CMD)\n+\t\t\tbreak;\n+\t\trte_delay_ms(ASQ_DELAY_MS);\n+\t} while (i++ < MAX_TRY_TIMES);\n+\tif (i >= MAX_TRY_TIMES ||\n+\t    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {\n+\t\terr = -1;\n+\t\tPMD_DRV_LOG(ERR, \"No response or return failure (%d) for cmd %d\",\n+\t\t\t    adapter->cmd_retval, ops);\n+\t}\n+\n+\treturn err;\n+}\n+\n+static int\n+idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)\n+{\n+\tint err = 0;\n+\tint i = 0;\n+\tint ret;\n+\n+\tif (atomic_set_cmd(adapter, args->ops))\n+\t\treturn -1;\n+\n+\tret = idpf_send_vc_msg(adapter, args->ops, args->in_args_size, args->in_args);\n+\tif (ret != 0) {\n+\t\tPMD_DRV_LOG(ERR, \"fail to send cmd %d\", args->ops);\n+\t\tclear_cmd(adapter);\n+\t\treturn ret;\n+\t}\n+\n+\tswitch (args->ops) {\n+\tcase VIRTCHNL_OP_VERSION:\n+\tcase VIRTCHNL2_OP_GET_CAPS:\n+\tcase VIRTCHNL2_OP_CREATE_VPORT:\n+\tcase VIRTCHNL2_OP_DESTROY_VPORT:\n+\tcase VIRTCHNL2_OP_ENABLE_VPORT:\n+\tcase VIRTCHNL2_OP_DISABLE_VPORT:\n+\t\t/* for init virtchnl ops, need to poll the response */\n+\t\terr = idpf_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);\n+\t\tclear_cmd(adapter);\n+\t\tbreak;\n+\tdefault:\n+\t\t/* For other virtchnl ops in running time,\n+\t\t * wait for the cmd done flag.\n+\t\t */\n+\t\tdo {\n+\t\t\tif (adapter->pend_cmd == VIRTCHNL_OP_UNKNOWN)\n+\t\t\t\tbreak;\n+\t\t\trte_delay_ms(ASQ_DELAY_MS);\n+\t\t\t/* If don't read msg or read sys event, continue */\n+\t\t} while (i++ < MAX_TRY_TIMES);\n+\t\t/* If there's no response is received, clear command */\n+\t\tif (i >= MAX_TRY_TIMES  ||\n+\t\t    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {\n+\t\t\terr = -1;\n+\t\t\tPMD_DRV_LOG(ERR, \"No response or return failure (%d) for cmd %d\",\n+\t\t\t\t    adapter->cmd_retval, args->ops);\n+\t\t\tclear_cmd(adapter);\n+\t\t}\n+\t\tbreak;\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_vc_check_api_version(struct 
idpf_adapter *adapter)\n+{\n+\tstruct virtchnl2_version_info version, *pver;\n+\tstruct idpf_cmd_info args;\n+\tint err;\n+\n+\tmemset(&version, 0, sizeof(struct virtchnl_version_info));\n+\tversion.major = VIRTCHNL2_VERSION_MAJOR_2;\n+\tversion.minor = VIRTCHNL2_VERSION_MINOR_0;\n+\n+\targs.ops = VIRTCHNL_OP_VERSION;\n+\targs.in_args = (uint8_t *)&version;\n+\targs.in_args_size = sizeof(version);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err != 0) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to execute command of VIRTCHNL_OP_VERSION\");\n+\t\treturn err;\n+\t}\n+\n+\tpver = (struct virtchnl2_version_info *)args.out_buffer;\n+\tadapter->virtchnl_version = *pver;\n+\n+\tif (adapter->virtchnl_version.major != VIRTCHNL2_VERSION_MAJOR_2 ||\n+\t    adapter->virtchnl_version.minor != VIRTCHNL2_VERSION_MINOR_0) {\n+\t\tPMD_INIT_LOG(ERR, \"VIRTCHNL API version mismatch:(%u.%u)-(%u.%u)\",\n+\t\t\t     adapter->virtchnl_version.major,\n+\t\t\t     adapter->virtchnl_version.minor,\n+\t\t\t     VIRTCHNL2_VERSION_MAJOR_2,\n+\t\t\t     VIRTCHNL2_VERSION_MINOR_0);\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_vc_get_caps(struct idpf_adapter *adapter)\n+{\n+\tstruct virtchnl2_get_capabilities caps_msg;\n+\tstruct idpf_cmd_info args;\n+\tint err;\n+\n+\t memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities));\n+\t caps_msg.csum_caps =\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L3_IPV4\t\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_GENERIC\t\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L3_IPV4\t\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_GENERIC;\n+\n+\t caps_msg.seg_caps =\n+\t\t VIRTCHNL2_CAP_SEG_IPV4_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV4_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV4_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV6_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV6_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV6_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_GENERIC;\n+\n+\t caps_msg.rss_caps =\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_OTHER\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_OTHER\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_AH\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_ESP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_AH_ESP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_AH\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_ESP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_AH_ESP;\n+\n+\t caps_msg.other_caps =\n+\t\t VIRTCHNL2_CAP_SPLITQ_QSCHED\t\t|\n+\t\t VIRTCHNL2_CAP_CRC\t\t\t|\n+\t\t VIRTCHNL2_CAP_WB_ON_ITR\t\t|\n+\t\t VIRTCHNL2_CAP_PROMISC\t\t\t|\n+\t\t VIRTCHNL2_CAP_LINK_SPEED\t\t|\n+\t\t VIRTCHNL2_CAP_VLAN;\n+\n+\targs.ops = VIRTCHNL2_OP_GET_CAPS;\n+\targs.in_args = (uint8_t *)&caps_msg;\n+\targs.in_args_size = sizeof(caps_msg);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err != 0) 
{\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to execute command of VIRTCHNL2_OP_GET_CAPS\");\n+\t\treturn err;\n+\t}\n+\n+\trte_memcpy(adapter->caps, args.out_buffer, sizeof(caps_msg));\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_vc_create_vport(struct idpf_adapter *adapter)\n+{\n+\tuint16_t idx = adapter->cur_vport_idx;\n+\tstruct virtchnl2_create_vport *vport_req_info =\n+\t\t(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];\n+\tstruct virtchnl2_create_vport vport_msg;\n+\tstruct idpf_cmd_info args;\n+\tint err = -1;\n+\n+\tmemset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));\n+\tvport_msg.vport_type = vport_req_info->vport_type;\n+\tvport_msg.txq_model = vport_req_info->txq_model;\n+\tvport_msg.rxq_model = vport_req_info->rxq_model;\n+\tvport_msg.num_tx_q = vport_req_info->num_tx_q;\n+\tvport_msg.num_tx_complq = vport_req_info->num_tx_complq;\n+\tvport_msg.num_rx_q = vport_req_info->num_rx_q;\n+\tvport_msg.num_rx_bufq = vport_req_info->num_rx_bufq;\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_CREATE_VPORT;\n+\targs.in_args = (uint8_t *)&vport_msg;\n+\targs.in_args_size = sizeof(vport_msg);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err != 0) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT\");\n+\t\treturn err;\n+\t}\n+\n+\tif (adapter->vport_recv_info[idx] == NULL) {\n+\t\tadapter->vport_recv_info[idx] = rte_zmalloc(NULL,\n+\t\t\t\t\t\t    IDPF_DFLT_MBX_BUF_SIZE, 0);\n+\t\tif (adapter->vport_recv_info[idx] == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to alloc vport_recv_info.\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\trte_memcpy(adapter->vport_recv_info[idx], args.out_buffer,\n+\t\t   IDPF_DFLT_MBX_BUF_SIZE);\n+\treturn 0;\n+}\n+\n+int\n+idpf_vc_destroy_vport(struct idpf_vport *vport)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_vport vc_vport;\n+\tstruct idpf_cmd_info args;\n+\tint err;\n+\n+\tvc_vport.vport_id = vport->vport_id;\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_DESTROY_VPORT;\n+\targs.in_args = (uint8_t *)&vc_vport;\n+\targs.in_args_size = sizeof(vc_vport);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err != 0)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT\");\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_vport vc_vport;\n+\tstruct idpf_cmd_info args;\n+\tint err;\n+\n+\tvc_vport.vport_id = vport->vport_id;\n+\targs.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT :\n+\t\t\t    VIRTCHNL2_OP_DISABLE_VPORT;\n+\targs.in_args = (u8 *)&vc_vport;\n+\targs.in_args_size = sizeof(vc_vport);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err != 0) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_%s_VPORT\",\n+\t\t\t    enable ? 
\"ENABLE\" : \"DISABLE\");\n+\t}\n+\n+\treturn err;\n+}\ndiff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build\nnew file mode 100644\nindex 0000000000..ecf73355c3\n--- /dev/null\n+++ b/drivers/net/idpf/meson.build\n@@ -0,0 +1,15 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2022 Intel Corporation\n+\n+if is_windows\n+    build = false\n+    reason = 'not supported on Windows'\n+    subdir_done()\n+endif\n+\n+deps += ['common_idpf']\n+\n+sources = files(\n+    'idpf_ethdev.c',\n+    'idpf_vchnl.c',\n+)\ndiff --git a/drivers/net/idpf/version.map b/drivers/net/idpf/version.map\nnew file mode 100644\nindex 0000000000..78c3585d7c\n--- /dev/null\n+++ b/drivers/net/idpf/version.map\n@@ -0,0 +1,3 @@\n+DPDK_23 {\n+\tlocal: *;\n+};\ndiff --git a/drivers/net/meson.build b/drivers/net/meson.build\nindex 35bfa78dee..4a951b95f2 100644\n--- a/drivers/net/meson.build\n+++ b/drivers/net/meson.build\n@@ -28,6 +28,7 @@ drivers = [\n         'i40e',\n         'iavf',\n         'ice',\n+        'idpf',\n         'igc',\n         'ionic',\n         'ipn3ke',\n",
    "prefixes": [
        "v13",
        "02/18"
    ]
}
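As a follow-up sketch, the URLs embedded in the response above can be used directly to pull the raw patch and its series in mbox form. This again assumes the Python "requests" library and reads only fields ("mbox", "series", "state", "check") that appear in the JSON.

import requests

patch = requests.get("http://patchwork.dpdk.org/api/patches/119197/").json()

# Raw patch in mbox form, suitable for `git am`.
mbox = requests.get(patch["mbox"]).text

# The whole v13 series can be fetched the same way via its "mbox" URL.
series_mbox = requests.get(patch["series"][0]["mbox"]).text

print(patch["state"], patch["check"], len(mbox), len(series_mbox))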