get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Update a patch (full update; all writable fields are replaced).

GET /api/patches/118874/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118874,
    "url": "http://patchwork.dpdk.org/api/patches/118874/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221021051821.2164939-3-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221021051821.2164939-3-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221021051821.2164939-3-junfeng.guo@intel.com",
    "date": "2022-10-21T05:18:09",
    "name": "[v9,02/14] net/idpf: add support for device initialization",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "cced2b7cab3237fce1aa8554058c3f8e971b488a",
    "submitter": {
        "id": 1785,
        "url": "http://patchwork.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221021051821.2164939-3-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25359,
            "url": "http://patchwork.dpdk.org/api/series/25359/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25359",
            "date": "2022-10-21T05:18:07",
            "name": "add support for idpf PMD in DPDK",
            "version": 9,
            "mbox": "http://patchwork.dpdk.org/series/25359/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/118874/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/118874/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 980B7A0552;\n\tFri, 21 Oct 2022 07:20:07 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 2AE1042BA3;\n\tFri, 21 Oct 2022 07:19:58 +0200 (CEST)",
            "from mga07.intel.com (mga07.intel.com [134.134.136.100])\n by mails.dpdk.org (Postfix) with ESMTP id AF15542B84\n for <dev@dpdk.org>; Fri, 21 Oct 2022 07:19:54 +0200 (CEST)",
            "from fmsmga001.fm.intel.com ([10.253.24.23])\n by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 20 Oct 2022 22:19:54 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by fmsmga001.fm.intel.com with ESMTP; 20 Oct 2022 22:19:51 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666329594; x=1697865594;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=rmvNq9ueBKjvuaa0lc3zBy83jOaloAmleQBTujtoxvw=;\n b=KM93tuvngvhfQdaUK0cJE1C8kxu8KD5rx0weqMG2i+5uxS80DwkFrZCj\n ZunMYke3l6isL191sMHjTwUNEBVH7zuJ3HnlRkT2D0u5pmRLxbtfgICRC\n sxE4p/m9H7NHkUDUogTrY8CVlLxI/z7xZG5YRUXCw/iri75b26mYSEzQx\n GgnLUXLE1nW38Fx8YOsLV20yh4ZOE2Ey8apCowAgDj4F6zoDc6qbeq1be\n TDvZ5oodTVHZ6jkRVC/hn0QbRwGBbby4c6B4KYE6IqwvKq8d1++761mJO\n 5KktCEbaWr8BxWDIQGghQl+eqN6dPMMWbxDzYxqrx4ADmYvxWJNSfaMb/ Q==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10506\"; a=\"371128544\"",
            "E=Sophos;i=\"5.95,200,1661842800\"; d=\"scan'208\";a=\"371128544\"",
            "E=McAfee;i=\"6500,9779,10506\"; a=\"772826272\"",
            "E=Sophos;i=\"5.95,200,1661842800\"; d=\"scan'208\";a=\"772826272\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,\n jingjing.wu@intel.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,\n Xiaoyun Li <xiaoyun.li@intel.com>, Xiao Wang <xiao.w.wang@intel.com>",
        "Subject": "[PATCH v9 02/14] net/idpf: add support for device initialization",
        "Date": "Fri, 21 Oct 2022 13:18:09 +0800",
        "Message-Id": "<20221021051821.2164939-3-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221021051821.2164939-1-junfeng.guo@intel.com>",
        "References": "<20221020062951.645121-2-junfeng.guo@intel.com>\n <20221021051821.2164939-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Support device init and add the following dev ops skeleton:\n - dev_configure\n - dev_start\n - dev_stop\n - dev_close\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Xiao Wang <xiao.w.wang@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n MAINTAINERS                            |   9 +\n doc/guides/nics/features/idpf.ini      |  15 +\n doc/guides/nics/idpf.rst               |  52 ++\n doc/guides/nics/index.rst              |   1 +\n doc/guides/rel_notes/release_22_11.rst |   6 +\n drivers/net/idpf/idpf_ethdev.c         | 774 +++++++++++++++++++++++++\n drivers/net/idpf/idpf_ethdev.h         | 206 +++++++\n drivers/net/idpf/idpf_logs.h           |  42 ++\n drivers/net/idpf/idpf_vchnl.c          | 495 ++++++++++++++++\n drivers/net/idpf/meson.build           |  16 +\n drivers/net/idpf/version.map           |   3 +\n drivers/net/meson.build                |   1 +\n 12 files changed, 1620 insertions(+)\n create mode 100644 doc/guides/nics/features/idpf.ini\n create mode 100644 doc/guides/nics/idpf.rst\n create mode 100644 drivers/net/idpf/idpf_ethdev.c\n create mode 100644 drivers/net/idpf/idpf_ethdev.h\n create mode 100644 drivers/net/idpf/idpf_logs.h\n create mode 100644 drivers/net/idpf/idpf_vchnl.c\n create mode 100644 drivers/net/idpf/meson.build\n create mode 100644 drivers/net/idpf/version.map",
    "diff": "diff --git a/MAINTAINERS b/MAINTAINERS\nindex 92b381bc30..34f8b9cc61 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -764,6 +764,15 @@ F: drivers/net/ice/\n F: doc/guides/nics/ice.rst\n F: doc/guides/nics/features/ice.ini\n \n+Intel idpf\n+M: Jingjing Wu <jingjing.wu@intel.com>\n+M: Beilei Xing <beilei.xing@intel.com>\n+T: git://dpdk.org/next/dpdk-next-net-intel\n+F: drivers/net/idpf/\n+F: drivers/common/idpf/\n+F: doc/guides/nics/idpf.rst\n+F: doc/guides/nics/features/idpf.ini\n+\n Intel igc\n M: Junfeng Guo <junfeng.guo@intel.com>\n M: Simei Su <simei.su@intel.com>\ndiff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini\nnew file mode 100644\nindex 0000000000..f029a279b3\n--- /dev/null\n+++ b/doc/guides/nics/features/idpf.ini\n@@ -0,0 +1,15 @@\n+;\n+; Supported features of the 'idpf' network poll mode driver.\n+;\n+; Refer to default.ini for the full list of available PMD features.\n+;\n+; A feature with \"P\" indicates only be supported when non-vector path\n+; is selected.\n+;\n+[Features]\n+Multiprocess aware   = Y\n+FreeBSD              = Y\n+Linux                = Y\n+Windows              = Y\n+x86-32               = Y\n+x86-64               = Y\ndiff --git a/doc/guides/nics/idpf.rst b/doc/guides/nics/idpf.rst\nnew file mode 100644\nindex 0000000000..428bf4266a\n--- /dev/null\n+++ b/doc/guides/nics/idpf.rst\n@@ -0,0 +1,52 @@\n+..  
SPDX-License-Identifier: BSD-3-Clause\n+    Copyright(c) 2022 Intel Corporation.\n+\n+IDPF Poll Mode Driver\n+======================\n+\n+The [*EXPERIMENTAL*] idpf PMD (**librte_net_idpf**) provides poll mode driver support for\n+Intel® Infrastructure Processing Unit (Intel® IPU) E2000.\n+\n+Linux Prerequisites\n+-------------------\n+\n+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.\n+\n+- To get better performance on Intel platforms, please follow the \"How to get best performance with NICs on Intel platforms\"\n+  section of the :ref:`Getting Started Guide for Linux <linux_gsg>`.\n+\n+Windows Prerequisites\n+---------------------\n+\n+- Follow the :doc:`guide for Windows <../windows_gsg/run_apps>`\n+  to setup the basic DPDK environment.\n+\n+- Identify the Intel® Ethernet adapter and get the latest NVM/FW version.\n+\n+- To access any Intel® Ethernet hardware, load the NetUIO driver in place of existing built-in (inbox) driver.\n+\n+- To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository\n+  <https://git.dpdk.org/dpdk-kmods/tree/windows/netuio/README.rst>`_.\n+\n+Pre-Installation Configuration\n+------------------------------\n+\n+Runtime Config Options\n+~~~~~~~~~~~~~~~~~~~~~~\n+\n+- ``vport`` (default ``not create ethdev``)\n+\n+  The IDPF PMD supports creation of multiple vports for one PCI device, each vport\n+  corresponds to a single ethdev. 
Using the ``devargs`` parameter ``vport`` the user\n+  can specify the vports with specific ID to be created, for example::\n+\n+    -a ca:00.0,vport=[0,2,3]\n+\n+  Then idpf PMD will create 3 vports (ethdevs) for device ca:00.0.\n+  NOTE: This parameter is MUST, otherwise there'll be no any ethdev created.\n+\n+Driver compilation and testing\n+------------------------------\n+\n+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`\n+for details.\ndiff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst\nindex 32c7544968..b4dd5ea522 100644\n--- a/doc/guides/nics/index.rst\n+++ b/doc/guides/nics/index.rst\n@@ -33,6 +33,7 @@ Network Interface Controller Drivers\n     hns3\n     i40e\n     ice\n+    idpf\n     igb\n     igc\n     ionic\ndiff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst\nindex 1c3daf141d..736fec6dcb 100644\n--- a/doc/guides/rel_notes/release_22_11.rst\n+++ b/doc/guides/rel_notes/release_22_11.rst\n@@ -260,6 +260,12 @@ New Features\n \n      strings $dpdk_binary_or_driver | sed -n 's/^PMD_INFO_STRING= //p'\n \n+* **Added IDPF PMD [*EXPERIMENTAL*].**\n+\n+  Added the new ``idpf`` net driver for Intel® Infrastructure Processing Unit\n+  (Intel® IPU) E2000.\n+  See the :doc:`../nics/idpf` NIC guide for more details on this new driver.\n+\n \n Removed Items\n -------------\ndiff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nnew file mode 100644\nindex 0000000000..7806c43668\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -0,0 +1,774 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#include <rte_atomic.h>\n+#include <rte_eal.h>\n+#include <rte_ether.h>\n+#include <rte_malloc.h>\n+#include <rte_memzone.h>\n+#include <rte_dev.h>\n+\n+#include \"idpf_ethdev.h\"\n+\n+#define IDPF_VPORT\t\t\"vport\"\n+\n+struct idpf_adapter_list adapter_list;\n+bool adapter_list_init;\n+\n+static const char * 
const idpf_valid_args[] = {\n+\tIDPF_VPORT,\n+\tNULL\n+};\n+\n+static int idpf_dev_configure(struct rte_eth_dev *dev);\n+static int idpf_dev_start(struct rte_eth_dev *dev);\n+static int idpf_dev_stop(struct rte_eth_dev *dev);\n+static int idpf_dev_close(struct rte_eth_dev *dev);\n+static void idpf_adapter_rel(struct idpf_adapter *adapter);\n+\n+int\n+idpf_dev_link_update(struct rte_eth_dev *dev,\n+\t\t     __rte_unused int wait_to_complete)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct rte_eth_link new_link;\n+\n+\tmemset(&new_link, 0, sizeof(new_link));\n+\n+\tnew_link.link_speed = RTE_ETH_SPEED_NUM_NONE;\n+\n+\tnew_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;\n+\tnew_link.link_status = vport->link_up ? RTE_ETH_LINK_UP :\n+\t\tRTE_ETH_LINK_DOWN;\n+\tnew_link.link_autoneg = !(dev->data->dev_conf.link_speeds &\n+\t\t\t\t  RTE_ETH_LINK_SPEED_FIXED);\n+\n+\treturn rte_eth_linkstatus_set(dev, &new_link);\n+}\n+\n+static const struct eth_dev_ops idpf_eth_dev_ops = {\n+\t.dev_configure\t\t\t= idpf_dev_configure,\n+\t.dev_start\t\t\t= idpf_dev_start,\n+\t.dev_stop\t\t\t= idpf_dev_stop,\n+\t.dev_close\t\t\t= idpf_dev_close,\n+\t.link_update\t\t\t= idpf_dev_link_update,\n+};\n+\n+static int\n+idpf_init_vport_req_info(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_create_vport *vport_info;\n+\tuint16_t idx = adapter->cur_vport_idx;\n+\n+\tif (idx == IDPF_INVALID_VPORT_IDX) {\n+\t\tPMD_INIT_LOG(ERR, \"Invalid vport index.\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (!adapter->vport_req_info[idx]) {\n+\t\tadapter->vport_req_info[idx] = rte_zmalloc(NULL,\n+\t\t\t\tsizeof(struct virtchnl2_create_vport), 0);\n+\t\tif (!adapter->vport_req_info[idx]) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vport_req_info\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tvport_info =\n+\t\t(struct virtchnl2_create_vport 
*)adapter->vport_req_info[idx];\n+\n+\tvport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);\n+\n+\treturn 0;\n+}\n+\n+static uint16_t\n+idpf_parse_devarg_id(char *name)\n+{\n+\tuint16_t val;\n+\tchar *p;\n+\n+\tp = strstr(name, \"vport_\");\n+\tp += sizeof(\"vport_\") - 1;\n+\n+\tval = strtoul(p, NULL, 10);\n+\n+\treturn val;\n+}\n+\n+static int\n+idpf_init_vport(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tuint16_t idx = adapter->cur_vport_idx;\n+\tstruct virtchnl2_create_vport *vport_info =\n+\t\t(struct virtchnl2_create_vport *)adapter->vport_recv_info[idx];\n+\tint i;\n+\n+\tvport->vport_id = vport_info->vport_id;\n+\tvport->num_tx_q = vport_info->num_tx_q;\n+\tvport->num_rx_q = vport_info->num_rx_q;\n+\tvport->max_mtu = vport_info->max_mtu;\n+\trte_memcpy(vport->default_mac_addr,\n+\t\t   vport_info->default_mac_addr, ETH_ALEN);\n+\tvport->sw_idx = idx;\n+\n+\tfor (i = 0; i < vport_info->chunks.num_chunks; i++) {\n+\t\tif (vport_info->chunks.chunks[i].type ==\n+\t\t    VIRTCHNL2_QUEUE_TYPE_TX) {\n+\t\t\tvport->chunks_info.tx_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.tx_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.tx_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t} else if (vport_info->chunks.chunks[i].type ==\n+\t\t\t VIRTCHNL2_QUEUE_TYPE_RX) {\n+\t\t\tvport->chunks_info.rx_start_qid =\n+\t\t\t\tvport_info->chunks.chunks[i].start_queue_id;\n+\t\t\tvport->chunks_info.rx_qtail_start =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_start;\n+\t\t\tvport->chunks_info.rx_qtail_spacing =\n+\t\t\t\tvport_info->chunks.chunks[i].qtail_reg_spacing;\n+\t\t}\n+\t}\n+\n+\tvport->devarg_id = idpf_parse_devarg_id(dev->data->name);\n+\tvport->dev_data = dev->data;\n+\tvport->stopped = 1;\n+\n+\tadapter->vports[idx] = 
vport;\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_dev_configure(__rte_unused struct rte_eth_dev *dev)\n+{\n+\treturn 0;\n+}\n+\n+static int\n+idpf_dev_start(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tvport->stopped = 0;\n+\n+\tif (idpf_vc_ena_dis_vport(vport, true)) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to enable vport\");\n+\t\tgoto err_vport;\n+\t}\n+\n+\treturn 0;\n+\n+err_vport:\n+\treturn -1;\n+}\n+\n+static int\n+idpf_dev_stop(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (vport->stopped == 1)\n+\t\treturn 0;\n+\n+\tif (idpf_vc_ena_dis_vport(vport, false))\n+\t\tPMD_DRV_LOG(ERR, \"disable vport failed\");\n+\n+\tvport->stopped = 1;\n+\tdev->data->dev_started = 0;\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_dev_close(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn 0;\n+\n+\tidpf_dev_stop(dev);\n+\tidpf_vc_destroy_vport(vport);\n+\n+\tadapter->cur_vports &= ~BIT(vport->devarg_id);\n+\n+\trte_free(vport);\n+\tdev->data->dev_private = NULL;\n+\n+\treturn 0;\n+}\n+\n+static int\n+insert_value(struct idpf_adapter *adapter, uint16_t id)\n+{\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < adapter->req_vport_nb; i++) {\n+\t\tif (adapter->req_vports[i] == id)\n+\t\t\treturn 0;\n+\t}\n+\n+\tif (adapter->req_vport_nb >= RTE_DIM(adapter->req_vports)) {\n+\t\tPMD_INIT_LOG(ERR, \"Total vport number can't be > %d\",\n+\t\t\t     IDPF_MAX_VPORT_NUM);\n+\t\treturn -1;\n+\t}\n+\n+\tadapter->req_vports[adapter->req_vport_nb] = id;\n+\tadapter->req_vport_nb++;\n+\n+\treturn 0;\n+}\n+\n+static const char *\n+parse_range(const char *value, struct idpf_adapter *adapter)\n+{\n+\tuint16_t lo, hi, i;\n+\tint n = 0;\n+\tint result;\n+\tconst char *pos = value;\n+\n+\tresult = sscanf(value, 
\"%hu%n-%hu%n\", &lo, &n, &hi, &n);\n+\tif (result == 1) {\n+\t\tif (lo >= IDPF_MAX_VPORT_NUM)\n+\t\t\treturn NULL;\n+\t\tif (insert_value(adapter, lo))\n+\t\t\treturn NULL;\n+\t} else if (result == 2) {\n+\t\tif (lo > hi || hi >= IDPF_MAX_VPORT_NUM)\n+\t\t\treturn NULL;\n+\t\tfor (i = lo; i <= hi; i++) {\n+\t\t\tif (insert_value(adapter, i))\n+\t\t\t\treturn NULL;\n+\t\t}\n+\t} else {\n+\t\treturn NULL;\n+\t}\n+\n+\treturn pos + n;\n+}\n+\n+static int\n+parse_vport(const char *key, const char *value, void *args)\n+{\n+\tstruct idpf_adapter *adapter = (struct idpf_adapter *)args;\n+\tconst char *pos = value;\n+\tint i;\n+\n+\tadapter->req_vport_nb = 0;\n+\n+\tif (*pos == '[')\n+\t\tpos++;\n+\n+\twhile (1) {\n+\t\tpos = parse_range(pos, adapter);\n+\t\tif (pos == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"invalid value:\\\"%s\\\" for key:\\\"%s\\\", \",\n+\t\t\t\t     value, key);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (*pos != ',')\n+\t\t\tbreak;\n+\t\tpos++;\n+\t}\n+\n+\tif (*value == '[' && *pos != ']') {\n+\t\tPMD_INIT_LOG(ERR, \"invalid value:\\\"%s\\\" for key:\\\"%s\\\", \",\n+\t\t\t     value, key);\n+\t\treturn -1;\n+\t}\n+\n+\tif (adapter->cur_vport_nb + adapter->req_vport_nb >\n+\t    IDPF_MAX_VPORT_NUM) {\n+\t\tPMD_INIT_LOG(ERR, \"Total vport number can't be > %d\",\n+\t\t\t     IDPF_MAX_VPORT_NUM);\n+\t\treturn -1;\n+\t}\n+\n+\tfor (i = 0; i < adapter->req_vport_nb; i++) {\n+\t\tif (!(adapter->cur_vports & BIT(adapter->req_vports[i]))) {\n+\t\t\tadapter->cur_vports |= BIT(adapter->req_vports[i]);\n+\t\t\tadapter->cur_vport_nb++;\n+\t\t} else {\n+\t\t\tPMD_INIT_LOG(ERR, \"Vport %d has been created\",\n+\t\t\t\t     adapter->req_vports[i]);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)\n+{\n+\tstruct rte_devargs *devargs = pci_dev->device.devargs;\n+\tstruct rte_kvargs *kvlist;\n+\tint ret;\n+\n+\tif (!devargs)\n+\t\treturn 0;\n+\n+\tkvlist = 
rte_kvargs_parse(devargs->args, idpf_valid_args);\n+\tif (!kvlist) {\n+\t\tPMD_INIT_LOG(ERR, \"invalid kvargs key\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,\n+\t\t\t\t adapter);\n+\tif (ret)\n+\t\tgoto bail;\n+\n+bail:\n+\trte_kvargs_free(kvlist);\n+\treturn ret;\n+}\n+\n+static void\n+idpf_reset_pf(struct idpf_hw *hw)\n+{\n+\tuint32_t reg;\n+\n+\treg = IDPF_READ_REG(hw, PFGEN_CTRL);\n+\tIDPF_WRITE_REG(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));\n+}\n+\n+#define IDPF_RESET_WAIT_CNT 100\n+static int\n+idpf_check_pf_reset_done(struct idpf_hw *hw)\n+{\n+\tuint32_t reg;\n+\tint i;\n+\n+\tfor (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {\n+\t\treg = IDPF_READ_REG(hw, PFGEN_RSTAT);\n+\t\tif (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))\n+\t\t\treturn 0;\n+\t\trte_delay_ms(1000);\n+\t}\n+\n+\tPMD_INIT_LOG(ERR, \"IDPF reset timeout\");\n+\treturn -EBUSY;\n+}\n+\n+#define CTLQ_NUM 2\n+static int\n+idpf_init_mbx(struct idpf_hw *hw)\n+{\n+\tstruct idpf_ctlq_create_info ctlq_info[CTLQ_NUM] = {\n+\t\t{\n+\t\t\t.type = IDPF_CTLQ_TYPE_MAILBOX_TX,\n+\t\t\t.id = IDPF_CTLQ_ID,\n+\t\t\t.len = IDPF_CTLQ_LEN,\n+\t\t\t.buf_size = IDPF_DFLT_MBX_BUF_SIZE,\n+\t\t\t.reg = {\n+\t\t\t\t.head = PF_FW_ATQH,\n+\t\t\t\t.tail = PF_FW_ATQT,\n+\t\t\t\t.len = PF_FW_ATQLEN,\n+\t\t\t\t.bah = PF_FW_ATQBAH,\n+\t\t\t\t.bal = PF_FW_ATQBAL,\n+\t\t\t\t.len_mask = PF_FW_ATQLEN_ATQLEN_M,\n+\t\t\t\t.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,\n+\t\t\t\t.head_mask = PF_FW_ATQH_ATQH_M,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type = IDPF_CTLQ_TYPE_MAILBOX_RX,\n+\t\t\t.id = IDPF_CTLQ_ID,\n+\t\t\t.len = IDPF_CTLQ_LEN,\n+\t\t\t.buf_size = IDPF_DFLT_MBX_BUF_SIZE,\n+\t\t\t.reg = {\n+\t\t\t\t.head = PF_FW_ARQH,\n+\t\t\t\t.tail = PF_FW_ARQT,\n+\t\t\t\t.len = PF_FW_ARQLEN,\n+\t\t\t\t.bah = PF_FW_ARQBAH,\n+\t\t\t\t.bal = PF_FW_ARQBAL,\n+\t\t\t\t.len_mask = PF_FW_ARQLEN_ARQLEN_M,\n+\t\t\t\t.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,\n+\t\t\t\t.head_mask = 
PF_FW_ARQH_ARQH_M,\n+\t\t\t}\n+\t\t}\n+\t};\n+\tstruct idpf_ctlq_info *ctlq;\n+\tint ret;\n+\n+\tret = idpf_ctlq_init(hw, CTLQ_NUM, ctlq_info);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tLIST_FOR_EACH_ENTRY_SAFE(ctlq, NULL, &hw->cq_list_head,\n+\t\t\t\t struct idpf_ctlq_info, cq_list) {\n+\t\tif (ctlq->q_id == IDPF_CTLQ_ID &&\n+\t\t    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)\n+\t\t\thw->asq = ctlq;\n+\t\tif (ctlq->q_id == IDPF_CTLQ_ID &&\n+\t\t    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)\n+\t\t\thw->arq = ctlq;\n+\t}\n+\n+\tif (!hw->asq || !hw->arq) {\n+\t\tidpf_ctlq_deinit(hw);\n+\t\tret = -ENOENT;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n+idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)\n+{\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tint ret = 0;\n+\n+\thw->hw_addr = (void *)pci_dev->mem_resource[0].addr;\n+\thw->hw_addr_len = pci_dev->mem_resource[0].len;\n+\thw->back = adapter;\n+\thw->vendor_id = pci_dev->id.vendor_id;\n+\thw->device_id = pci_dev->id.device_id;\n+\thw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;\n+\n+\tstrncpy(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE);\n+\n+\tidpf_reset_pf(hw);\n+\tret = idpf_check_pf_reset_done(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"IDPF is still resetting\");\n+\t\tgoto err;\n+\t}\n+\n+\tret = idpf_init_mbx(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init mailbox\");\n+\t\tgoto err;\n+\t}\n+\n+\tadapter->mbx_resp = rte_zmalloc(\"idpf_adapter_mbx_resp\",\n+\t\t\t\t\tIDPF_DFLT_MBX_BUF_SIZE, 0);\n+\tif (!adapter->mbx_resp) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate idpf_adapter_mbx_resp memory\");\n+\t\tgoto err_mbx;\n+\t}\n+\n+\tif (idpf_vc_check_api_version(adapter)) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to check api version\");\n+\t\tgoto err_api;\n+\t}\n+\n+\tadapter->caps = rte_zmalloc(\"idpf_caps\",\n+\t\t\t\tsizeof(struct virtchnl2_get_capabilities), 0);\n+\tif (!adapter->caps) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate idpf_caps 
memory\");\n+\t\tgoto err_api;\n+\t}\n+\n+\tif (idpf_vc_get_caps(adapter)) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to get capabilities\");\n+\t\tgoto err_caps;\n+\t}\n+\n+\tadapter->max_vport_nb = adapter->caps->max_vports;\n+\n+\tadapter->vport_req_info = rte_zmalloc(\"vport_req_info\",\n+\t\t\t\t\t      adapter->max_vport_nb *\n+\t\t\t\t\t      sizeof(*adapter->vport_req_info),\n+\t\t\t\t\t      0);\n+\tif (!adapter->vport_req_info) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vport_req_info memory\");\n+\t\tgoto err_caps;\n+\t}\n+\n+\tadapter->vport_recv_info = rte_zmalloc(\"vport_recv_info\",\n+\t\t\t\t\t       adapter->max_vport_nb *\n+\t\t\t\t\t       sizeof(*adapter->vport_recv_info),\n+\t\t\t\t\t       0);\n+\tif (!adapter->vport_recv_info) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vport_recv_info memory\");\n+\t\tgoto err_vport_recv_info;\n+\t}\n+\n+\tadapter->vports = rte_zmalloc(\"vports\",\n+\t\t\t\t      adapter->max_vport_nb *\n+\t\t\t\t      sizeof(*adapter->vports),\n+\t\t\t\t      0);\n+\tif (!adapter->vports) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate vports memory\");\n+\t\tgoto err_vports;\n+\t}\n+\n+\tadapter->max_rxq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -\n+\t\t\t\tsizeof(struct virtchnl2_config_rx_queues)) /\n+\t\t\t\tsizeof(struct virtchnl2_rxq_info);\n+\tadapter->max_txq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -\n+\t\t\t\tsizeof(struct virtchnl2_config_tx_queues)) /\n+\t\t\t\tsizeof(struct virtchnl2_txq_info);\n+\n+\tadapter->cur_vports = 0;\n+\tadapter->cur_vport_nb = 0;\n+\n+\treturn ret;\n+\n+err_vports:\n+\trte_free(adapter->vport_recv_info);\n+\tadapter->vport_recv_info = NULL;\n+err_vport_recv_info:\n+\trte_free(adapter->vport_req_info);\n+\tadapter->vport_req_info = NULL;\n+err_caps:\n+\trte_free(adapter->caps);\n+\tadapter->caps = NULL;\n+err_api:\n+\trte_free(adapter->mbx_resp);\n+\tadapter->mbx_resp = NULL;\n+err_mbx:\n+\tidpf_ctlq_deinit(hw);\n+err:\n+\treturn -1;\n+}\n+\n+static uint16_t\n+idpf_get_vport_idx(struct idpf_vport 
**vports, uint16_t max_vport_nb)\n+{\n+\tuint16_t vport_idx;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < max_vport_nb; i++) {\n+\t\tif (!vports[i])\n+\t\t\tbreak;\n+\t}\n+\n+\tif (i == max_vport_nb)\n+\t\tvport_idx = IDPF_INVALID_VPORT_IDX;\n+\telse\n+\t\tvport_idx = i;\n+\n+\treturn vport_idx;\n+}\n+\n+static int\n+idpf_dev_init(struct rte_eth_dev *dev, void *init_params)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = init_params;\n+\tint ret = 0;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tdev->dev_ops = &idpf_eth_dev_ops;\n+\tvport->adapter = adapter;\n+\n+\t/* for secondary processes, we don't initialise any further as primary\n+\t * has already done this work.\n+\t */\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn ret;\n+\n+\tdev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;\n+\n+\tret = idpf_init_vport_req_info(dev);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init vport req_info.\");\n+\t\tgoto err;\n+\t}\n+\n+\tret = idpf_vc_create_vport(dev);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to create vport.\");\n+\t\tgoto err_create_vport;\n+\t}\n+\n+\tret = idpf_init_vport(dev);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init vports.\");\n+\t\tgoto err_init_vport;\n+\t}\n+\n+\tadapter->cur_vport_idx = idpf_get_vport_idx(adapter->vports,\n+\t\t\t\t\t\t    adapter->max_vport_nb);\n+\n+\tdev->data->mac_addrs = rte_zmalloc(NULL, RTE_ETHER_ADDR_LEN, 0);\n+\tif (dev->data->mac_addrs == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Cannot allocate mac_addr memory.\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err_init_vport;\n+\t}\n+\n+\trte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,\n+\t\t\t    &dev->data->mac_addrs[0]);\n+\n+\treturn 0;\n+\n+err_init_vport:\n+\tidpf_vc_destroy_vport(vport);\n+err_create_vport:\n+\trte_free(vport->adapter->vport_req_info[vport->adapter->cur_vport_idx]);\n+err:\n+\treturn ret;\n+}\n+\n+static const struct rte_pci_id pci_id_idpf_map[] = {\n+\t{ 
RTE_PCI_DEVICE(IDPF_INTEL_VENDOR_ID, IDPF_DEV_ID_PF) },\n+\t{ .vendor_id = 0, /* sentinel */ },\n+};\n+\n+struct idpf_adapter *\n+idpf_find_adapter(struct rte_pci_device *pci_dev)\n+{\n+\tstruct idpf_adapter *adapter;\n+\n+\tTAILQ_FOREACH(adapter, &adapter_list, next) {\n+\t\tif (!strncmp(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE))\n+\t\t\treturn adapter;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static int\n+idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n+\t       struct rte_pci_device *pci_dev)\n+{\n+\tstruct idpf_adapter *adapter;\n+\tchar name[RTE_ETH_NAME_MAX_LEN];\n+\tint i, retval;\n+\tbool first_probe = FALSE;\n+\n+\tif (!adapter_list_init) {\n+\t\tTAILQ_INIT(&adapter_list);\n+\t\tadapter_list_init = true;\n+\t}\n+\n+\tadapter = idpf_find_adapter(pci_dev);\n+\tif (!adapter) {\n+\t\tfirst_probe = TRUE;\n+\t\tadapter = (struct idpf_adapter *)rte_zmalloc(\"idpf_adapter\",\n+\t\t\t\t\t\tsizeof(struct idpf_adapter), 0);\n+\t\tif (!adapter) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to allocate adapter.\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tretval = idpf_adapter_init(pci_dev, adapter);\n+\t\tif (retval) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to init adapter.\");\n+\t\t\treturn retval;\n+\t\t}\n+\n+\t\tTAILQ_INSERT_TAIL(&adapter_list, adapter, next);\n+\t}\n+\n+\tretval = idpf_parse_devargs(pci_dev, adapter);\n+\tif (retval) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to parse private devargs\");\n+\t\tgoto err;\n+\t}\n+\n+\tfor (i = 0; i < adapter->req_vport_nb; i++) {\n+\t\tsnprintf(name, sizeof(name), \"idpf_%s_vport_%d\",\n+\t\t\t pci_dev->device.name,\n+\t\t\t adapter->req_vports[i]);\n+\t\tretval = rte_eth_dev_create(&pci_dev->device, name,\n+\t\t\t\t\t    sizeof(struct idpf_vport),\n+\t\t\t\t\t    NULL, NULL, idpf_dev_init,\n+\t\t\t\t\t    adapter);\n+\t\tif (retval)\n+\t\t\tPMD_DRV_LOG(ERR, \"failed to create vport %d\",\n+\t\t\t\t    adapter->req_vports[i]);\n+\t}\n+\n+\treturn 0;\n+\n+err:\n+\tif (first_probe) {\n+\t\tTAILQ_REMOVE(&adapter_list, 
adapter, next);\n+\t\tidpf_adapter_rel(adapter);\n+\t\trte_free(adapter);\n+\t}\n+\treturn retval;\n+}\n+\n+static void\n+idpf_adapter_rel(struct idpf_adapter *adapter)\n+{\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tint i;\n+\n+\tidpf_ctlq_deinit(hw);\n+\n+\trte_free(adapter->caps);\n+\tadapter->caps = NULL;\n+\n+\trte_free(adapter->mbx_resp);\n+\tadapter->mbx_resp = NULL;\n+\n+\tif (adapter->vport_req_info) {\n+\t\tfor (i = 0; i < adapter->max_vport_nb; i++) {\n+\t\t\trte_free(adapter->vport_req_info[i]);\n+\t\t\tadapter->vport_req_info[i] = NULL;\n+\t\t}\n+\t\trte_free(adapter->vport_req_info);\n+\t\tadapter->vport_req_info = NULL;\n+\t}\n+\n+\tif (adapter->vport_recv_info) {\n+\t\tfor (i = 0; i < adapter->max_vport_nb; i++) {\n+\t\t\trte_free(adapter->vport_recv_info[i]);\n+\t\t\tadapter->vport_recv_info[i] = NULL;\n+\t\t}\n+\t\trte_free(adapter->vport_recv_info);\n+\t\tadapter->vport_recv_info = NULL;\n+\t}\n+\n+\trte_free(adapter->vports);\n+\tadapter->vports = NULL;\n+}\n+\n+static int\n+idpf_pci_remove(struct rte_pci_device *pci_dev)\n+{\n+\tstruct idpf_adapter *adapter = idpf_find_adapter(pci_dev);\n+\tuint16_t port_id;\n+\n+\t/* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */\n+\tRTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {\n+\t\t\trte_eth_dev_close(port_id);\n+\t}\n+\n+\tTAILQ_REMOVE(&adapter_list, adapter, next);\n+\tidpf_adapter_rel(adapter);\n+\trte_free(adapter);\n+\n+\treturn 0;\n+}\n+\n+static struct rte_pci_driver rte_idpf_pmd = {\n+\t.id_table\t= pci_id_idpf_map,\n+\t.drv_flags\t= RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_PROBE_AGAIN,\n+\t.probe\t\t= idpf_pci_probe,\n+\t.remove\t\t= idpf_pci_remove,\n+};\n+\n+/**\n+ * Driver initialization routine.\n+ * Invoked once at EAL init time.\n+ * Register itself as the [Poll Mode] Driver of PCI devices.\n+ */\n+RTE_PMD_REGISTER_PCI(net_idpf, rte_idpf_pmd);\n+RTE_PMD_REGISTER_PCI_TABLE(net_idpf, pci_id_idpf_map);\n+RTE_PMD_REGISTER_KMOD_DEP(net_ice, \"* igb_uio | 
uio_pci_generic | vfio-pci\");\n+\n+RTE_LOG_REGISTER_SUFFIX(idpf_logtype_init, init, NOTICE);\n+RTE_LOG_REGISTER_SUFFIX(idpf_logtype_driver, driver, NOTICE);\ndiff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h\nnew file mode 100644\nindex 0000000000..c2d72fae11\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_ethdev.h\n@@ -0,0 +1,206 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_ETHDEV_H_\n+#define _IDPF_ETHDEV_H_\n+\n+#include <stdint.h>\n+#include <rte_mbuf.h>\n+#include <rte_mempool.h>\n+#include <rte_malloc.h>\n+#include <rte_spinlock.h>\n+#include <rte_ethdev.h>\n+#include <rte_kvargs.h>\n+#include <ethdev_driver.h>\n+#include <ethdev_pci.h>\n+\n+#include \"idpf_logs.h\"\n+\n+#include \"idpf_osdep.h\"\n+#include \"idpf_type.h\"\n+#include \"idpf_devids.h\"\n+#include \"idpf_lan_txrx.h\"\n+#include \"idpf_lan_pf_regs.h\"\n+#include \"virtchnl.h\"\n+#include \"virtchnl2.h\"\n+\n+#define IDPF_MAX_VPORT_NUM\t8\n+\n+#define IDPF_DEFAULT_RXQ_NUM\t16\n+#define IDPF_DEFAULT_TXQ_NUM\t16\n+\n+#define IDPF_INVALID_VPORT_IDX\t0xffff\n+#define IDPF_TXQ_PER_GRP\t1\n+#define IDPF_TX_COMPLQ_PER_GRP\t1\n+#define IDPF_RXQ_PER_GRP\t1\n+#define IDPF_RX_BUFQ_PER_GRP\t2\n+\n+#define IDPF_CTLQ_ID\t\t-1\n+#define IDPF_CTLQ_LEN\t\t64\n+#define IDPF_DFLT_MBX_BUF_SIZE\t4096\n+\n+#define IDPF_DFLT_Q_VEC_NUM\t1\n+#define IDPF_DFLT_INTERVAL\t16\n+\n+#define IDPF_MAX_NUM_QUEUES\t256\n+#define IDPF_MIN_BUF_SIZE\t1024\n+#define IDPF_MAX_FRAME_SIZE\t9728\n+#define IDPF_MIN_FRAME_SIZE\t14\n+\n+#define IDPF_NUM_MACADDR_MAX\t64\n+\n+#define IDPF_MAX_PKT_TYPE\t1024\n+\n+#define IDPF_VLAN_TAG_SIZE\t4\n+#define IDPF_ETH_OVERHEAD \\\n+\t(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2)\n+\n+#ifndef ETH_ADDR_LEN\n+#define ETH_ADDR_LEN\t\t6\n+#endif\n+\n+#define IDPF_ADAPTER_NAME_LEN\t(PCI_PRI_STR_SIZE + 1)\n+\n+/* Message type read in virtual channel from PF */\n+enum idpf_vc_result {\n+\tIDPF_MSG_ERR 
= -1, /* Meet error when accessing admin queue */\n+\tIDPF_MSG_NON,      /* Read nothing from admin queue */\n+\tIDPF_MSG_SYS,      /* Read system msg from admin queue */\n+\tIDPF_MSG_CMD,      /* Read async command result */\n+};\n+\n+struct idpf_chunks_info {\n+\tuint32_t tx_start_qid;\n+\tuint32_t rx_start_qid;\n+\n+\tuint64_t tx_qtail_start;\n+\tuint32_t tx_qtail_spacing;\n+\tuint64_t rx_qtail_start;\n+\tuint32_t rx_qtail_spacing;\n+};\n+\n+struct idpf_vport {\n+\tstruct idpf_adapter *adapter; /* Backreference to associated adapter */\n+\tuint16_t vport_id;\n+\tuint16_t num_tx_q;\n+\tuint16_t num_rx_q;\n+\n+\tuint16_t max_mtu;\n+\tuint8_t default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];\n+\n+\tuint16_t sw_idx; /* SW idx */\n+\n+\tstruct rte_eth_dev_data *dev_data; /* Pointer to the device data */\n+\tuint16_t max_pkt_len; /* Maximum packet length */\n+\n+\t/* Chunk info */\n+\tstruct idpf_chunks_info chunks_info;\n+\n+\t/* Event from ipf */\n+\tbool link_up;\n+\tuint32_t link_speed;\n+\n+\tuint16_t devarg_id;\n+\tbool stopped;\n+\tstruct virtchnl2_vport_stats eth_stats_offset;\n+};\n+\n+struct idpf_adapter {\n+\tTAILQ_ENTRY(idpf_adapter) next;\n+\tstruct idpf_hw hw;\n+\tchar name[IDPF_ADAPTER_NAME_LEN];\n+\n+\tstruct virtchnl_version_info virtchnl_version;\n+\tstruct virtchnl2_get_capabilities *caps;\n+\n+\tvolatile enum virtchnl_ops pend_cmd; /* pending command not finished */\n+\tuint32_t cmd_retval; /* return value of the cmd response from ipf */\n+\tuint8_t *mbx_resp; /* buffer to store the mailbox response from ipf */\n+\n+\t/* Vport info */\n+\tuint8_t **vport_req_info;\n+\tuint8_t **vport_recv_info;\n+\tstruct idpf_vport **vports;\n+\tuint16_t max_vport_nb;\n+\tuint16_t req_vports[IDPF_MAX_VPORT_NUM];\n+\tuint16_t req_vport_nb;\n+\tuint16_t cur_vports;\n+\tuint16_t cur_vport_nb;\n+\tuint16_t cur_vport_idx;\n+\n+\tuint16_t used_vecs_num;\n+\n+\t/* Max config queue number per VC message */\n+\tuint32_t max_rxq_per_msg;\n+\tuint32_t 
max_txq_per_msg;\n+\n+\tuint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;\n+\n+\tbool stopped;\n+};\n+\n+TAILQ_HEAD(idpf_adapter_list, idpf_adapter);\n+extern struct idpf_adapter_list adapter_list;\n+\n+#define IDPF_DEV_TO_PCI(eth_dev)\t\t\\\n+\tRTE_DEV_TO_PCI((eth_dev)->device)\n+\n+/* structure used for sending and checking response of virtchnl ops */\n+struct idpf_cmd_info {\n+\tuint32_t ops;\n+\tuint8_t *in_args;       /* buffer for sending */\n+\tuint32_t in_args_size;  /* buffer size for sending */\n+\tuint8_t *out_buffer;    /* buffer for response */\n+\tuint32_t out_size;      /* buffer size for response */\n+};\n+\n+/* notify current command done. Only call in case execute\n+ * _atomic_set_cmd successfully.\n+ */\n+static inline void\n+_notify_cmd(struct idpf_adapter *adapter, int msg_ret)\n+{\n+\tadapter->cmd_retval = msg_ret;\n+\trte_wmb();\n+\tadapter->pend_cmd = VIRTCHNL_OP_UNKNOWN;\n+}\n+\n+/* clear current command. Only call in case execute\n+ * _atomic_set_cmd successfully.\n+ */\n+static inline void\n+_clear_cmd(struct idpf_adapter *adapter)\n+{\n+\trte_wmb();\n+\tadapter->pend_cmd = VIRTCHNL_OP_UNKNOWN;\n+\tadapter->cmd_retval = VIRTCHNL_STATUS_SUCCESS;\n+}\n+\n+/* Check there is pending cmd in execution. If none, set new command. 
*/\n+static inline int\n+_atomic_set_cmd(struct idpf_adapter *adapter, enum virtchnl_ops ops)\n+{\n+\tenum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;\n+\tint ret = __atomic_compare_exchange(&adapter->pend_cmd, &op_unk, &ops,\n+\t\t\t\t\t    0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);\n+\n+\tif (!ret)\n+\t\tPMD_DRV_LOG(ERR, \"There is incomplete cmd %d\", adapter->pend_cmd);\n+\n+\treturn !ret;\n+}\n+\n+struct idpf_adapter *idpf_find_adapter(struct rte_pci_device *pci_dev);\n+int idpf_dev_link_update(struct rte_eth_dev *dev,\n+\t\t\t __rte_unused int wait_to_complete);\n+void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);\n+int idpf_vc_check_api_version(struct idpf_adapter *adapter);\n+int idpf_vc_get_caps(struct idpf_adapter *adapter);\n+int idpf_vc_create_vport(struct rte_eth_dev *dev);\n+int idpf_vc_destroy_vport(struct idpf_vport *vport);\n+int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);\n+int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,\n+\t\t      uint16_t buf_len, uint8_t *buf);\n+\n+#endif /* _IDPF_ETHDEV_H_ */\ndiff --git a/drivers/net/idpf/idpf_logs.h b/drivers/net/idpf/idpf_logs.h\nnew file mode 100644\nindex 0000000000..a64544f86e\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_logs.h\n@@ -0,0 +1,42 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_LOGS_H_\n+#define _IDPF_LOGS_H_\n+\n+#include <rte_log.h>\n+\n+extern int idpf_logtype_init;\n+extern int idpf_logtype_driver;\n+\n+#define PMD_INIT_LOG(level, fmt, args...) \\\n+\trte_log(RTE_LOG_ ## level, idpf_logtype_init, \\\n+\t\t\"%s(): \" fmt \"\\n\", __func__, ##args)\n+\n+#define PMD_INIT_FUNC_TRACE() PMD_DRV_LOG(DEBUG, \" >>\")\n+\n+#define PMD_DRV_LOG_RAW(level, fmt, args...) \\\n+\trte_log(RTE_LOG_ ## level, idpf_logtype_driver, \\\n+\t\t\"%s(): \" fmt \"\\n\", __func__, ##args)\n+\n+#define PMD_DRV_LOG(level, fmt, args...) 
\\\n+\tPMD_DRV_LOG_RAW(level, fmt \"\\n\", ## args)\n+\n+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, \" >>\")\n+\n+#ifdef RTE_LIBRTE_IDPF_DEBUG_RX\n+#define PMD_RX_LOG(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_IDPF_DEBUG_TX\n+#define PMD_TX_LOG(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#endif /* _IDPF_LOGS_H_ */\ndiff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c\nnew file mode 100644\nindex 0000000000..ef0288ff45\n--- /dev/null\n+++ b/drivers/net/idpf/idpf_vchnl.c\n@@ -0,0 +1,495 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#include <stdio.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <string.h>\n+#include <unistd.h>\n+#include <stdarg.h>\n+#include <inttypes.h>\n+#include <rte_byteorder.h>\n+#include <rte_common.h>\n+\n+#include <rte_debug.h>\n+#include <rte_atomic.h>\n+#include <rte_eal.h>\n+#include <rte_ether.h>\n+#include <ethdev_driver.h>\n+#include <ethdev_pci.h>\n+#include <rte_dev.h>\n+\n+#include \"idpf_ethdev.h\"\n+\n+#include \"idpf_prototype.h\"\n+\n+#define IDPF_CTLQ_LEN\t64\n+\n+static int\n+idpf_vc_clean(struct idpf_adapter *adapter)\n+{\n+\tstruct idpf_ctlq_msg *q_msg[IDPF_CTLQ_LEN];\n+\tuint16_t num_q_msg = IDPF_CTLQ_LEN;\n+\tstruct idpf_dma_mem *dma_mem;\n+\tint err = 0;\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < 10; i++) {\n+\t\terr = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);\n+\t\tmsleep(20);\n+\t\tif (num_q_msg)\n+\t\t\tbreak;\n+\t}\n+\tif (err)\n+\t\tgoto error;\n+\n+\t/* Empty queue is not an error */\n+\tfor (i = 0; i < num_q_msg; i++) {\n+\t\tdma_mem = q_msg[i]->ctx.indirect.payload;\n+\t\tif (dma_mem) {\n+\t\t\tidpf_free_dma_mem(&adapter->hw, 
dma_mem);\n+\t\t\trte_free(dma_mem);\n+\t\t}\n+\t\trte_free(q_msg[i]);\n+\t}\n+\n+error:\n+\treturn err;\n+}\n+\n+static int\n+idpf_send_vc_msg(struct idpf_adapter *adapter, enum virtchnl_ops op,\n+\t\t uint16_t msg_size, uint8_t *msg)\n+{\n+\tstruct idpf_ctlq_msg *ctlq_msg;\n+\tstruct idpf_dma_mem *dma_mem;\n+\tint err = 0;\n+\n+\terr = idpf_vc_clean(adapter);\n+\tif (err)\n+\t\tgoto err;\n+\n+\tctlq_msg = (struct idpf_ctlq_msg *)rte_zmalloc(NULL,\n+\t\t\t\tsizeof(struct idpf_ctlq_msg), 0);\n+\tif (!ctlq_msg) {\n+\t\terr = -ENOMEM;\n+\t\tgoto err;\n+\t}\n+\n+\tdma_mem = (struct idpf_dma_mem *)rte_zmalloc(NULL,\n+\t\t\t\tsizeof(struct idpf_dma_mem), 0);\n+\tif (!dma_mem) {\n+\t\terr = -ENOMEM;\n+\t\tgoto dma_mem_error;\n+\t}\n+\n+\tdma_mem->size = IDPF_DFLT_MBX_BUF_SIZE;\n+\tidpf_alloc_dma_mem(&adapter->hw, dma_mem, dma_mem->size);\n+\tif (!dma_mem->va) {\n+\t\terr = -ENOMEM;\n+\t\tgoto dma_alloc_error;\n+\t}\n+\n+\tmemcpy(dma_mem->va, msg, msg_size);\n+\n+\tctlq_msg->opcode = idpf_mbq_opc_send_msg_to_pf;\n+\tctlq_msg->func_id = 0;\n+\tctlq_msg->data_len = msg_size;\n+\tctlq_msg->cookie.mbx.chnl_opcode = op;\n+\tctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL_STATUS_SUCCESS;\n+\tctlq_msg->ctx.indirect.payload = dma_mem;\n+\n+\terr = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);\n+\tif (err)\n+\t\tgoto send_error;\n+\n+\treturn err;\n+\n+send_error:\n+\tidpf_free_dma_mem(&adapter->hw, dma_mem);\n+dma_alloc_error:\n+\trte_free(dma_mem);\n+dma_mem_error:\n+\trte_free(ctlq_msg);\n+err:\n+\treturn err;\n+}\n+\n+static enum idpf_vc_result\n+idpf_read_msg_from_cp(struct idpf_adapter *adapter, uint16_t buf_len,\n+\t\t      uint8_t *buf)\n+{\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tstruct idpf_ctlq_msg ctlq_msg;\n+\tstruct idpf_dma_mem *dma_mem = NULL;\n+\tenum idpf_vc_result result = IDPF_MSG_NON;\n+\tenum virtchnl_ops opcode;\n+\tuint16_t pending = 1;\n+\tint ret;\n+\n+\tret = idpf_ctlq_recv(hw->arq, &pending, &ctlq_msg);\n+\tif (ret) 
{\n+\t\tPMD_DRV_LOG(DEBUG, \"Can't read msg from AQ\");\n+\t\tif (ret != IDPF_ERR_CTLQ_NO_WORK)\n+\t\t\tresult = IDPF_MSG_ERR;\n+\t\treturn result;\n+\t}\n+\n+\trte_memcpy(buf, ctlq_msg.ctx.indirect.payload->va, buf_len);\n+\n+\topcode = (enum virtchnl_ops)rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);\n+\tadapter->cmd_retval =\n+\t\t(enum virtchnl_status_code)rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);\n+\n+\tPMD_DRV_LOG(DEBUG, \"CQ from CP carries opcode %u, retval %d\",\n+\t\t    opcode, adapter->cmd_retval);\n+\n+\tif (opcode == VIRTCHNL2_OP_EVENT) {\n+\t\tstruct virtchnl2_event *ve =\n+\t\t\t(struct virtchnl2_event *)ctlq_msg.ctx.indirect.payload->va;\n+\n+\t\tresult = IDPF_MSG_SYS;\n+\t\tswitch (ve->event) {\n+\t\tcase VIRTCHNL2_EVENT_LINK_CHANGE:\n+\t\t\t/* TBD */\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tPMD_DRV_LOG(ERR, \"%s: Unknown event %d from CP\",\n+\t\t\t\t    __func__, ve->event);\n+\t\t\tbreak;\n+\t\t}\n+\t} else {\n+\t\t/* async reply msg on command issued by pf previously */\n+\t\tresult = IDPF_MSG_CMD;\n+\t\tif (opcode != adapter->pend_cmd) {\n+\t\t\tPMD_DRV_LOG(WARNING, \"command mismatch, expect %u, get %u\",\n+\t\t\t\t    adapter->pend_cmd, opcode);\n+\t\t\tresult = IDPF_MSG_ERR;\n+\t\t}\n+\t}\n+\n+\tif (ctlq_msg.data_len)\n+\t\tdma_mem = ctlq_msg.ctx.indirect.payload;\n+\telse\n+\t\tpending = 0;\n+\n+\tret = idpf_ctlq_post_rx_buffs(hw, hw->arq, &pending, &dma_mem);\n+\tif (ret && dma_mem)\n+\t\tidpf_free_dma_mem(hw, dma_mem);\n+\n+\treturn result;\n+}\n+\n+#define MAX_TRY_TIMES 200\n+#define ASQ_DELAY_MS  10\n+\n+int\n+idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,\n+\t\t  uint8_t *buf)\n+{\n+\tint ret, err = 0, i = 0;\n+\n+\tdo {\n+\t\tret = idpf_read_msg_from_cp(adapter, buf_len, buf);\n+\t\tif (ret == IDPF_MSG_CMD)\n+\t\t\tbreak;\n+\t\trte_delay_ms(ASQ_DELAY_MS);\n+\t} while (i++ < MAX_TRY_TIMES);\n+\tif (i >= MAX_TRY_TIMES ||\n+\t    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {\n+\t\terr = 
-1;\n+\t\tPMD_DRV_LOG(ERR, \"No response or return failure (%d) for cmd %d\",\n+\t\t\t    adapter->cmd_retval, ops);\n+\t}\n+\n+\treturn err;\n+}\n+\n+static int\n+idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)\n+{\n+\tint err = 0;\n+\tint i = 0;\n+\tint ret;\n+\n+\tif (_atomic_set_cmd(adapter, args->ops))\n+\t\treturn -1;\n+\n+\tret = idpf_send_vc_msg(adapter, args->ops, args->in_args_size, args->in_args);\n+\tif (ret) {\n+\t\tPMD_DRV_LOG(ERR, \"fail to send cmd %d\", args->ops);\n+\t\t_clear_cmd(adapter);\n+\t\treturn ret;\n+\t}\n+\n+\tswitch (args->ops) {\n+\tcase VIRTCHNL_OP_VERSION:\n+\tcase VIRTCHNL2_OP_GET_CAPS:\n+\tcase VIRTCHNL2_OP_CREATE_VPORT:\n+\tcase VIRTCHNL2_OP_DESTROY_VPORT:\n+\tcase VIRTCHNL2_OP_SET_RSS_KEY:\n+\tcase VIRTCHNL2_OP_SET_RSS_LUT:\n+\tcase VIRTCHNL2_OP_SET_RSS_HASH:\n+\tcase VIRTCHNL2_OP_CONFIG_RX_QUEUES:\n+\tcase VIRTCHNL2_OP_CONFIG_TX_QUEUES:\n+\tcase VIRTCHNL2_OP_ENABLE_QUEUES:\n+\tcase VIRTCHNL2_OP_DISABLE_QUEUES:\n+\tcase VIRTCHNL2_OP_ENABLE_VPORT:\n+\tcase VIRTCHNL2_OP_DISABLE_VPORT:\n+\tcase VIRTCHNL2_OP_MAP_QUEUE_VECTOR:\n+\tcase VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:\n+\tcase VIRTCHNL2_OP_ALLOC_VECTORS:\n+\tcase VIRTCHNL2_OP_DEALLOC_VECTORS:\n+\tcase VIRTCHNL2_OP_GET_STATS:\n+\t\t/* for init virtchnl ops, need to poll the response */\n+\t\terr = idpf_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);\n+\t\t_clear_cmd(adapter);\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_GET_PTYPE_INFO:\n+\t\t/* for multuple response message,\n+\t\t * do not handle the response here.\n+\t\t */\n+\t\tbreak;\n+\tdefault:\n+\t\t/* For other virtchnl ops in running time,\n+\t\t * wait for the cmd done flag.\n+\t\t */\n+\t\tdo {\n+\t\t\tif (adapter->pend_cmd == VIRTCHNL_OP_UNKNOWN)\n+\t\t\t\tbreak;\n+\t\t\trte_delay_ms(ASQ_DELAY_MS);\n+\t\t\t/* If don't read msg or read sys event, continue */\n+\t\t} while (i++ < MAX_TRY_TIMES);\n+\t\t/* If there's no response is received, clear command */\n+\t\tif (i >= MAX_TRY_TIMES 
 ||\n+\t\t    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {\n+\t\t\terr = -1;\n+\t\t\tPMD_DRV_LOG(ERR, \"No response or return failure (%d) for cmd %d\",\n+\t\t\t\t    adapter->cmd_retval, args->ops);\n+\t\t\t_clear_cmd(adapter);\n+\t\t}\n+\t\tbreak;\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_vc_check_api_version(struct idpf_adapter *adapter)\n+{\n+\tstruct virtchnl_version_info version;\n+\tstruct idpf_cmd_info args;\n+\tint err;\n+\n+\tmemset(&version, 0, sizeof(struct virtchnl_version_info));\n+\tversion.major = VIRTCHNL_VERSION_MAJOR_2;\n+\tversion.minor = VIRTCHNL_VERSION_MINOR_0;\n+\n+\targs.ops = VIRTCHNL_OP_VERSION;\n+\targs.in_args = (uint8_t *)&version;\n+\targs.in_args_size = sizeof(version);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to execute command of VIRTCHNL_OP_VERSION\");\n+\t\treturn err;\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_vc_get_caps(struct idpf_adapter *adapter)\n+{\n+\tstruct virtchnl2_get_capabilities caps_msg;\n+\tstruct idpf_cmd_info args;\n+\tint err;\n+\n+\t memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities));\n+\t caps_msg.csum_caps =\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L3_IPV4\t\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP\t|\n+\t\t VIRTCHNL2_CAP_TX_CSUM_GENERIC\t\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L3_IPV4\t\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP\t|\n+\t\t VIRTCHNL2_CAP_RX_CSUM_GENERIC;\n+\n+\t caps_msg.seg_caps =\n+\t\t 
VIRTCHNL2_CAP_SEG_IPV4_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV4_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV4_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV6_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV6_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_IPV6_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_SEG_GENERIC;\n+\n+\t caps_msg.rss_caps =\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_OTHER\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_UDP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_OTHER\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_AH\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_ESP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV4_AH_ESP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_AH\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_ESP\t\t|\n+\t\t VIRTCHNL2_CAP_RSS_IPV6_AH_ESP;\n+\n+\t caps_msg.hsplit_caps =\n+\t\t VIRTCHNL2_CAP_RX_HSPLIT_AT_L2\t\t|\n+\t\t VIRTCHNL2_CAP_RX_HSPLIT_AT_L3\t\t|\n+\t\t VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4\t|\n+\t\t VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6;\n+\n+\t caps_msg.rsc_caps =\n+\t\t VIRTCHNL2_CAP_RSC_IPV4_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_RSC_IPV4_SCTP\t\t|\n+\t\t VIRTCHNL2_CAP_RSC_IPV6_TCP\t\t|\n+\t\t VIRTCHNL2_CAP_RSC_IPV6_SCTP;\n+\n+\t caps_msg.other_caps =\n+\t\t VIRTCHNL2_CAP_RDMA\t\t\t|\n+\t\t VIRTCHNL2_CAP_SRIOV\t\t\t|\n+\t\t VIRTCHNL2_CAP_MACFILTER\t\t|\n+\t\t VIRTCHNL2_CAP_FLOW_DIRECTOR\t\t|\n+\t\t VIRTCHNL2_CAP_SPLITQ_QSCHED\t\t|\n+\t\t VIRTCHNL2_CAP_CRC\t\t\t|\n+\t\t VIRTCHNL2_CAP_WB_ON_ITR\t\t|\n+\t\t VIRTCHNL2_CAP_PROMISC\t\t\t|\n+\t\t VIRTCHNL2_CAP_LINK_SPEED\t\t|\n+\t\t VIRTCHNL2_CAP_VLAN;\n+\n+\targs.ops = VIRTCHNL2_OP_GET_CAPS;\n+\targs.in_args = (uint8_t *)&caps_msg;\n+\targs.in_args_size = sizeof(caps_msg);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to execute command of VIRTCHNL2_OP_GET_CAPS\");\n+\t\treturn 
err;\n+\t}\n+\n+\trte_memcpy(adapter->caps, args.out_buffer, sizeof(caps_msg));\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_vc_create_vport(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_pci_device *pci_dev = IDPF_DEV_TO_PCI(dev);\n+\tstruct idpf_adapter *adapter = idpf_find_adapter(pci_dev);\n+\tuint16_t idx = adapter->cur_vport_idx;\n+\tstruct virtchnl2_create_vport *vport_req_info =\n+\t\t(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];\n+\tstruct virtchnl2_create_vport vport_msg;\n+\tstruct idpf_cmd_info args;\n+\tint err = -1;\n+\n+\tmemset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));\n+\tvport_msg.vport_type = vport_req_info->vport_type;\n+\tvport_msg.txq_model = vport_req_info->txq_model;\n+\tvport_msg.rxq_model = vport_req_info->rxq_model;\n+\tvport_msg.num_tx_q = vport_req_info->num_tx_q;\n+\tvport_msg.num_tx_complq = vport_req_info->num_tx_complq;\n+\tvport_msg.num_rx_q = vport_req_info->num_rx_q;\n+\tvport_msg.num_rx_bufq = vport_req_info->num_rx_bufq;\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_CREATE_VPORT;\n+\targs.in_args = (uint8_t *)&vport_msg;\n+\targs.in_args_size = sizeof(vport_msg);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT\");\n+\t\treturn err;\n+\t}\n+\n+\tif (!adapter->vport_recv_info[idx]) {\n+\t\tadapter->vport_recv_info[idx] = rte_zmalloc(NULL,\n+\t\t\t\t\t\t    IDPF_DFLT_MBX_BUF_SIZE, 0);\n+\t\tif (!adapter->vport_recv_info[idx]) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to alloc vport_recv_info.\");\n+\t\t\treturn err;\n+\t\t}\n+\t}\n+\trte_memcpy(adapter->vport_recv_info[idx], args.out_buffer,\n+\t\t   IDPF_DFLT_MBX_BUF_SIZE);\n+\treturn err;\n+}\n+\n+int\n+idpf_vc_destroy_vport(struct idpf_vport *vport)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_vport vc_vport;\n+\tstruct 
idpf_cmd_info args;\n+\tint err;\n+\n+\tvc_vport.vport_id = vport->vport_id;\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.ops = VIRTCHNL2_OP_DESTROY_VPORT;\n+\targs.in_args = (uint8_t *)&vc_vport;\n+\targs.in_args_size = sizeof(vc_vport);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT\");\n+\t\treturn err;\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_vport vc_vport;\n+\tstruct idpf_cmd_info args;\n+\tint err;\n+\n+\tvc_vport.vport_id = vport->vport_id;\n+\targs.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT :\n+\t\t\t    VIRTCHNL2_OP_DISABLE_VPORT;\n+\targs.in_args = (u8 *)&vc_vport;\n+\targs.in_args_size = sizeof(vc_vport);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_%s_VPORT\",\n+\t\t\t    enable ? 
\"ENABLE\" : \"DISABLE\");\n+\t}\n+\n+\treturn err;\n+}\ndiff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build\nnew file mode 100644\nindex 0000000000..bf8bf58ef5\n--- /dev/null\n+++ b/drivers/net/idpf/meson.build\n@@ -0,0 +1,16 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2022 Intel Corporation\n+\n+if is_windows\n+    build = false\n+    reason = 'not supported on Windows'\n+    subdir_done()\n+endif\n+\n+includes += include_directories('../../common/idpf')\n+deps += ['common_idpf', 'security', 'cryptodev']\n+\n+sources = files(\n+    'idpf_ethdev.c',\n+    'idpf_vchnl.c',\n+)\ndiff --git a/drivers/net/idpf/version.map b/drivers/net/idpf/version.map\nnew file mode 100644\nindex 0000000000..c2e0723b4c\n--- /dev/null\n+++ b/drivers/net/idpf/version.map\n@@ -0,0 +1,3 @@\n+DPDK_22 {\n+\tlocal: *;\n+};\ndiff --git a/drivers/net/meson.build b/drivers/net/meson.build\nindex 35bfa78dee..4a951b95f2 100644\n--- a/drivers/net/meson.build\n+++ b/drivers/net/meson.build\n@@ -28,6 +28,7 @@ drivers = [\n         'i40e',\n         'iavf',\n         'ice',\n+        'idpf',\n         'igc',\n         'ionic',\n         'ipn3ke',\n",
    "prefixes": [
        "v9",
        "02/14"
    ]
}