get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request body are changed.

put:
Update a patch; the supplied representation replaces all writable fields.
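
For reference, a minimal sketch of driving this endpoint from a script, assuming the third-party Python "requests" package and a Patchwork API token (write operations require authentication, normally via an "Authorization: Token <key>" header); the token value and the new state below are illustrative placeholders, not taken from this page:

# Minimal sketch: read and update a patch via the Patchwork REST API.
import requests

BASE = "http://patchwork.dpdk.org/api"
PATCH_ID = 119196

# GET needs no authentication: fetch the patch shown on this page.
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["check"])

# PATCH needs a token and only touches the fields you send -- here, the state.
headers = {"Authorization": "Token 0123456789abcdef"}  # placeholder token
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers=headers,
    json={"state": "accepted"},  # hypothetical new state value
)
resp.raise_for_status()
print(resp.json()["state"])

In practice only the maintainer-editable fields (state, archived, delegate, and similar) accept updates; the rest of the representation below is read-only.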

GET /api/patches/119196/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 119196,
    "url": "http://patchwork.dpdk.org/api/patches/119196/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221027054505.1369248-2-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221027054505.1369248-2-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221027054505.1369248-2-junfeng.guo@intel.com",
    "date": "2022-10-27T05:44:48",
    "name": "[v13,01/18] common/idpf: introduce common library",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5603eaa4f4458775bac4063879a670946b4f9edd",
    "submitter": {
        "id": 1785,
        "url": "http://patchwork.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221027054505.1369248-2-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25450,
            "url": "http://patchwork.dpdk.org/api/series/25450/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25450",
            "date": "2022-10-27T05:44:47",
            "name": "add support for idpf PMD in DPDK",
            "version": 13,
            "mbox": "http://patchwork.dpdk.org/series/25450/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/119196/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/119196/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 273B9A00C4;\n\tThu, 27 Oct 2022 07:46:56 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E678F42BA8;\n\tThu, 27 Oct 2022 07:46:55 +0200 (CEST)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n by mails.dpdk.org (Postfix) with ESMTP id C062642BA4\n for <dev@dpdk.org>; Thu, 27 Oct 2022 07:46:52 +0200 (CEST)",
            "from orsmga003.jf.intel.com ([10.7.209.27])\n by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 26 Oct 2022 22:46:52 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by orsmga003.jf.intel.com with ESMTP; 26 Oct 2022 22:46:48 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666849612; x=1698385612;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=X64y/eyJlmImF7TcRprxPoyaeIBUDsRUJlNFBh3dIwI=;\n b=hN0U95M5T8vEKDJ+4SRgRb4Fzf+wMDxOd715NFS395VzmMThSenTcxl9\n opaj+bidmjJ+2ktMqdoLbHAVBcVv9FoTAeGY5YnfChoeoG87drjS2Gn/v\n xy7hNPFjmQpkM5buUI+nHTeitwa3gY2yoJionjWS3tBM/o4flTbmN4Cp4\n hh2WTivGlLlyhLoQDwF2/stxedbFhJ1QDH0SXTsEnpl6iasJq6WcUqR2y\n OpagVCVtK5iGrE/S24y3ATsBE6uKl+dwFkk3q8+1lIfYVxNsKl0g9mlB9\n ldWpB8xMiLsCelot1Rm9D0ZpWlV9F4nqLgPXXvVs/sTPlFJVcGLiMyNZg Q==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10512\"; a=\"309831042\"",
            "E=Sophos;i=\"5.95,215,1661842800\"; d=\"scan'208\";a=\"309831042\"",
            "E=McAfee;i=\"6500,9779,10512\"; a=\"583429196\"",
            "E=Sophos;i=\"5.95,215,1661842800\"; d=\"scan'208\";a=\"583429196\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,\n jingjing.wu@intel.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,\n Xiao Wang <xiao.w.wang@intel.com>",
        "Subject": "[PATCH v13 01/18] common/idpf: introduce common library",
        "Date": "Thu, 27 Oct 2022 13:44:48 +0800",
        "Message-Id": "<20221027054505.1369248-2-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221027054505.1369248-1-junfeng.guo@intel.com>",
        "References": "<20221026101027.240583-2-junfeng.guo@intel.com>\n <20221027054505.1369248-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Introduce common library for IDPF (Infrastructure Data\nPath Function) PMD.\nAdd base code and OS specific implementation first.\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiao Wang <xiao.w.wang@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n drivers/common/idpf/base/idpf_alloc.h         |   22 +\n drivers/common/idpf/base/idpf_common.c        |  364 +++\n drivers/common/idpf/base/idpf_controlq.c      |  691 ++++\n drivers/common/idpf/base/idpf_controlq.h      |  224 ++\n drivers/common/idpf/base/idpf_controlq_api.h  |  234 ++\n .../common/idpf/base/idpf_controlq_setup.c    |  179 +\n drivers/common/idpf/base/idpf_devids.h        |   18 +\n drivers/common/idpf/base/idpf_lan_pf_regs.h   |  134 +\n drivers/common/idpf/base/idpf_lan_txrx.h      |  428 +++\n drivers/common/idpf/base/idpf_lan_vf_regs.h   |  114 +\n drivers/common/idpf/base/idpf_osdep.h         |  364 +++\n drivers/common/idpf/base/idpf_prototype.h     |   45 +\n drivers/common/idpf/base/idpf_type.h          |  106 +\n drivers/common/idpf/base/meson.build          |   14 +\n drivers/common/idpf/base/siov_regs.h          |   47 +\n drivers/common/idpf/base/virtchnl.h           | 2866 +++++++++++++++++\n drivers/common/idpf/base/virtchnl2.h          | 1462 +++++++++\n drivers/common/idpf/base/virtchnl2_lan_desc.h |  606 ++++\n .../common/idpf/base/virtchnl_inline_ipsec.h  |  567 ++++\n drivers/common/idpf/meson.build               |    4 +\n drivers/common/idpf/version.map               |   12 +\n drivers/common/meson.build                    |    1 +\n 22 files changed, 8502 insertions(+)\n create mode 100644 drivers/common/idpf/base/idpf_alloc.h\n create mode 100644 drivers/common/idpf/base/idpf_common.c\n create mode 100644 drivers/common/idpf/base/idpf_controlq.c\n create mode 100644 drivers/common/idpf/base/idpf_controlq.h\n create mode 100644 drivers/common/idpf/base/idpf_controlq_api.h\n create mode 100644 drivers/common/idpf/base/idpf_controlq_setup.c\n create mode 100644 drivers/common/idpf/base/idpf_devids.h\n create mode 100644 drivers/common/idpf/base/idpf_lan_pf_regs.h\n create mode 100644 drivers/common/idpf/base/idpf_lan_txrx.h\n create mode 100644 drivers/common/idpf/base/idpf_lan_vf_regs.h\n create mode 100644 drivers/common/idpf/base/idpf_osdep.h\n create mode 100644 drivers/common/idpf/base/idpf_prototype.h\n create mode 100644 drivers/common/idpf/base/idpf_type.h\n create mode 100644 drivers/common/idpf/base/meson.build\n create mode 100644 drivers/common/idpf/base/siov_regs.h\n create mode 100644 drivers/common/idpf/base/virtchnl.h\n create mode 100644 drivers/common/idpf/base/virtchnl2.h\n create mode 100644 drivers/common/idpf/base/virtchnl2_lan_desc.h\n create mode 100644 drivers/common/idpf/base/virtchnl_inline_ipsec.h\n create mode 100644 drivers/common/idpf/meson.build\n create mode 100644 drivers/common/idpf/version.map",
    "diff": "diff --git a/drivers/common/idpf/base/idpf_alloc.h b/drivers/common/idpf/base/idpf_alloc.h\nnew file mode 100644\nindex 0000000000..bc054851b3\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_alloc.h\n@@ -0,0 +1,22 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_ALLOC_H_\n+#define _IDPF_ALLOC_H_\n+\n+/* Memory types */\n+enum idpf_memset_type {\n+\tIDPF_NONDMA_MEM = 0,\n+\tIDPF_DMA_MEM\n+};\n+\n+/* Memcpy types */\n+enum idpf_memcpy_type {\n+\tIDPF_NONDMA_TO_NONDMA = 0,\n+\tIDPF_NONDMA_TO_DMA,\n+\tIDPF_DMA_TO_DMA,\n+\tIDPF_DMA_TO_NONDMA\n+};\n+\n+#endif /* _IDPF_ALLOC_H_ */\ndiff --git a/drivers/common/idpf/base/idpf_common.c b/drivers/common/idpf/base/idpf_common.c\nnew file mode 100644\nindex 0000000000..9efb0ea37a\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_common.c\n@@ -0,0 +1,364 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#include \"idpf_type.h\"\n+#include \"idpf_prototype.h\"\n+#include \"virtchnl.h\"\n+\n+\n+/**\n+ * idpf_set_mac_type - Sets MAC type\n+ * @hw: pointer to the HW structure\n+ *\n+ * This function sets the mac type of the adapter based on the\n+ * vendor ID and device ID stored in the hw structure.\n+ */\n+int idpf_set_mac_type(struct idpf_hw *hw)\n+{\n+\tint status = IDPF_SUCCESS;\n+\n+\tDEBUGFUNC(\"Set MAC type\\n\");\n+\n+\tif (hw->vendor_id == IDPF_INTEL_VENDOR_ID) {\n+\t\tswitch (hw->device_id) {\n+\t\tcase IDPF_DEV_ID_PF:\n+\t\t\thw->mac.type = IDPF_MAC_PF;\n+\t\t\tbreak;\n+\t\tcase IDPF_DEV_ID_VF:\n+\t\t\thw->mac.type = IDPF_MAC_VF;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\thw->mac.type = IDPF_MAC_GENERIC;\n+\t\t\tbreak;\n+\t\t}\n+\t} else {\n+\t\tstatus = IDPF_ERR_DEVICE_NOT_SUPPORTED;\n+\t}\n+\n+\tDEBUGOUT2(\"Setting MAC type found mac: %d, returns: %d\\n\",\n+\t\t  hw->mac.type, status);\n+\treturn status;\n+}\n+\n+/**\n+ *  idpf_init_hw - main initialization routine\n+ *  @hw: pointer to the hardware structure\n+ *  @ctlq_size: struct to pass ctlq size data\n+ */\n+int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size)\n+{\n+\tstruct idpf_ctlq_create_info *q_info;\n+\tint status = IDPF_SUCCESS;\n+\tstruct idpf_ctlq_info *cq = NULL;\n+\n+\t/* Setup initial control queues */\n+\tq_info = (struct idpf_ctlq_create_info *)\n+\t\t idpf_calloc(hw, 2, sizeof(struct idpf_ctlq_create_info));\n+\tif (!q_info)\n+\t\treturn IDPF_ERR_NO_MEMORY;\n+\n+\tq_info[0].type             = IDPF_CTLQ_TYPE_MAILBOX_TX;\n+\tq_info[0].buf_size         = ctlq_size.asq_buf_size;\n+\tq_info[0].len              = ctlq_size.asq_ring_size;\n+\tq_info[0].id               = -1; /* default queue */\n+\n+\tif (hw->mac.type == IDPF_MAC_PF) {\n+\t\tq_info[0].reg.head         = PF_FW_ATQH;\n+\t\tq_info[0].reg.tail         = PF_FW_ATQT;\n+\t\tq_info[0].reg.len          = PF_FW_ATQLEN;\n+\t\tq_info[0].reg.bah          = PF_FW_ATQBAH;\n+\t\tq_info[0].reg.bal          = PF_FW_ATQBAL;\n+\t\tq_info[0].reg.len_mask     = PF_FW_ATQLEN_ATQLEN_M;\n+\t\tq_info[0].reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;\n+\t\tq_info[0].reg.head_mask    = PF_FW_ATQH_ATQH_M;\n+\t} else {\n+\t\tq_info[0].reg.head         = VF_ATQH;\n+\t\tq_info[0].reg.tail         = VF_ATQT;\n+\t\tq_info[0].reg.len          = VF_ATQLEN;\n+\t\tq_info[0].reg.bah          = VF_ATQBAH;\n+\t\tq_info[0].reg.bal          = VF_ATQBAL;\n+\t\tq_info[0].reg.len_mask     = VF_ATQLEN_ATQLEN_M;\n+\t\tq_info[0].reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;\n+\t\tq_info[0].reg.head_mask    = 
VF_ATQH_ATQH_M;\n+\t}\n+\n+\tq_info[1].type             = IDPF_CTLQ_TYPE_MAILBOX_RX;\n+\tq_info[1].buf_size         = ctlq_size.arq_buf_size;\n+\tq_info[1].len              = ctlq_size.arq_ring_size;\n+\tq_info[1].id               = -1; /* default queue */\n+\n+\tif (hw->mac.type == IDPF_MAC_PF) {\n+\t\tq_info[1].reg.head         = PF_FW_ARQH;\n+\t\tq_info[1].reg.tail         = PF_FW_ARQT;\n+\t\tq_info[1].reg.len          = PF_FW_ARQLEN;\n+\t\tq_info[1].reg.bah          = PF_FW_ARQBAH;\n+\t\tq_info[1].reg.bal          = PF_FW_ARQBAL;\n+\t\tq_info[1].reg.len_mask     = PF_FW_ARQLEN_ARQLEN_M;\n+\t\tq_info[1].reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;\n+\t\tq_info[1].reg.head_mask    = PF_FW_ARQH_ARQH_M;\n+\t} else {\n+\t\tq_info[1].reg.head         = VF_ARQH;\n+\t\tq_info[1].reg.tail         = VF_ARQT;\n+\t\tq_info[1].reg.len          = VF_ARQLEN;\n+\t\tq_info[1].reg.bah          = VF_ARQBAH;\n+\t\tq_info[1].reg.bal          = VF_ARQBAL;\n+\t\tq_info[1].reg.len_mask     = VF_ARQLEN_ARQLEN_M;\n+\t\tq_info[1].reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;\n+\t\tq_info[1].reg.head_mask    = VF_ARQH_ARQH_M;\n+\t}\n+\n+\tstatus = idpf_ctlq_init(hw, 2, q_info);\n+\tif (status != IDPF_SUCCESS) {\n+\t\t/* TODO return error */\n+\t\tidpf_free(hw, q_info);\n+\t\treturn status;\n+\t}\n+\n+\tLIST_FOR_EACH_ENTRY(cq, &hw->cq_list_head, idpf_ctlq_info, cq_list) {\n+\t\tif (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)\n+\t\t\thw->asq = cq;\n+\t\telse if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)\n+\t\t\thw->arq = cq;\n+\t}\n+\n+\t/* TODO hardcode a mac addr for now */\n+\thw->mac.addr[0] = 0x00;\n+\thw->mac.addr[1] = 0x00;\n+\thw->mac.addr[2] = 0x00;\n+\thw->mac.addr[3] = 0x00;\n+\thw->mac.addr[4] = 0x03;\n+\thw->mac.addr[5] = 0x14;\n+\n+\treturn IDPF_SUCCESS;\n+}\n+\n+/**\n+ * idpf_send_msg_to_cp\n+ * @hw: pointer to the hardware structure\n+ * @v_opcode: opcodes for VF-PF communication\n+ * @v_retval: return error code\n+ * @msg: pointer to the msg buffer\n+ * @msglen: msg length\n+ * @cmd_details: pointer to command details\n+ *\n+ * Send message to CP. By default, this message\n+ * is sent asynchronously, i.e. idpf_asq_send_command() does not wait for\n+ * completion before returning.\n+ */\n+int idpf_send_msg_to_cp(struct idpf_hw *hw, enum virtchnl_ops v_opcode,\n+\t\t\tint v_retval, u8 *msg, u16 msglen)\n+{\n+\tstruct idpf_ctlq_msg ctlq_msg = { 0 };\n+\tstruct idpf_dma_mem dma_mem = { 0 };\n+\tint status;\n+\n+\tctlq_msg.opcode = idpf_mbq_opc_send_msg_to_pf;\n+\tctlq_msg.func_id = 0;\n+\tctlq_msg.data_len = msglen;\n+\tctlq_msg.cookie.mbx.chnl_retval = v_retval;\n+\tctlq_msg.cookie.mbx.chnl_opcode = v_opcode;\n+\n+\tif (msglen > 0) {\n+\t\tdma_mem.va = (struct idpf_dma_mem *)\n+\t\t\t  idpf_alloc_dma_mem(hw, &dma_mem, msglen);\n+\t\tif (!dma_mem.va)\n+\t\t\treturn IDPF_ERR_NO_MEMORY;\n+\n+\t\tidpf_memcpy(dma_mem.va, msg, msglen, IDPF_NONDMA_TO_DMA);\n+\t\tctlq_msg.ctx.indirect.payload = &dma_mem;\n+\t}\n+\tstatus = idpf_ctlq_send(hw, hw->asq, 1, &ctlq_msg);\n+\n+\tif (dma_mem.va)\n+\t\tidpf_free_dma_mem(hw, &dma_mem);\n+\n+\treturn status;\n+}\n+\n+/**\n+ *  idpf_asq_done - check if FW has processed the Admin Send Queue\n+ *  @hw: pointer to the hw struct\n+ *\n+ *  Returns true if the firmware has processed all descriptors on the\n+ *  admin send queue. 
Returns false if there are still requests pending.\n+ */\n+bool idpf_asq_done(struct idpf_hw *hw)\n+{\n+\t/* AQ designers suggest use of head for better\n+\t * timing reliability than DD bit\n+\t */\n+\treturn rd32(hw, hw->asq->reg.head) == hw->asq->next_to_use;\n+}\n+\n+/**\n+ * idpf_check_asq_alive\n+ * @hw: pointer to the hw struct\n+ *\n+ * Returns true if Queue is enabled else false.\n+ */\n+bool idpf_check_asq_alive(struct idpf_hw *hw)\n+{\n+\tif (hw->asq->reg.len)\n+\t\treturn !!(rd32(hw, hw->asq->reg.len) &\n+\t\t\t  PF_FW_ATQLEN_ATQENABLE_M);\n+\n+\treturn false;\n+}\n+\n+/**\n+ *  idpf_clean_arq_element\n+ *  @hw: pointer to the hw struct\n+ *  @e: event info from the receive descriptor, includes any buffers\n+ *  @pending: number of events that could be left to process\n+ *\n+ *  This function cleans one Admin Receive Queue element and returns\n+ *  the contents through e.  It can also return how many events are\n+ *  left to process through 'pending'\n+ */\n+int idpf_clean_arq_element(struct idpf_hw *hw,\n+\t\t\t   struct idpf_arq_event_info *e, u16 *pending)\n+{\n+\tstruct idpf_ctlq_msg msg = { 0 };\n+\tint status;\n+\n+\t*pending = 1;\n+\n+\tstatus = idpf_ctlq_recv(hw->arq, pending, &msg);\n+\n+\t/* ctlq_msg does not align to ctlq_desc, so copy relevant data here */\n+\te->desc.opcode = msg.opcode;\n+\te->desc.cookie_high = msg.cookie.mbx.chnl_opcode;\n+\te->desc.cookie_low = msg.cookie.mbx.chnl_retval;\n+\te->desc.ret_val = msg.status;\n+\te->desc.datalen = msg.data_len;\n+\tif (msg.data_len > 0) {\n+\t\te->buf_len = msg.data_len;\n+\t\tidpf_memcpy(e->msg_buf, msg.ctx.indirect.payload->va, msg.data_len,\n+\t\t\t    IDPF_DMA_TO_NONDMA);\n+\t}\n+\treturn status;\n+}\n+\n+/**\n+ *  idpf_deinit_hw - shutdown routine\n+ *  @hw: pointer to the hardware structure\n+ */\n+int idpf_deinit_hw(struct idpf_hw *hw)\n+{\n+\thw->asq = NULL;\n+\thw->arq = NULL;\n+\n+\treturn idpf_ctlq_deinit(hw);\n+}\n+\n+/**\n+ * idpf_reset\n+ * @hw: pointer to the hardware structure\n+ *\n+ * Send a RESET message to the CPF. Does not wait for response from CPF\n+ * as none will be forthcoming. 
Immediately after calling this function,\n+ * the control queue should be shut down and (optionally) reinitialized.\n+ */\n+int idpf_reset(struct idpf_hw *hw)\n+{\n+\treturn idpf_send_msg_to_cp(hw, VIRTCHNL_OP_RESET_VF,\n+\t\t\t\t      IDPF_SUCCESS, NULL, 0);\n+}\n+\n+/**\n+ * idpf_get_set_rss_lut\n+ * @hw: pointer to the hardware structure\n+ * @vsi_id: vsi fw index\n+ * @pf_lut: for PF table set true, for VSI table set false\n+ * @lut: pointer to the lut buffer provided by the caller\n+ * @lut_size: size of the lut buffer\n+ * @set: set true to set the table, false to get the table\n+ *\n+ * Internal function to get or set RSS look up table\n+ */\n+STATIC int idpf_get_set_rss_lut(struct idpf_hw *hw, u16 vsi_id,\n+\t\t\t\tbool pf_lut, u8 *lut, u16 lut_size,\n+\t\t\t\tbool set)\n+{\n+\t/* TODO fill out command */\n+\treturn IDPF_SUCCESS;\n+}\n+\n+/**\n+ * idpf_get_rss_lut\n+ * @hw: pointer to the hardware structure\n+ * @vsi_id: vsi fw index\n+ * @pf_lut: for PF table set true, for VSI table set false\n+ * @lut: pointer to the lut buffer provided by the caller\n+ * @lut_size: size of the lut buffer\n+ *\n+ * get the RSS lookup table, PF or VSI type\n+ */\n+int idpf_get_rss_lut(struct idpf_hw *hw, u16 vsi_id, bool pf_lut,\n+\t\t     u8 *lut, u16 lut_size)\n+{\n+\treturn idpf_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, false);\n+}\n+\n+/**\n+ * idpf_set_rss_lut\n+ * @hw: pointer to the hardware structure\n+ * @vsi_id: vsi fw index\n+ * @pf_lut: for PF table set true, for VSI table set false\n+ * @lut: pointer to the lut buffer provided by the caller\n+ * @lut_size: size of the lut buffer\n+ *\n+ * set the RSS lookup table, PF or VSI type\n+ */\n+int idpf_set_rss_lut(struct idpf_hw *hw, u16 vsi_id, bool pf_lut,\n+\t\t     u8 *lut, u16 lut_size)\n+{\n+\treturn idpf_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);\n+}\n+\n+/**\n+ * idpf_get_set_rss_key\n+ * @hw: pointer to the hw struct\n+ * @vsi_id: vsi fw index\n+ * @key: pointer to key info struct\n+ * @set: set true to set the key, false to get the key\n+ *\n+ * get the RSS key per VSI\n+ */\n+STATIC int idpf_get_set_rss_key(struct idpf_hw *hw, u16 vsi_id,\n+\t\t\t\tstruct idpf_get_set_rss_key_data *key,\n+\t\t\t\tbool set)\n+{\n+\t/* TODO fill out command */\n+\treturn IDPF_SUCCESS;\n+}\n+\n+/**\n+ * idpf_get_rss_key\n+ * @hw: pointer to the hw struct\n+ * @vsi_id: vsi fw index\n+ * @key: pointer to key info struct\n+ *\n+ */\n+int idpf_get_rss_key(struct idpf_hw *hw, u16 vsi_id,\n+\t\t     struct idpf_get_set_rss_key_data *key)\n+{\n+\treturn idpf_get_set_rss_key(hw, vsi_id, key, false);\n+}\n+\n+/**\n+ * idpf_set_rss_key\n+ * @hw: pointer to the hw struct\n+ * @vsi_id: vsi fw index\n+ * @key: pointer to key info struct\n+ *\n+ * set the RSS key per VSI\n+ */\n+int idpf_set_rss_key(struct idpf_hw *hw, u16 vsi_id,\n+\t\t     struct idpf_get_set_rss_key_data *key)\n+{\n+\treturn idpf_get_set_rss_key(hw, vsi_id, key, true);\n+}\n+\n+RTE_LOG_REGISTER_DEFAULT(idpf_common_logger, NOTICE);\ndiff --git a/drivers/common/idpf/base/idpf_controlq.c b/drivers/common/idpf/base/idpf_controlq.c\nnew file mode 100644\nindex 0000000000..da3692df7d\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_controlq.c\n@@ -0,0 +1,691 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#include \"idpf_controlq.h\"\n+\n+/**\n+ * idpf_ctlq_setup_regs - initialize control queue registers\n+ * @cq: pointer to the specific control queue\n+ * @q_create_info: structs containing info for each queue to be 
initialized\n+ */\n+static void\n+idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,\n+\t\t     struct idpf_ctlq_create_info *q_create_info)\n+{\n+\t/* set head and tail registers in our local struct */\n+\tcq->reg.head = q_create_info->reg.head;\n+\tcq->reg.tail = q_create_info->reg.tail;\n+\tcq->reg.len = q_create_info->reg.len;\n+\tcq->reg.bah = q_create_info->reg.bah;\n+\tcq->reg.bal = q_create_info->reg.bal;\n+\tcq->reg.len_mask = q_create_info->reg.len_mask;\n+\tcq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;\n+\tcq->reg.head_mask = q_create_info->reg.head_mask;\n+}\n+\n+/**\n+ * idpf_ctlq_init_regs - Initialize control queue registers\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to the specific Control queue\n+ * @is_rxq: true if receive control queue, false otherwise\n+ *\n+ * Initialize registers. The caller is expected to have already initialized the\n+ * descriptor ring memory and buffer memory\n+ */\n+static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\t\tbool is_rxq)\n+{\n+\t/* Update tail to post pre-allocated buffers for rx queues */\n+\tif (is_rxq)\n+\t\twr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));\n+\n+\t/* For non-Mailbox control queues only TAIL need to be set */\n+\tif (cq->q_id != -1)\n+\t\treturn;\n+\n+\t/* Clear Head for both send or receive */\n+\twr32(hw, cq->reg.head, 0);\n+\n+\t/* set starting point */\n+\twr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));\n+\twr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));\n+\twr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));\n+}\n+\n+/**\n+ * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * Record the address of the receive queue DMA buffers in the descriptors.\n+ * The buffers must have been previously allocated.\n+ */\n+static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)\n+{\n+\tint i = 0;\n+\n+\tfor (i = 0; i < cq->ring_size; i++) {\n+\t\tstruct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);\n+\t\tstruct idpf_dma_mem *bi = cq->bi.rx_buff[i];\n+\n+\t\t/* No buffer to post to descriptor, continue */\n+\t\tif (!bi)\n+\t\t\tcontinue;\n+\n+\t\tdesc->flags =\n+\t\t\tCPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);\n+\t\tdesc->opcode = 0;\n+\t\tdesc->datalen = (__le16)CPU_TO_LE16(bi->size);\n+\t\tdesc->ret_val = 0;\n+\t\tdesc->cookie_high = 0;\n+\t\tdesc->cookie_low = 0;\n+\t\tdesc->params.indirect.addr_high =\n+\t\t\tCPU_TO_LE32(IDPF_HI_DWORD(bi->pa));\n+\t\tdesc->params.indirect.addr_low =\n+\t\t\tCPU_TO_LE32(IDPF_LO_DWORD(bi->pa));\n+\t\tdesc->params.indirect.param0 = 0;\n+\t\tdesc->params.indirect.param1 = 0;\n+\t}\n+}\n+\n+/**\n+ * idpf_ctlq_shutdown - shutdown the CQ\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * The main shutdown routine for any controq queue\n+ */\n+static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)\n+{\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\n+\tif (!cq->ring_size)\n+\t\tgoto shutdown_sq_out;\n+\n+#ifdef SIMICS_BUILD\n+\twr32(hw, cq->reg.head, 0);\n+\twr32(hw, cq->reg.tail, 0);\n+\twr32(hw, cq->reg.len, 0);\n+\twr32(hw, cq->reg.bal, 0);\n+\twr32(hw, cq->reg.bah, 0);\n+#endif /* SIMICS_BUILD */\n+\n+\t/* free ring buffers and the ring itself */\n+\tidpf_ctlq_dealloc_ring_res(hw, cq);\n+\n+\t/* Set ring_size to 0 to indicate uninitialized queue */\n+\tcq->ring_size = 
0;\n+\n+shutdown_sq_out:\n+\tidpf_release_lock(&cq->cq_lock);\n+\tidpf_destroy_lock(&cq->cq_lock);\n+}\n+\n+/**\n+ * idpf_ctlq_add - add one control queue\n+ * @hw: pointer to hardware struct\n+ * @qinfo: info for queue to be created\n+ * @cq_out: (output) double pointer to control queue to be created\n+ *\n+ * Allocate and initialize a control queue and add it to the control queue list.\n+ * The cq parameter will be allocated/initialized and passed back to the caller\n+ * if no errors occur.\n+ *\n+ * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add\n+ */\n+int idpf_ctlq_add(struct idpf_hw *hw,\n+\t\t  struct idpf_ctlq_create_info *qinfo,\n+\t\t  struct idpf_ctlq_info **cq_out)\n+{\n+\tbool is_rxq = false;\n+\tint status = IDPF_SUCCESS;\n+\n+\tif (!qinfo->len || !qinfo->buf_size ||\n+\t    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||\n+\t    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)\n+\t\treturn IDPF_ERR_CFG;\n+\n+\t*cq_out = (struct idpf_ctlq_info *)\n+\t\tidpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));\n+\tif (!(*cq_out))\n+\t\treturn IDPF_ERR_NO_MEMORY;\n+\n+\t(*cq_out)->cq_type = qinfo->type;\n+\t(*cq_out)->q_id = qinfo->id;\n+\t(*cq_out)->buf_size = qinfo->buf_size;\n+\t(*cq_out)->ring_size = qinfo->len;\n+\n+\t(*cq_out)->next_to_use = 0;\n+\t(*cq_out)->next_to_clean = 0;\n+\t(*cq_out)->next_to_post = (*cq_out)->ring_size - 1;\n+\n+\tswitch (qinfo->type) {\n+\tcase IDPF_CTLQ_TYPE_MAILBOX_RX:\n+\t\tis_rxq = true;\n+#ifdef __KERNEL__\n+\t\tfallthrough;\n+#else\n+\t\t/* fallthrough */\n+#endif /* __KERNEL__ */\n+\tcase IDPF_CTLQ_TYPE_MAILBOX_TX:\n+\t\tstatus = idpf_ctlq_alloc_ring_res(hw, *cq_out);\n+\t\tbreak;\n+\tdefault:\n+\t\tstatus = IDPF_ERR_PARAM;\n+\t\tbreak;\n+\t}\n+\n+\tif (status)\n+\t\tgoto init_free_q;\n+\n+\tif (is_rxq) {\n+\t\tidpf_ctlq_init_rxq_bufs(*cq_out);\n+\t} else {\n+\t\t/* Allocate the array of msg pointers for TX queues */\n+\t\t(*cq_out)->bi.tx_msg = (struct idpf_ctlq_msg **)\n+\t\t\tidpf_calloc(hw, qinfo->len,\n+\t\t\t\t    sizeof(struct idpf_ctlq_msg *));\n+\t\tif (!(*cq_out)->bi.tx_msg) {\n+\t\t\tstatus = IDPF_ERR_NO_MEMORY;\n+\t\t\tgoto init_dealloc_q_mem;\n+\t\t}\n+\t}\n+\n+\tidpf_ctlq_setup_regs(*cq_out, qinfo);\n+\n+\tidpf_ctlq_init_regs(hw, *cq_out, is_rxq);\n+\n+\tidpf_init_lock(&(*cq_out)->cq_lock);\n+\n+\tLIST_INSERT_HEAD(&hw->cq_list_head, (*cq_out), cq_list);\n+\n+\treturn status;\n+\n+init_dealloc_q_mem:\n+\t/* free ring buffers and the ring itself */\n+\tidpf_ctlq_dealloc_ring_res(hw, *cq_out);\n+init_free_q:\n+\tidpf_free(hw, *cq_out);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * idpf_ctlq_remove - deallocate and remove specified control queue\n+ * @hw: pointer to hardware struct\n+ * @cq: pointer to control queue to be removed\n+ */\n+void idpf_ctlq_remove(struct idpf_hw *hw,\n+\t\t      struct idpf_ctlq_info *cq)\n+{\n+\tLIST_REMOVE(cq, cq_list);\n+\tidpf_ctlq_shutdown(hw, cq);\n+\tidpf_free(hw, cq);\n+}\n+\n+/**\n+ * idpf_ctlq_init - main initialization routine for all control queues\n+ * @hw: pointer to hardware struct\n+ * @num_q: number of queues to initialize\n+ * @q_info: array of structs containing info for each queue to be initialized\n+ *\n+ * This initializes any number and any type of control queues. This is an all\n+ * or nothing routine; if one fails, all previously allocated queues will be\n+ * destroyed. 
This must be called prior to using the individual add/remove\n+ * APIs.\n+ */\n+int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,\n+\t\t   struct idpf_ctlq_create_info *q_info)\n+{\n+\tstruct idpf_ctlq_info *cq = NULL, *tmp = NULL;\n+\tint ret_code = IDPF_SUCCESS;\n+\tint i = 0;\n+\n+\tLIST_INIT(&hw->cq_list_head);\n+\n+\tfor (i = 0; i < num_q; i++) {\n+\t\tstruct idpf_ctlq_create_info *qinfo = q_info + i;\n+\n+\t\tret_code = idpf_ctlq_add(hw, qinfo, &cq);\n+\t\tif (ret_code)\n+\t\t\tgoto init_destroy_qs;\n+\t}\n+\n+\treturn ret_code;\n+\n+init_destroy_qs:\n+\tLIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,\n+\t\t\t\t idpf_ctlq_info, cq_list)\n+\t\tidpf_ctlq_remove(hw, cq);\n+\n+\treturn ret_code;\n+}\n+\n+/**\n+ * idpf_ctlq_deinit - destroy all control queues\n+ * @hw: pointer to hw struct\n+ */\n+int idpf_ctlq_deinit(struct idpf_hw *hw)\n+{\n+\tstruct idpf_ctlq_info *cq = NULL, *tmp = NULL;\n+\tint ret_code = IDPF_SUCCESS;\n+\n+\tLIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,\n+\t\t\t\t idpf_ctlq_info, cq_list)\n+\t\tidpf_ctlq_remove(hw, cq);\n+\n+\treturn ret_code;\n+}\n+\n+/**\n+ * idpf_ctlq_send - send command to Control Queue (CTQ)\n+ * @hw: pointer to hw struct\n+ * @cq: handle to control queue struct to send on\n+ * @num_q_msg: number of messages to send on control queue\n+ * @q_msg: pointer to array of queue messages to be sent\n+ *\n+ * The caller is expected to allocate DMAable buffers and pass them to the\n+ * send routine via the q_msg struct / control queue specific data struct.\n+ * The control queue will hold a reference to each send message until\n+ * the completion for that message has been cleaned.\n+ */\n+int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])\n+{\n+\tstruct idpf_ctlq_desc *desc;\n+\tint num_desc_avail = 0;\n+\tint status = IDPF_SUCCESS;\n+\tint i = 0;\n+\n+\tif (!cq || !cq->ring_size)\n+\t\treturn IDPF_ERR_CTLQ_EMPTY;\n+\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\n+\t/* Ensure there are enough descriptors to send all messages */\n+\tnum_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);\n+\tif (num_desc_avail == 0 || num_desc_avail < num_q_msg) {\n+\t\tstatus = IDPF_ERR_CTLQ_FULL;\n+\t\tgoto sq_send_command_out;\n+\t}\n+\n+\tfor (i = 0; i < num_q_msg; i++) {\n+\t\tstruct idpf_ctlq_msg *msg = &q_msg[i];\n+\t\tu64 msg_cookie;\n+\n+\t\tdesc = IDPF_CTLQ_DESC(cq, cq->next_to_use);\n+\n+\t\tdesc->opcode = CPU_TO_LE16(msg->opcode);\n+\t\tdesc->pfid_vfid = CPU_TO_LE16(msg->func_id);\n+\n+\t\tmsg_cookie = *(u64 *)&msg->cookie;\n+\t\tdesc->cookie_high =\n+\t\t\tCPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));\n+\t\tdesc->cookie_low =\n+\t\t\tCPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));\n+\n+\t\tdesc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<\n+\t\t\t\t\t  IDPF_CTLQ_FLAG_HOST_ID_S);\n+\t\tif (msg->data_len) {\n+\t\t\tstruct idpf_dma_mem *buff = msg->ctx.indirect.payload;\n+\n+\t\t\tdesc->datalen |= CPU_TO_LE16(msg->data_len);\n+\t\t\tdesc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);\n+\t\t\tdesc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);\n+\n+\t\t\t/* Update the address values in the desc with the pa\n+\t\t\t * value for respective buffer\n+\t\t\t */\n+\t\t\tdesc->params.indirect.addr_high =\n+\t\t\t\tCPU_TO_LE32(IDPF_HI_DWORD(buff->pa));\n+\t\t\tdesc->params.indirect.addr_low =\n+\t\t\t\tCPU_TO_LE32(IDPF_LO_DWORD(buff->pa));\n+\n+\t\t\tidpf_memcpy(&desc->params, msg->ctx.indirect.context,\n+\t\t\t\t    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);\n+#ifdef SIMICS_BUILD\n+\t\t\t/* MBX message with opcode 
idpf_mbq_opc_send_msg_to_pf\n+\t\t\t * need to set peer PF function id in param0 for Simics\n+\t\t\t */\n+\t\t\tif (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {\n+\t\t\t\tdesc->params.indirect.param0 =\n+\t\t\t\t\tCPU_TO_LE32(msg->func_id);\n+\t\t\t}\n+#endif\n+\t\t} else {\n+\t\t\tidpf_memcpy(&desc->params, msg->ctx.direct,\n+\t\t\t\t    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);\n+#ifdef SIMICS_BUILD\n+\t\t\t/* MBX message with opcode idpf_mbq_opc_send_msg_to_pf\n+\t\t\t * need to set peer PF function id in param0 for Simics\n+\t\t\t */\n+\t\t\tif (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {\n+\t\t\t\tdesc->params.direct.param0 =\n+\t\t\t\t\tCPU_TO_LE32(msg->func_id);\n+\t\t\t}\n+#endif\n+\t\t}\n+\n+\t\t/* Store buffer info */\n+\t\tcq->bi.tx_msg[cq->next_to_use] = msg;\n+\n+\t\t(cq->next_to_use)++;\n+\t\tif (cq->next_to_use == cq->ring_size)\n+\t\t\tcq->next_to_use = 0;\n+\t}\n+\n+\t/* Force memory write to complete before letting hardware\n+\t * know that there are new descriptors to fetch.\n+\t */\n+\tidpf_wmb();\n+\n+\twr32(hw, cq->reg.tail, cq->next_to_use);\n+\n+sq_send_command_out:\n+\tidpf_release_lock(&cq->cq_lock);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the\n+ * requested queue\n+ * @cq: pointer to the specific Control queue\n+ * @clean_count: (input|output) number of descriptors to clean as input, and\n+ * number of descriptors actually cleaned as output\n+ * @msg_status: (output) pointer to msg pointer array to be populated; needs\n+ * to be allocated by caller\n+ *\n+ * Returns an array of message pointers associated with the cleaned\n+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned\n+ * descriptors.  The status will be returned for each; any messages that failed\n+ * to send will have a non-zero status. 
The caller is expected to free original\n+ * ctlq_msgs and free or reuse the DMA buffers.\n+ */\n+int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,\n+\t\t       struct idpf_ctlq_msg *msg_status[])\n+{\n+\tstruct idpf_ctlq_desc *desc;\n+\tu16 i = 0, num_to_clean;\n+\tu16 ntc, desc_err;\n+\tint ret = IDPF_SUCCESS;\n+\n+\tif (!cq || !cq->ring_size)\n+\t\treturn IDPF_ERR_CTLQ_EMPTY;\n+\n+\tif (*clean_count == 0)\n+\t\treturn IDPF_SUCCESS;\n+\tif (*clean_count > cq->ring_size)\n+\t\treturn IDPF_ERR_PARAM;\n+\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\n+\tntc = cq->next_to_clean;\n+\n+\tnum_to_clean = *clean_count;\n+\n+\tfor (i = 0; i < num_to_clean; i++) {\n+\t\t/* Fetch next descriptor and check if marked as done */\n+\t\tdesc = IDPF_CTLQ_DESC(cq, ntc);\n+\t\tif (!(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))\n+\t\t\tbreak;\n+\n+\t\tdesc_err = LE16_TO_CPU(desc->ret_val);\n+\t\tif (desc_err) {\n+\t\t\t/* strip off FW internal code */\n+\t\t\tdesc_err &= 0xff;\n+\t\t}\n+\n+\t\tmsg_status[i] = cq->bi.tx_msg[ntc];\n+\t\tmsg_status[i]->status = desc_err;\n+\n+\t\tcq->bi.tx_msg[ntc] = NULL;\n+\n+\t\t/* Zero out any stale data */\n+\t\tidpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);\n+\n+\t\tntc++;\n+\t\tif (ntc == cq->ring_size)\n+\t\t\tntc = 0;\n+\t}\n+\n+\tcq->next_to_clean = ntc;\n+\n+\tidpf_release_lock(&cq->cq_lock);\n+\n+\t/* Return number of descriptors actually cleaned */\n+\t*clean_count = i;\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to control queue handle\n+ * @buff_count: (input|output) input is number of buffers caller is trying to\n+ * return; output is number of buffers that were not posted\n+ * @buffs: array of pointers to dma mem structs to be given to hardware\n+ *\n+ * Caller uses this function to return DMA buffers to the descriptor ring after\n+ * consuming them; buff_count will be the number of buffers.\n+ *\n+ * Note: this function needs to be called after a receive call even\n+ * if there are no DMA buffers to be returned, i.e. 
buff_count = 0,\n+ * buffs = NULL to support direct commands\n+ */\n+int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n+\t\t\t    u16 *buff_count, struct idpf_dma_mem **buffs)\n+{\n+\tstruct idpf_ctlq_desc *desc;\n+\tu16 ntp = cq->next_to_post;\n+\tbool buffs_avail = false;\n+\tu16 tbp = ntp + 1;\n+\tint status = IDPF_SUCCESS;\n+\tint i = 0;\n+\n+\tif (*buff_count > cq->ring_size)\n+\t\treturn IDPF_ERR_PARAM;\n+\n+\tif (*buff_count > 0)\n+\t\tbuffs_avail = true;\n+\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\n+\tif (tbp >= cq->ring_size)\n+\t\ttbp = 0;\n+\n+\tif (tbp == cq->next_to_clean)\n+\t\t/* Nothing to do */\n+\t\tgoto post_buffs_out;\n+\n+\t/* Post buffers for as many as provided or up until the last one used */\n+\twhile (ntp != cq->next_to_clean) {\n+\t\tdesc = IDPF_CTLQ_DESC(cq, ntp);\n+\n+\t\tif (cq->bi.rx_buff[ntp])\n+\t\t\tgoto fill_desc;\n+\t\tif (!buffs_avail) {\n+\t\t\t/* If the caller hasn't given us any buffers or\n+\t\t\t * there are none left, search the ring itself\n+\t\t\t * for an available buffer to move to this\n+\t\t\t * entry starting at the next entry in the ring\n+\t\t\t */\n+\t\t\ttbp = ntp + 1;\n+\n+\t\t\t/* Wrap ring if necessary */\n+\t\t\tif (tbp >= cq->ring_size)\n+\t\t\t\ttbp = 0;\n+\n+\t\t\twhile (tbp != cq->next_to_clean) {\n+\t\t\t\tif (cq->bi.rx_buff[tbp]) {\n+\t\t\t\t\tcq->bi.rx_buff[ntp] =\n+\t\t\t\t\t\tcq->bi.rx_buff[tbp];\n+\t\t\t\t\tcq->bi.rx_buff[tbp] = NULL;\n+\n+\t\t\t\t\t/* Found a buffer, no need to\n+\t\t\t\t\t * search anymore\n+\t\t\t\t\t */\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\n+\t\t\t\t/* Wrap ring if necessary */\n+\t\t\t\ttbp++;\n+\t\t\t\tif (tbp >= cq->ring_size)\n+\t\t\t\t\ttbp = 0;\n+\t\t\t}\n+\n+\t\t\tif (tbp == cq->next_to_clean)\n+\t\t\t\tgoto post_buffs_out;\n+\t\t} else {\n+\t\t\t/* Give back pointer to DMA buffer */\n+\t\t\tcq->bi.rx_buff[ntp] = buffs[i];\n+\t\t\ti++;\n+\n+\t\t\tif (i >= *buff_count)\n+\t\t\t\tbuffs_avail = false;\n+\t\t}\n+\n+fill_desc:\n+\t\tdesc->flags =\n+\t\t\tCPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);\n+\n+\t\t/* Post buffers to descriptor */\n+\t\tdesc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);\n+\t\tdesc->params.indirect.addr_high =\n+\t\t\tCPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));\n+\t\tdesc->params.indirect.addr_low =\n+\t\t\tCPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));\n+\n+\t\tntp++;\n+\t\tif (ntp == cq->ring_size)\n+\t\t\tntp = 0;\n+\t}\n+\n+post_buffs_out:\n+\t/* Only update tail if buffers were actually posted */\n+\tif (cq->next_to_post != ntp) {\n+\t\tif (ntp)\n+\t\t\t/* Update next_to_post to ntp - 1 since current ntp\n+\t\t\t * will not have a buffer\n+\t\t\t */\n+\t\t\tcq->next_to_post = ntp - 1;\n+\t\telse\n+\t\t\t/* Wrap to end of end ring since current ntp is 0 */\n+\t\t\tcq->next_to_post = cq->ring_size - 1;\n+\n+\t\twr32(hw, cq->reg.tail, cq->next_to_post);\n+\t}\n+\n+\tidpf_release_lock(&cq->cq_lock);\n+\n+\t/* return the number of buffers that were not posted */\n+\t*buff_count = *buff_count - i;\n+\n+\treturn status;\n+}\n+\n+/**\n+ * idpf_ctlq_recv - receive control queue message call back\n+ * @cq: pointer to control queue handle to receive on\n+ * @num_q_msg: (input|output) input number of messages that should be received;\n+ * output number of messages actually received\n+ * @q_msg: (output) array of received control queue messages on this q;\n+ * needs to be pre-allocated by caller for as many messages as requested\n+ *\n+ * Called by interrupt handler or polling mechanism. 
Caller is expected\n+ * to free buffers\n+ */\n+int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,\n+\t\t   struct idpf_ctlq_msg *q_msg)\n+{\n+\tu16 num_to_clean, ntc, ret_val, flags;\n+\tstruct idpf_ctlq_desc *desc;\n+\tint ret_code = IDPF_SUCCESS;\n+\tu16 i = 0;\n+\n+\tif (!cq || !cq->ring_size)\n+\t\treturn IDPF_ERR_CTLQ_EMPTY;\n+\n+\tif (*num_q_msg == 0)\n+\t\treturn IDPF_SUCCESS;\n+\telse if (*num_q_msg > cq->ring_size)\n+\t\treturn IDPF_ERR_PARAM;\n+\n+\t/* take the lock before we start messing with the ring */\n+\tidpf_acquire_lock(&cq->cq_lock);\n+\n+\tntc = cq->next_to_clean;\n+\n+\tnum_to_clean = *num_q_msg;\n+\n+\tfor (i = 0; i < num_to_clean; i++) {\n+\t\tu64 msg_cookie;\n+\n+\t\t/* Fetch next descriptor and check if marked as done */\n+\t\tdesc = IDPF_CTLQ_DESC(cq, ntc);\n+\t\tflags = LE16_TO_CPU(desc->flags);\n+\n+\t\tif (!(flags & IDPF_CTLQ_FLAG_DD))\n+\t\t\tbreak;\n+\n+\t\tret_val = LE16_TO_CPU(desc->ret_val);\n+\n+\t\tq_msg[i].vmvf_type = (flags &\n+\t\t\t\t      (IDPF_CTLQ_FLAG_FTYPE_VM |\n+\t\t\t\t       IDPF_CTLQ_FLAG_FTYPE_PF)) >>\n+\t\t\t\t      IDPF_CTLQ_FLAG_FTYPE_S;\n+\n+\t\tif (flags & IDPF_CTLQ_FLAG_ERR)\n+\t\t\tret_code = IDPF_ERR_CTLQ_ERROR;\n+\n+\t\tmsg_cookie = (u64)LE32_TO_CPU(desc->cookie_high) << 32;\n+\t\tmsg_cookie |= (u64)LE32_TO_CPU(desc->cookie_low);\n+\t\tidpf_memcpy(&q_msg[i].cookie, &msg_cookie, sizeof(u64),\n+\t\t\t    IDPF_NONDMA_TO_NONDMA);\n+\n+\t\tq_msg[i].opcode = LE16_TO_CPU(desc->opcode);\n+\t\tq_msg[i].data_len = LE16_TO_CPU(desc->datalen);\n+\t\tq_msg[i].status = ret_val;\n+\n+\t\tif (desc->datalen) {\n+\t\t\tidpf_memcpy(q_msg[i].ctx.indirect.context,\n+\t\t\t\t    &desc->params.indirect,\n+\t\t\t\t    IDPF_INDIRECT_CTX_SIZE,\n+\t\t\t\t    IDPF_DMA_TO_NONDMA);\n+\n+\t\t\t/* Assign pointer to dma buffer to ctlq_msg array\n+\t\t\t * to be given to upper layer\n+\t\t\t */\n+\t\t\tq_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];\n+\n+\t\t\t/* Zero out pointer to DMA buffer info;\n+\t\t\t * will be repopulated by post buffers API\n+\t\t\t */\n+\t\t\tcq->bi.rx_buff[ntc] = NULL;\n+\t\t} else {\n+\t\t\tidpf_memcpy(q_msg[i].ctx.direct,\n+\t\t\t\t    desc->params.raw,\n+\t\t\t\t    IDPF_DIRECT_CTX_SIZE,\n+\t\t\t\t    IDPF_DMA_TO_NONDMA);\n+\t\t}\n+\n+\t\t/* Zero out stale data in descriptor */\n+\t\tidpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),\n+\t\t\t    IDPF_DMA_MEM);\n+\n+\t\tntc++;\n+\t\tif (ntc == cq->ring_size)\n+\t\t\tntc = 0;\n+\t};\n+\n+\tcq->next_to_clean = ntc;\n+\n+\tidpf_release_lock(&cq->cq_lock);\n+\n+\t*num_q_msg = i;\n+\tif (*num_q_msg == 0)\n+\t\tret_code = IDPF_ERR_CTLQ_NO_WORK;\n+\n+\treturn ret_code;\n+}\ndiff --git a/drivers/common/idpf/base/idpf_controlq.h b/drivers/common/idpf/base/idpf_controlq.h\nnew file mode 100644\nindex 0000000000..e7b0d803b3\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_controlq.h\n@@ -0,0 +1,224 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_CONTROLQ_H_\n+#define _IDPF_CONTROLQ_H_\n+\n+#ifdef __KERNEL__\n+#include <linux/slab.h>\n+#endif\n+\n+#ifndef __KERNEL__\n+#include \"idpf_osdep.h\"\n+#include \"idpf_alloc.h\"\n+#endif\n+#include \"idpf_controlq_api.h\"\n+\n+/* Maximum buffer lengths for all control queue types */\n+#define IDPF_CTLQ_MAX_RING_SIZE 1024\n+#define IDPF_CTLQ_MAX_BUF_LEN\t4096\n+\n+#define IDPF_CTLQ_DESC(R, i) \\\n+\t(&(((struct idpf_ctlq_desc *)((R)->desc_ring.va))[i]))\n+\n+#define IDPF_CTLQ_DESC_UNUSED(R)\t\t\t\t\t\\\n+\t((u16)((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->ring_size) + \\\n+\t       (R)->next_to_clean - (R)->next_to_use - 1))\n+\n+#ifndef __KERNEL__\n+/* Data type manipulation macros. */\n+#define IDPF_HI_DWORD(x)\t((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))\n+#define IDPF_LO_DWORD(x)\t((u32)((x) & 0xFFFFFFFF))\n+#define IDPF_HI_WORD(x)\t\t((u16)(((x) >> 16) & 0xFFFF))\n+#define IDPF_LO_WORD(x)\t\t((u16)((x) & 0xFFFF))\n+\n+#endif\n+/* Control Queue default settings */\n+#define IDPF_CTRL_SQ_CMD_TIMEOUT\t250  /* msecs */\n+\n+struct idpf_ctlq_desc {\n+\t__le16\tflags;\n+\t__le16\topcode;\n+\t__le16\tdatalen;\t/* 0 for direct commands */\n+\tunion {\n+\t\t__le16 ret_val;\n+\t\t__le16 pfid_vfid;\n+#define IDPF_CTLQ_DESC_VF_ID_S\t0\n+#ifdef SIMICS_BUILD\n+#define IDPF_CTLQ_DESC_VF_ID_M\t(0x3FF << IDPF_CTLQ_DESC_VF_ID_S)\n+#define IDPF_CTLQ_DESC_PF_ID_S\t10\n+#define IDPF_CTLQ_DESC_PF_ID_M\t(0x3F << IDPF_CTLQ_DESC_PF_ID_S)\n+#else\n+#define IDPF_CTLQ_DESC_VF_ID_M\t(0x7FF << IDPF_CTLQ_DESC_VF_ID_S)\n+#define IDPF_CTLQ_DESC_PF_ID_S\t11\n+#define IDPF_CTLQ_DESC_PF_ID_M\t(0x1F << IDPF_CTLQ_DESC_PF_ID_S)\n+#endif\n+\t};\n+\t__le32 cookie_high;\n+\t__le32 cookie_low;\n+\tunion {\n+\t\tstruct {\n+\t\t\t__le32 param0;\n+\t\t\t__le32 param1;\n+\t\t\t__le32 param2;\n+\t\t\t__le32 param3;\n+\t\t} direct;\n+\t\tstruct {\n+\t\t\t__le32 param0;\n+\t\t\t__le32 param1;\n+\t\t\t__le32 addr_high;\n+\t\t\t__le32 addr_low;\n+\t\t} indirect;\n+\t\tu8 raw[16];\n+\t} params;\n+};\n+\n+/* Flags sub-structure\n+ * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |\n+ * |DD |CMP|ERR|  * RSV *  |FTYPE  | *RSV* |RD |VFC|BUF|  HOST_ID  |\n+ */\n+/* command flags and offsets */\n+#define IDPF_CTLQ_FLAG_DD_S\t\t0\n+#define IDPF_CTLQ_FLAG_CMP_S\t\t1\n+#define IDPF_CTLQ_FLAG_ERR_S\t\t2\n+#define IDPF_CTLQ_FLAG_FTYPE_S\t\t6\n+#define IDPF_CTLQ_FLAG_RD_S\t\t10\n+#define IDPF_CTLQ_FLAG_VFC_S\t\t11\n+#define IDPF_CTLQ_FLAG_BUF_S\t\t12\n+#define IDPF_CTLQ_FLAG_HOST_ID_S\t13\n+\n+#define IDPF_CTLQ_FLAG_DD\tBIT(IDPF_CTLQ_FLAG_DD_S)\t/* 0x1\t  */\n+#define IDPF_CTLQ_FLAG_CMP\tBIT(IDPF_CTLQ_FLAG_CMP_S)\t/* 0x2\t  */\n+#define IDPF_CTLQ_FLAG_ERR\tBIT(IDPF_CTLQ_FLAG_ERR_S)\t/* 0x4\t  */\n+#define IDPF_CTLQ_FLAG_FTYPE_VM\tBIT(IDPF_CTLQ_FLAG_FTYPE_S)\t/* 0x40\t  */\n+#define IDPF_CTLQ_FLAG_FTYPE_PF\tBIT(IDPF_CTLQ_FLAG_FTYPE_S + 1)\t/* 0x80   */\n+#define IDPF_CTLQ_FLAG_RD\tBIT(IDPF_CTLQ_FLAG_RD_S)\t/* 0x400  */\n+#define IDPF_CTLQ_FLAG_VFC\tBIT(IDPF_CTLQ_FLAG_VFC_S)\t/* 0x800  */\n+#define IDPF_CTLQ_FLAG_BUF\tBIT(IDPF_CTLQ_FLAG_BUF_S)\t/* 0x1000 */\n+\n+/* Host ID is a special field that has 3b and not a 1b flag */\n+#define IDPF_CTLQ_FLAG_HOST_ID_M MAKE_MASK(0x7000UL, IDPF_CTLQ_FLAG_HOST_ID_S)\n+\n+struct idpf_mbxq_desc {\n+\tu8 pad[8];\t\t/* CTLQ flags/opcode/len/retval fields */\n+\tu32 chnl_opcode;\t/* avoid confusion with desc->opcode */\n+\tu32 chnl_retval;\t/* ditto for desc->retval */\n+\tu32 pf_vf_id;\t\t/* used by CP when sending to PF */\n+};\n+\n+enum idpf_mac_type {\n+\tIDPF_MAC_UNKNOWN = 0,\n+\tIDPF_MAC_PF,\n+\tIDPF_MAC_VF,\n+\tIDPF_MAC_GENERIC\n+};\n+\n+#define ETH_ALEN 6\n+\n+struct idpf_mac_info {\n+\tenum idpf_mac_type type;\n+\tu8 addr[ETH_ALEN];\n+\tu8 perm_addr[ETH_ALEN];\n+};\n+\n+#define IDPF_AQ_LINK_UP 0x1\n+\n+/* PCI bus types */\n+enum idpf_bus_type {\n+\tidpf_bus_type_unknown = 0,\n+\tidpf_bus_type_pci,\n+\tidpf_bus_type_pcix,\n+\tidpf_bus_type_pci_express,\n+\tidpf_bus_type_reserved\n+};\n+\n+/* PCI bus speeds */\n+enum idpf_bus_speed {\n+\tidpf_bus_speed_unknown\t= 0,\n+\tidpf_bus_speed_33\t= 33,\n+\tidpf_bus_speed_66\t= 
66,\n+\tidpf_bus_speed_100\t= 100,\n+\tidpf_bus_speed_120\t= 120,\n+\tidpf_bus_speed_133\t= 133,\n+\tidpf_bus_speed_2500\t= 2500,\n+\tidpf_bus_speed_5000\t= 5000,\n+\tidpf_bus_speed_8000\t= 8000,\n+\tidpf_bus_speed_reserved\n+};\n+\n+/* PCI bus widths */\n+enum idpf_bus_width {\n+\tidpf_bus_width_unknown\t= 0,\n+\tidpf_bus_width_pcie_x1\t= 1,\n+\tidpf_bus_width_pcie_x2\t= 2,\n+\tidpf_bus_width_pcie_x4\t= 4,\n+\tidpf_bus_width_pcie_x8\t= 8,\n+\tidpf_bus_width_32\t= 32,\n+\tidpf_bus_width_64\t= 64,\n+\tidpf_bus_width_reserved\n+};\n+\n+/* Bus parameters */\n+struct idpf_bus_info {\n+\tenum idpf_bus_speed speed;\n+\tenum idpf_bus_width width;\n+\tenum idpf_bus_type type;\n+\n+\tu16 func;\n+\tu16 device;\n+\tu16 lan_id;\n+\tu16 bus_id;\n+};\n+\n+/* Function specific capabilities */\n+struct idpf_hw_func_caps {\n+\tu32 num_alloc_vfs;\n+\tu32 vf_base_id;\n+};\n+\n+/* Define the APF hardware struct to replace other control structs as needed\n+ * Align to ctlq_hw_info\n+ */\n+struct idpf_hw {\n+\t/* Some part of BAR0 address space is not mapped by the LAN driver.\n+\t * This results in 2 regions of BAR0 to be mapped by LAN driver which\n+\t * will have its own base hardware address when mapped.\n+\t */\n+\tu8 *hw_addr;\n+\tu8 *hw_addr_region2;\n+\tu64 hw_addr_len;\n+\tu64 hw_addr_region2_len;\n+\n+\tvoid *back;\n+\n+\t/* control queue - send and receive */\n+\tstruct idpf_ctlq_info *asq;\n+\tstruct idpf_ctlq_info *arq;\n+\n+\t/* subsystem structs */\n+\tstruct idpf_mac_info mac;\n+\tstruct idpf_bus_info bus;\n+\tstruct idpf_hw_func_caps func_caps;\n+\n+\t/* pci info */\n+\tu16 device_id;\n+\tu16 vendor_id;\n+\tu16 subsystem_device_id;\n+\tu16 subsystem_vendor_id;\n+\tu8 revision_id;\n+\tbool adapter_stopped;\n+\n+\tLIST_HEAD_TYPE(list_head, idpf_ctlq_info) cq_list_head;\n+};\n+\n+int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw,\n+\t\t\t     struct idpf_ctlq_info *cq);\n+\n+void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq);\n+\n+/* prototype for functions used for dynamic memory allocation */\n+void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem,\n+\t\t\t u64 size);\n+void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem);\n+#endif /* _IDPF_CONTROLQ_H_ */\ndiff --git a/drivers/common/idpf/base/idpf_controlq_api.h b/drivers/common/idpf/base/idpf_controlq_api.h\nnew file mode 100644\nindex 0000000000..3bb8f72aad\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_controlq_api.h\n@@ -0,0 +1,234 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_CONTROLQ_API_H_\n+#define _IDPF_CONTROLQ_API_H_\n+\n+#ifdef __KERNEL__\n+#include \"idpf_mem.h\"\n+#else /* !__KERNEL__ */\n+#include \"idpf_osdep.h\"\n+/* Error Codes */\n+/* Linux kernel driver can't directly use these. 
Instead, they are mapped to\n+ * linux compatible error codes which get translated in the build script.\n+ */\n+#define IDPF_SUCCESS\t\t\t0\n+#define IDPF_ERR_PARAM\t\t\t-53\t/* -EBADR */\n+#define IDPF_ERR_NOT_IMPL\t\t-95\t/* -EOPNOTSUPP */\n+#define IDPF_ERR_NOT_READY\t\t-16\t/* -EBUSY */\n+#define IDPF_ERR_BAD_PTR\t\t-14\t/* -EFAULT */\n+#define IDPF_ERR_INVAL_SIZE\t\t-90\t/* -EMSGSIZE */\n+#define IDPF_ERR_DEVICE_NOT_SUPPORTED\t-19\t/* -ENODEV */\n+#define IDPF_ERR_FW_API_VER\t\t-13\t/* -EACCESS */\n+#define IDPF_ERR_NO_MEMORY\t\t-12\t/* -ENOMEM */\n+#define IDPF_ERR_CFG\t\t\t-22\t/* -EINVAL */\n+#define IDPF_ERR_OUT_OF_RANGE\t\t-34\t/* -ERANGE */\n+#define IDPF_ERR_ALREADY_EXISTS\t\t-17\t/* -EEXIST */\n+#define IDPF_ERR_DOES_NOT_EXIST\t\t-6\t/* -ENXIO */\n+#define IDPF_ERR_IN_USE\t\t\t-114\t/* -EALREADY */\n+#define IDPF_ERR_MAX_LIMIT\t\t-109\t/* -ETOOMANYREFS */\n+#define IDPF_ERR_RESET_ONGOING\t\t-104\t/* -ECONNRESET */\n+\n+/* CRQ/CSQ specific error codes */\n+#define IDPF_ERR_CTLQ_ERROR\t\t-74\t/* -EBADMSG */\n+#define IDPF_ERR_CTLQ_TIMEOUT\t\t-110\t/* -ETIMEDOUT */\n+#define IDPF_ERR_CTLQ_FULL\t\t-28\t/* -ENOSPC */\n+#define IDPF_ERR_CTLQ_NO_WORK\t\t-42\t/* -ENOMSG */\n+#define IDPF_ERR_CTLQ_EMPTY\t\t-105\t/* -ENOBUFS */\n+#endif /* !__KERNEL__ */\n+\n+struct idpf_hw;\n+\n+/* Used for queue init, response and events */\n+enum idpf_ctlq_type {\n+\tIDPF_CTLQ_TYPE_MAILBOX_TX\t= 0,\n+\tIDPF_CTLQ_TYPE_MAILBOX_RX\t= 1,\n+\tIDPF_CTLQ_TYPE_CONFIG_TX\t= 2,\n+\tIDPF_CTLQ_TYPE_CONFIG_RX\t= 3,\n+\tIDPF_CTLQ_TYPE_EVENT_RX\t\t= 4,\n+\tIDPF_CTLQ_TYPE_RDMA_TX\t\t= 5,\n+\tIDPF_CTLQ_TYPE_RDMA_RX\t\t= 6,\n+\tIDPF_CTLQ_TYPE_RDMA_COMPL\t= 7\n+};\n+\n+/*\n+ * Generic Control Queue Structures\n+ */\n+\n+struct idpf_ctlq_reg {\n+\t/* used for queue tracking */\n+\tu32 head;\n+\tu32 tail;\n+\t/* Below applies only to default mb (if present) */\n+\tu32 len;\n+\tu32 bah;\n+\tu32 bal;\n+\tu32 len_mask;\n+\tu32 len_ena_mask;\n+\tu32 head_mask;\n+};\n+\n+/* Generic queue msg structure */\n+struct idpf_ctlq_msg {\n+\tu8 vmvf_type; /* represents the source of the message on recv */\n+#define IDPF_VMVF_TYPE_VF 0\n+#define IDPF_VMVF_TYPE_VM 1\n+#define IDPF_VMVF_TYPE_PF 2\n+\tu8 host_id;\n+\t/* 3b field used only when sending a message to peer - to be used in\n+\t * combination with target func_id to route the message\n+\t */\n+#define IDPF_HOST_ID_MASK 0x7\n+\n+\tu16 opcode;\n+\tu16 data_len;\t/* data_len = 0 when no payload is attached */\n+\tunion {\n+\t\tu16 func_id;\t/* when sending a message */\n+\t\tu16 status;\t/* when receiving a message */\n+\t};\n+\tunion {\n+\t\tstruct {\n+\t\t\tu32 chnl_retval;\n+\t\t\tu32 chnl_opcode;\n+\t\t} mbx;\n+\t} cookie;\n+\tunion {\n+#define IDPF_DIRECT_CTX_SIZE\t16\n+#define IDPF_INDIRECT_CTX_SIZE\t8\n+\t\t/* 16 bytes of context can be provided or 8 bytes of context\n+\t\t * plus the address of a DMA buffer\n+\t\t */\n+\t\tu8 direct[IDPF_DIRECT_CTX_SIZE];\n+\t\tstruct {\n+\t\t\tu8 context[IDPF_INDIRECT_CTX_SIZE];\n+\t\t\tstruct idpf_dma_mem *payload;\n+\t\t} indirect;\n+\t} ctx;\n+};\n+\n+/* Generic queue info structures */\n+/* MB, CONFIG and EVENT q do not have extended info */\n+struct idpf_ctlq_create_info {\n+\tenum idpf_ctlq_type type;\n+\tint id; /* absolute queue offset passed as input\n+\t\t * -1 for default mailbox if present\n+\t\t */\n+\tu16 len; /* Queue length passed as input */\n+\tu16 buf_size; /* buffer size passed as input */\n+\tu64 base_address; /* output, HPA of the Queue start  */\n+\tstruct idpf_ctlq_reg reg; /* registers accessed by ctlqs 
*/\n+\n+\tint ext_info_size;\n+\tvoid *ext_info; /* Specific to q type */\n+};\n+\n+/* Control Queue information */\n+struct idpf_ctlq_info {\n+\tLIST_ENTRY_TYPE(idpf_ctlq_info) cq_list;\n+\n+\tenum idpf_ctlq_type cq_type;\n+\tint q_id;\n+\tidpf_lock cq_lock;\t\t/* queue lock\n+\t\t\t\t\t * idpf_lock is defined in OSdep.h\n+\t\t\t\t\t */\n+\t/* used for interrupt processing */\n+\tu16 next_to_use;\n+\tu16 next_to_clean;\n+\tu16 next_to_post;\t\t/* starting descriptor to post buffers\n+\t\t\t\t\t * to after recev\n+\t\t\t\t\t */\n+\n+\tstruct idpf_dma_mem desc_ring;\t/* descriptor ring memory\n+\t\t\t\t\t * idpf_dma_mem is defined in OSdep.h\n+\t\t\t\t\t */\n+\tunion {\n+\t\tstruct idpf_dma_mem **rx_buff;\n+\t\tstruct idpf_ctlq_msg **tx_msg;\n+\t} bi;\n+\n+\tu16 buf_size;\t\t\t/* queue buffer size */\n+\tu16 ring_size;\t\t\t/* Number of descriptors */\n+\tstruct idpf_ctlq_reg reg;\t/* registers accessed by ctlqs */\n+};\n+\n+/* PF/VF mailbox commands */\n+enum idpf_mbx_opc {\n+\t/* idpf_mbq_opc_send_msg_to_pf:\n+\t *\tusage: used by PF or VF to send a message to its CPF\n+\t *\ttarget: RX queue and function ID of parent PF taken from HW\n+\t */\n+\tidpf_mbq_opc_send_msg_to_pf\t\t= 0x0801,\n+\n+\t/* idpf_mbq_opc_send_msg_to_vf:\n+\t *\tusage: used by PF to send message to a VF\n+\t *\ttarget: VF control queue ID must be specified in descriptor\n+\t */\n+\tidpf_mbq_opc_send_msg_to_vf\t\t= 0x0802,\n+\n+\t/* idpf_mbq_opc_send_msg_to_peer_pf:\n+\t *\tusage: used by any function to send message to any peer PF\n+\t *\ttarget: RX queue and host of parent PF taken from HW\n+\t */\n+\tidpf_mbq_opc_send_msg_to_peer_pf\t= 0x0803,\n+\n+\t/* idpf_mbq_opc_send_msg_to_peer_drv:\n+\t *\tusage: used by any function to send message to any peer driver\n+\t *\ttarget: RX queue and target host must be specific in descriptor\n+\t */\n+\tidpf_mbq_opc_send_msg_to_peer_drv\t= 0x0804,\n+};\n+\n+/*\n+ * API supported for control queue management\n+ */\n+\n+/* Will init all required q including default mb.  \"q_info\" is an array of\n+ * create_info structs equal to the number of control queues to be created.\n+ */\n+__rte_internal\n+int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,\n+\t\t   struct idpf_ctlq_create_info *q_info);\n+\n+/* Allocate and initialize a single control queue, which will be added to the\n+ * control queue list; returns a handle to the created control queue\n+ */\n+int idpf_ctlq_add(struct idpf_hw *hw,\n+\t\t  struct idpf_ctlq_create_info *qinfo,\n+\t\t  struct idpf_ctlq_info **cq);\n+\n+/* Deinitialize and deallocate a single control queue */\n+void idpf_ctlq_remove(struct idpf_hw *hw,\n+\t\t      struct idpf_ctlq_info *cq);\n+\n+/* Sends messages to HW and will also free the buffer*/\n+__rte_internal\n+int idpf_ctlq_send(struct idpf_hw *hw,\n+\t\t   struct idpf_ctlq_info *cq,\n+\t\t   u16 num_q_msg,\n+\t\t   struct idpf_ctlq_msg q_msg[]);\n+\n+/* Receives messages and called by interrupt handler/polling\n+ * initiated by app/process. 
Also caller is supposed to free the buffers\n+ */\n+__rte_internal\n+int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,\n+\t\t   struct idpf_ctlq_msg *q_msg);\n+\n+/* Reclaims send descriptors on HW write back */\n+__rte_internal\n+int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,\n+\t\t       struct idpf_ctlq_msg *msg_status[]);\n+\n+/* Indicate RX buffers are done being processed */\n+__rte_internal\n+int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw,\n+\t\t\t    struct idpf_ctlq_info *cq,\n+\t\t\t    u16 *buff_count,\n+\t\t\t    struct idpf_dma_mem **buffs);\n+\n+/* Will destroy all q including the default mb */\n+__rte_internal\n+int idpf_ctlq_deinit(struct idpf_hw *hw);\n+\n+#endif /* _IDPF_CONTROLQ_API_H_ */\ndiff --git a/drivers/common/idpf/base/idpf_controlq_setup.c b/drivers/common/idpf/base/idpf_controlq_setup.c\nnew file mode 100644\nindex 0000000000..00dfedb6a4\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_controlq_setup.c\n@@ -0,0 +1,179 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+\n+#include \"idpf_controlq.h\"\n+\n+\n+/**\n+ * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to the specific Control queue\n+ */\n+static int\n+idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,\n+\t\t\t  struct idpf_ctlq_info *cq)\n+{\n+\tsize_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);\n+\n+\tcq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);\n+\tif (!cq->desc_ring.va)\n+\t\treturn IDPF_ERR_NO_MEMORY;\n+\n+\treturn IDPF_SUCCESS;\n+}\n+\n+/**\n+ * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * Allocate the buffer head for all control queues, and if it's a receive\n+ * queue, allocate DMA buffers\n+ */\n+static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,\n+\t\t\t\tstruct idpf_ctlq_info *cq)\n+{\n+\tint i = 0;\n+\n+\t/* Do not allocate DMA buffers for transmit queues */\n+\tif (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)\n+\t\treturn IDPF_SUCCESS;\n+\n+\t/* We'll be allocating the buffer info memory first, then we can\n+\t * allocate the mapped buffers for the event processing\n+\t */\n+\tcq->bi.rx_buff = (struct idpf_dma_mem **)\n+\t\tidpf_calloc(hw, cq->ring_size,\n+\t\t\t    sizeof(struct idpf_dma_mem *));\n+\tif (!cq->bi.rx_buff)\n+\t\treturn IDPF_ERR_NO_MEMORY;\n+\n+\t/* allocate the mapped buffers (except for the last one) */\n+\tfor (i = 0; i < cq->ring_size - 1; i++) {\n+\t\tstruct idpf_dma_mem *bi;\n+\t\tint num = 1; /* number of idpf_dma_mem to be allocated */\n+\n+\t\tcq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc(hw, num,\n+\t\t\t\t\t\tsizeof(struct idpf_dma_mem));\n+\t\tif (!cq->bi.rx_buff[i])\n+\t\t\tgoto unwind_alloc_cq_bufs;\n+\n+\t\tbi = cq->bi.rx_buff[i];\n+\n+\t\tbi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);\n+\t\tif (!bi->va) {\n+\t\t\t/* unwind will not free the failed entry */\n+\t\t\tidpf_free(hw, cq->bi.rx_buff[i]);\n+\t\t\tgoto unwind_alloc_cq_bufs;\n+\t\t}\n+\t}\n+\n+\treturn IDPF_SUCCESS;\n+\n+unwind_alloc_cq_bufs:\n+\t/* don't try to free the one that failed... 
*/\n+\ti--;\n+\tfor (; i >= 0; i--) {\n+\t\tidpf_free_dma_mem(hw, cq->bi.rx_buff[i]);\n+\t\tidpf_free(hw, cq->bi.rx_buff[i]);\n+\t}\n+\tidpf_free(hw, cq->bi.rx_buff);\n+\n+\treturn IDPF_ERR_NO_MEMORY;\n+}\n+\n+/**\n+ * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * This assumes the posted send buffers have already been cleaned\n+ * and de-allocated\n+ */\n+static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,\n+\t\t\t\t     struct idpf_ctlq_info *cq)\n+{\n+\tidpf_free_dma_mem(hw, &cq->desc_ring);\n+}\n+\n+/**\n+ * idpf_ctlq_free_bufs - Free CQ buffer info elements\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX\n+ * queues.  The upper layers are expected to manage freeing of TX DMA buffers\n+ */\n+static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)\n+{\n+\tvoid *bi;\n+\n+\tif (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {\n+\t\tint i;\n+\n+\t\t/* free DMA buffers for rx queues*/\n+\t\tfor (i = 0; i < cq->ring_size; i++) {\n+\t\t\tif (cq->bi.rx_buff[i]) {\n+\t\t\t\tidpf_free_dma_mem(hw, cq->bi.rx_buff[i]);\n+\t\t\t\tidpf_free(hw, cq->bi.rx_buff[i]);\n+\t\t\t}\n+\t\t}\n+\n+\t\tbi = (void *)cq->bi.rx_buff;\n+\t} else {\n+\t\tbi = (void *)cq->bi.tx_msg;\n+\t}\n+\n+\t/* free the buffer header */\n+\tidpf_free(hw, bi);\n+}\n+\n+/**\n+ * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * Free the memory used by the ring, buffers and other related structures\n+ */\n+void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)\n+{\n+\t/* free ring buffers and the ring itself */\n+\tidpf_ctlq_free_bufs(hw, cq);\n+\tidpf_ctlq_free_desc_ring(hw, cq);\n+}\n+\n+/**\n+ * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs\n+ * @hw: pointer to hw struct\n+ * @cq: pointer to control queue struct\n+ *\n+ * Do *NOT* hold the lock when calling this as the memory allocation routines\n+ * called are not going to be atomic context safe\n+ */\n+int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)\n+{\n+\tint ret_code;\n+\n+\t/* verify input for valid configuration */\n+\tif (!cq->ring_size || !cq->buf_size)\n+\t\treturn IDPF_ERR_CFG;\n+\n+\t/* allocate the ring memory */\n+\tret_code = idpf_ctlq_alloc_desc_ring(hw, cq);\n+\tif (ret_code)\n+\t\treturn ret_code;\n+\n+\t/* allocate buffers in the rings */\n+\tret_code = idpf_ctlq_alloc_bufs(hw, cq);\n+\tif (ret_code)\n+\t\tgoto idpf_init_cq_free_ring;\n+\n+\t/* success! 
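the descriptor ring and, for RX queues,\n+\t * the DMA buffers are now allocated\n+\t 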
*/\n+\treturn IDPF_SUCCESS;\n+\n+idpf_init_cq_free_ring:\n+\tidpf_free_dma_mem(hw, &cq->desc_ring);\n+\treturn ret_code;\n+}\ndiff --git a/drivers/common/idpf/base/idpf_devids.h b/drivers/common/idpf/base/idpf_devids.h\nnew file mode 100644\nindex 0000000000..a91eb4e02a\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_devids.h\n@@ -0,0 +1,18 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_DEVIDS_H_\n+#define _IDPF_DEVIDS_H_\n+\n+/* Vendor ID */\n+#define IDPF_INTEL_VENDOR_ID\t\t0x8086\n+\n+/* Device IDs */\n+#define IDPF_DEV_ID_PF\t\t\t0x1452\n+#define IDPF_DEV_ID_VF\t\t\t0x1889\n+\n+\n+\n+\n+#endif /* _IDPF_DEVIDS_H_ */\ndiff --git a/drivers/common/idpf/base/idpf_lan_pf_regs.h b/drivers/common/idpf/base/idpf_lan_pf_regs.h\nnew file mode 100644\nindex 0000000000..3df2347bd7\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_lan_pf_regs.h\n@@ -0,0 +1,134 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_LAN_PF_REGS_H_\n+#define _IDPF_LAN_PF_REGS_H_\n+\n+\n+/* Receive queues */\n+#define PF_QRX_BASE\t\t\t0x00000000\n+#define PF_QRX_TAIL(_QRX)\t\t(PF_QRX_BASE + (((_QRX) * 0x1000)))\n+#define PF_QRX_BUFFQ_BASE\t\t0x03000000\n+#define PF_QRX_BUFFQ_TAIL(_QRX)\t\t(PF_QRX_BUFFQ_BASE + (((_QRX) * 0x1000)))\n+\n+/* Transmit queues */\n+#define PF_QTX_BASE\t\t\t0x05000000\n+#define PF_QTX_COMM_DBELL(_DBQM)\t(PF_QTX_BASE + ((_DBQM) * 0x1000))\n+\n+\n+/* Control(PF Mailbox) Queue */\n+#define PF_FW_BASE\t\t\t0x08400000\n+\n+#define PF_FW_ARQBAL\t\t\t(PF_FW_BASE)\n+#define PF_FW_ARQBAH\t\t\t(PF_FW_BASE + 0x4)\n+#define PF_FW_ARQLEN\t\t\t(PF_FW_BASE + 0x8)\n+#define PF_FW_ARQLEN_ARQLEN_S\t\t0\n+#define PF_FW_ARQLEN_ARQLEN_M\t\tMAKEMASK(0x1FFF, PF_FW_ARQLEN_ARQLEN_S)\n+#define PF_FW_ARQLEN_ARQVFE_S\t\t28\n+#define PF_FW_ARQLEN_ARQVFE_M\t\tBIT(PF_FW_ARQLEN_ARQVFE_S)\n+#define PF_FW_ARQLEN_ARQOVFL_S\t\t29\n+#define PF_FW_ARQLEN_ARQOVFL_M\t\tBIT(PF_FW_ARQLEN_ARQOVFL_S)\n+#define PF_FW_ARQLEN_ARQCRIT_S\t\t30\n+#define PF_FW_ARQLEN_ARQCRIT_M\t\tBIT(PF_FW_ARQLEN_ARQCRIT_S)\n+#define PF_FW_ARQLEN_ARQENABLE_S\t31\n+#define PF_FW_ARQLEN_ARQENABLE_M\tBIT(PF_FW_ARQLEN_ARQENABLE_S)\n+#define PF_FW_ARQH\t\t\t(PF_FW_BASE + 0xC)\n+#define PF_FW_ARQH_ARQH_S\t\t0\n+#define PF_FW_ARQH_ARQH_M\t\tMAKEMASK(0x1FFF, PF_FW_ARQH_ARQH_S)\n+#define PF_FW_ARQT\t\t\t(PF_FW_BASE + 0x10)\n+\n+#define PF_FW_ATQBAL\t\t\t(PF_FW_BASE + 0x14)\n+#define PF_FW_ATQBAH\t\t\t(PF_FW_BASE + 0x18)\n+#define PF_FW_ATQLEN\t\t\t(PF_FW_BASE + 0x1C)\n+#define PF_FW_ATQLEN_ATQLEN_S\t\t0\n+#define PF_FW_ATQLEN_ATQLEN_M\t\tMAKEMASK(0x3FF, PF_FW_ATQLEN_ATQLEN_S)\n+#define PF_FW_ATQLEN_ATQVFE_S\t\t28\n+#define PF_FW_ATQLEN_ATQVFE_M\t\tBIT(PF_FW_ATQLEN_ATQVFE_S)\n+#define PF_FW_ATQLEN_ATQOVFL_S\t\t29\n+#define PF_FW_ATQLEN_ATQOVFL_M\t\tBIT(PF_FW_ATQLEN_ATQOVFL_S)\n+#define PF_FW_ATQLEN_ATQCRIT_S\t\t30\n+#define PF_FW_ATQLEN_ATQCRIT_M\t\tBIT(PF_FW_ATQLEN_ATQCRIT_S)\n+#define PF_FW_ATQLEN_ATQENABLE_S\t31\n+#define PF_FW_ATQLEN_ATQENABLE_M\tBIT(PF_FW_ATQLEN_ATQENABLE_S)\n+#define PF_FW_ATQH\t\t\t(PF_FW_BASE + 0x20)\n+#define PF_FW_ATQH_ATQH_S\t\t0\n+#define PF_FW_ATQH_ATQH_M\t\tMAKEMASK(0x3FF, PF_FW_ATQH_ATQH_S)\n+#define PF_FW_ATQT\t\t\t(PF_FW_BASE + 0x24)\n+\n+/* Interrupts */\n+#define PF_GLINT_BASE\t\t\t0x08900000\n+#define PF_GLINT_DYN_CTL(_INT)\t\t(PF_GLINT_BASE + ((_INT) * 0x1000))\n+#define PF_GLINT_DYN_CTL_INTENA_S\t0\n+#define PF_GLINT_DYN_CTL_INTENA_M\tBIT(PF_GLINT_DYN_CTL_INTENA_S)\n+#define 
PF_GLINT_DYN_CTL_CLEARPBA_S\t1\n+#define PF_GLINT_DYN_CTL_CLEARPBA_M\tBIT(PF_GLINT_DYN_CTL_CLEARPBA_S)\n+#define PF_GLINT_DYN_CTL_SWINT_TRIG_S\t2\n+#define PF_GLINT_DYN_CTL_SWINT_TRIG_M\tBIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S)\n+#define PF_GLINT_DYN_CTL_ITR_INDX_S\t3\n+#define PF_GLINT_DYN_CTL_ITR_INDX_M\tMAKEMASK(0x3, PF_GLINT_DYN_CTL_ITR_INDX_S)\n+#define PF_GLINT_DYN_CTL_INTERVAL_S\t5\n+#define PF_GLINT_DYN_CTL_INTERVAL_M\tBIT(PF_GLINT_DYN_CTL_INTERVAL_S)\n+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S\t24\n+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S)\n+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_S\t25\n+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_M\tBIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_S)\n+#define PF_GLINT_DYN_CTL_WB_ON_ITR_S\t30\n+#define PF_GLINT_DYN_CTL_WB_ON_ITR_M\tBIT(PF_GLINT_DYN_CTL_WB_ON_ITR_S)\n+#define PF_GLINT_DYN_CTL_INTENA_MSK_S\t31\n+#define PF_GLINT_DYN_CTL_INTENA_MSK_M\tBIT(PF_GLINT_DYN_CTL_INTENA_MSK_S)\n+#define PF_GLINT_ITR_V2(_i, _reg_start) (((_i) * 4) + (_reg_start))\n+#define PF_GLINT_ITR(_i, _INT) (PF_GLINT_BASE + (((_i) + 1) * 4) + ((_INT) * 0x1000))\n+#define PF_GLINT_ITR_MAX_INDEX\t\t2\n+#define PF_GLINT_ITR_INTERVAL_S\t\t0\n+#define PF_GLINT_ITR_INTERVAL_M\t\tMAKEMASK(0xFFF, PF_GLINT_ITR_INTERVAL_S)\n+\n+/* Timesync registers */\n+#define PF_TIMESYNC_BASE\t\t0x08404000\n+#define PF_GLTSYN_CMD_SYNC\t\t(PF_TIMESYNC_BASE)\n+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_S\t0\n+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M\tMAKEMASK(0x3, PF_GLTSYN_CMD_SYNC_EXEC_CMD_S)\n+#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_S\t2\n+#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M\tBIT(PF_GLTSYN_CMD_SYNC_SHTIME_EN_S)\n+#define PF_GLTSYN_SHTIME_0\t\t(PF_TIMESYNC_BASE + 0x4)\n+#define PF_GLTSYN_SHTIME_L\t\t(PF_TIMESYNC_BASE + 0x8)\n+#define PF_GLTSYN_SHTIME_H\t\t(PF_TIMESYNC_BASE + 0xC)\n+#define PF_GLTSYN_ART_L\t\t\t(PF_TIMESYNC_BASE + 0x10)\n+#define PF_GLTSYN_ART_H\t\t\t(PF_TIMESYNC_BASE + 0x14)\n+\n+/* Generic registers */\n+#define PF_INT_DIR_OICR_ENA\t\t0x08406000\n+#define PF_INT_DIR_OICR_ENA_S\t\t0\n+#define PF_INT_DIR_OICR_ENA_M\tMAKEMASK(0xFFFFFFFF, PF_INT_DIR_OICR_ENA_S)\n+#define PF_INT_DIR_OICR\t\t\t0x08406004\n+#define PF_INT_DIR_OICR_TSYN_EVNT\t0\n+#define PF_INT_DIR_OICR_PHY_TS_0\tBIT(1)\n+#define PF_INT_DIR_OICR_PHY_TS_1\tBIT(2)\n+#define PF_INT_DIR_OICR_CAUSE\t\t0x08406008\n+#define PF_INT_DIR_OICR_CAUSE_CAUSE_S\t0\n+#define PF_INT_DIR_OICR_CAUSE_CAUSE_M\tMAKEMASK(0xFFFFFFFF, PF_INT_DIR_OICR_CAUSE_CAUSE_S)\n+#define PF_INT_PBA_CLEAR\t\t0x0840600C\n+\n+#define PF_FUNC_RID\t\t\t0x08406010\n+#define PF_FUNC_RID_FUNCTION_NUMBER_S\t0\n+#define PF_FUNC_RID_FUNCTION_NUMBER_M\tMAKEMASK(0x7, PF_FUNC_RID_FUNCTION_NUMBER_S)\n+#define PF_FUNC_RID_DEVICE_NUMBER_S\t3\n+#define PF_FUNC_RID_DEVICE_NUMBER_M\tMAKEMASK(0x1F, PF_FUNC_RID_DEVICE_NUMBER_S)\n+#define PF_FUNC_RID_BUS_NUMBER_S\t8\n+#define PF_FUNC_RID_BUS_NUMBER_M\tMAKEMASK(0xFF, PF_FUNC_RID_BUS_NUMBER_S)\n+\n+/* Reset registers */\n+#define PFGEN_RTRIG\t\t\t0x08407000\n+#define PFGEN_RTRIG_CORER_S\t\t0\n+#define PFGEN_RTRIG_CORER_M\t\tBIT(0)\n+#define PFGEN_RTRIG_LINKR_S\t\t1\n+#define PFGEN_RTRIG_LINKR_M\t\tBIT(1)\n+#define PFGEN_RTRIG_IMCR_S\t\t2\n+#define PFGEN_RTRIG_IMCR_M\t\tBIT(2)\n+#define PFGEN_RSTAT\t\t\t0x08407008 /* PFR Status */\n+#define PFGEN_RSTAT_PFR_STATE_S\t\t0\n+#define PFGEN_RSTAT_PFR_STATE_M\t\tMAKEMASK(0x3, PFGEN_RSTAT_PFR_STATE_S)\n+#define PFGEN_CTRL\t\t\t0x0840700C\n+#define PFGEN_CTRL_PFSWR\t\tBIT(0)\n+\n+#endif\ndiff --git a/drivers/common/idpf/base/idpf_lan_txrx.h 
b/drivers/common/idpf/base/idpf_lan_txrx.h\nnew file mode 100644\nindex 0000000000..98484b267c\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_lan_txrx.h\n@@ -0,0 +1,428 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_LAN_TXRX_H_\n+#define _IDPF_LAN_TXRX_H_\n+#ifndef __KERNEL__\n+#include \"idpf_osdep.h\"\n+#endif\n+\n+enum idpf_rss_hash {\n+\t/* Values 0 - 28 are reserved for future use */\n+\tIDPF_HASH_INVALID\t\t= 0,\n+\tIDPF_HASH_NONF_UNICAST_IPV4_UDP\t= 29,\n+\tIDPF_HASH_NONF_MULTICAST_IPV4_UDP,\n+\tIDPF_HASH_NONF_IPV4_UDP,\n+\tIDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK,\n+\tIDPF_HASH_NONF_IPV4_TCP,\n+\tIDPF_HASH_NONF_IPV4_SCTP,\n+\tIDPF_HASH_NONF_IPV4_OTHER,\n+\tIDPF_HASH_FRAG_IPV4,\n+\t/* Values 37-38 are reserved */\n+\tIDPF_HASH_NONF_UNICAST_IPV6_UDP\t= 39,\n+\tIDPF_HASH_NONF_MULTICAST_IPV6_UDP,\n+\tIDPF_HASH_NONF_IPV6_UDP,\n+\tIDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK,\n+\tIDPF_HASH_NONF_IPV6_TCP,\n+\tIDPF_HASH_NONF_IPV6_SCTP,\n+\tIDPF_HASH_NONF_IPV6_OTHER,\n+\tIDPF_HASH_FRAG_IPV6,\n+\tIDPF_HASH_NONF_RSVD47,\n+\tIDPF_HASH_NONF_FCOE_OX,\n+\tIDPF_HASH_NONF_FCOE_RX,\n+\tIDPF_HASH_NONF_FCOE_OTHER,\n+\t/* Values 51-62 are reserved */\n+\tIDPF_HASH_L2_PAYLOAD\t\t= 63,\n+\tIDPF_HASH_MAX\n+};\n+\n+/* Supported RSS offloads */\n+#define IDPF_DEFAULT_RSS_HASH ( \\\n+\tBIT_ULL(IDPF_HASH_NONF_IPV4_UDP) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_IPV4_SCTP) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_IPV4_TCP) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_IPV4_OTHER) | \\\n+\tBIT_ULL(IDPF_HASH_FRAG_IPV4) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_IPV6_UDP) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_IPV6_TCP) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_IPV6_SCTP) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_IPV6_OTHER) | \\\n+\tBIT_ULL(IDPF_HASH_FRAG_IPV6) | \\\n+\tBIT_ULL(IDPF_HASH_L2_PAYLOAD))\n+\n+\t/* TODO: Wrap below comment under internal flag\n+\t * Below 6 pcktypes are not supported by FVL or older products\n+\t * They are supported by FPK and future products\n+\t */\n+#define IDPF_DEFAULT_RSS_HASH_EXPANDED (IDPF_DEFAULT_RSS_HASH | \\\n+\tBIT_ULL(IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_UNICAST_IPV4_UDP) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV4_UDP) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_UNICAST_IPV6_UDP) | \\\n+\tBIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV6_UDP))\n+\n+/* For idpf_splitq_base_tx_compl_desc */\n+#define IDPF_TXD_COMPLQ_GEN_S\t15\n+#define IDPF_TXD_COMPLQ_GEN_M\t\tBIT_ULL(IDPF_TXD_COMPLQ_GEN_S)\n+#define IDPF_TXD_COMPLQ_COMPL_TYPE_S\t11\n+#define IDPF_TXD_COMPLQ_COMPL_TYPE_M\t\\\n+\tMAKEMASK(0x7UL, IDPF_TXD_COMPLQ_COMPL_TYPE_S)\n+#define IDPF_TXD_COMPLQ_QID_S\t0\n+#define IDPF_TXD_COMPLQ_QID_M\t\tMAKEMASK(0x3FFUL, IDPF_TXD_COMPLQ_QID_S)\n+\n+/* For base mode TX descriptors */\n+\n+#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S\t23\n+#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M\tBIT_ULL(IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S)\n+#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_S\t19\n+#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_M\t\\\n+\t(0xFULL << IDPF_TXD_CTX_QW0_TUNN_DECTTL_S)\n+#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_S\t12\n+#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_M\t\\\n+\t(0X7FULL << IDPF_TXD_CTX_QW0_TUNN_NATLEN_S)\n+#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S\t11\n+#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M    \\\n+\tBIT_ULL(IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S)\n+#define IDPF_TXD_CTX_EIP_NOINC_IPID_CONST\t\\\n+\tIDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M\n+#define IDPF_TXD_CTX_QW0_TUNN_NATT_S\t        9\n+#define IDPF_TXD_CTX_QW0_TUNN_NATT_M\t(0x3ULL << 
IDPF_TXD_CTX_QW0_TUNN_NATT_S)\n+#define IDPF_TXD_CTX_UDP_TUNNELING\tBIT_ULL(IDPF_TXD_CTX_QW0_TUNN_NATT_S)\n+#define IDPF_TXD_CTX_GRE_TUNNELING\t(0x2ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S)\n+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S\t2\n+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M\t\\\n+\t(0x3FULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S)\n+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S\t0\n+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_M\t\\\n+\t(0x3ULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S)\n+\n+#define IDPF_TXD_CTX_QW1_MSS_S\t\t50\n+#define IDPF_TXD_CTX_QW1_MSS_M\t\t\\\n+\tMAKEMASK(0x3FFFULL, IDPF_TXD_CTX_QW1_MSS_S)\n+#define IDPF_TXD_CTX_QW1_TSO_LEN_S\t30\n+#define IDPF_TXD_CTX_QW1_TSO_LEN_M\t\\\n+\tMAKEMASK(0x3FFFFULL, IDPF_TXD_CTX_QW1_TSO_LEN_S)\n+#define IDPF_TXD_CTX_QW1_CMD_S\t\t4\n+#define IDPF_TXD_CTX_QW1_CMD_M\t\t\\\n+\tMAKEMASK(0xFFFUL, IDPF_TXD_CTX_QW1_CMD_S)\n+#define IDPF_TXD_CTX_QW1_DTYPE_S\t0\n+#define IDPF_TXD_CTX_QW1_DTYPE_M\t\\\n+\tMAKEMASK(0xFUL, IDPF_TXD_CTX_QW1_DTYPE_S)\n+#define IDPF_TXD_QW1_L2TAG1_S\t\t48\n+#define IDPF_TXD_QW1_L2TAG1_M\t\t\\\n+\tMAKEMASK(0xFFFFULL, IDPF_TXD_QW1_L2TAG1_S)\n+#define IDPF_TXD_QW1_TX_BUF_SZ_S\t34\n+#define IDPF_TXD_QW1_TX_BUF_SZ_M\t\\\n+\tMAKEMASK(0x3FFFULL, IDPF_TXD_QW1_TX_BUF_SZ_S)\n+#define IDPF_TXD_QW1_OFFSET_S\t\t16\n+#define IDPF_TXD_QW1_OFFSET_M\t\t\\\n+\tMAKEMASK(0x3FFFFULL, IDPF_TXD_QW1_OFFSET_S)\n+#define IDPF_TXD_QW1_CMD_S\t\t4\n+#define IDPF_TXD_QW1_CMD_M\t\tMAKEMASK(0xFFFUL, IDPF_TXD_QW1_CMD_S)\n+#define IDPF_TXD_QW1_DTYPE_S\t\t0\n+#define IDPF_TXD_QW1_DTYPE_M\t\tMAKEMASK(0xFUL, IDPF_TXD_QW1_DTYPE_S)\n+\n+/* TX Completion Descriptor Completion Types */\n+#define IDPF_TXD_COMPLT_ITR_FLUSH\t0\n+#define IDPF_TXD_COMPLT_RULE_MISS\t1\n+#define IDPF_TXD_COMPLT_RS\t\t2\n+#define IDPF_TXD_COMPLT_REINJECTED\t3\n+#define IDPF_TXD_COMPLT_RE\t\t4\n+#define IDPF_TXD_COMPLT_SW_MARKER\t5\n+\n+enum idpf_tx_desc_dtype_value {\n+\tIDPF_TX_DESC_DTYPE_DATA\t\t\t\t= 0,\n+\tIDPF_TX_DESC_DTYPE_CTX\t\t\t\t= 1,\n+\tIDPF_TX_DESC_DTYPE_REINJECT_CTX\t\t\t= 2,\n+\tIDPF_TX_DESC_DTYPE_FLEX_DATA\t\t\t= 3,\n+\tIDPF_TX_DESC_DTYPE_FLEX_CTX\t\t\t= 4,\n+\tIDPF_TX_DESC_DTYPE_FLEX_TSO_CTX\t\t\t= 5,\n+\tIDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1\t\t= 6,\n+\tIDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2\t\t= 7,\n+\tIDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX\t= 8,\n+\tIDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_TSO_CTX\t= 9,\n+\tIDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_CTX\t= 10,\n+\tIDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX\t\t= 11,\n+\tIDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE\t\t= 12,\n+\tIDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_TSO_CTX\t= 13,\n+\tIDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_CTX\t\t= 14,\n+\t/* DESC_DONE - HW has completed write-back of descriptor */\n+\tIDPF_TX_DESC_DTYPE_DESC_DONE\t\t\t= 15,\n+};\n+\n+enum idpf_tx_ctx_desc_cmd_bits {\n+\tIDPF_TX_CTX_DESC_TSO\t\t= 0x01,\n+\tIDPF_TX_CTX_DESC_TSYN\t\t= 0x02,\n+\tIDPF_TX_CTX_DESC_IL2TAG2\t= 0x04,\n+\tIDPF_TX_CTX_DESC_RSVD\t\t= 0x08,\n+\tIDPF_TX_CTX_DESC_SWTCH_NOTAG\t= 0x00,\n+\tIDPF_TX_CTX_DESC_SWTCH_UPLINK\t= 0x10,\n+\tIDPF_TX_CTX_DESC_SWTCH_LOCAL\t= 0x20,\n+\tIDPF_TX_CTX_DESC_SWTCH_VSI\t= 0x30,\n+\tIDPF_TX_CTX_DESC_FILT_AU_EN\t= 0x40,\n+\tIDPF_TX_CTX_DESC_FILT_AU_EVICT\t= 0x80,\n+\tIDPF_TX_CTX_DESC_RSVD1\t\t= 0xF00\n+};\n+\n+enum idpf_tx_desc_len_fields {\n+\t/* Note: These are predefined bit offsets */\n+\tIDPF_TX_DESC_LEN_MACLEN_S\t= 0, /* 7 BITS */\n+\tIDPF_TX_DESC_LEN_IPLEN_S\t= 7, /* 7 BITS */\n+\tIDPF_TX_DESC_LEN_L4_LEN_S\t= 14 /* 4 BITS */\n+};\n+\n+#define IDPF_TXD_QW1_MACLEN_M MAKEMASK(0x7FUL, IDPF_TX_DESC_LEN_MACLEN_S)\n+#define IDPF_TXD_QW1_IPLEN_M  MAKEMASK(0x7FUL, 
IDPF_TX_DESC_LEN_IPLEN_S)\n+#define IDPF_TXD_QW1_L4LEN_M  MAKEMASK(0xFUL, IDPF_TX_DESC_LEN_L4_LEN_S)\n+#define IDPF_TXD_QW1_FCLEN_M  MAKEMASK(0xFUL, IDPF_TX_DESC_LEN_L4_LEN_S)\n+\n+enum idpf_tx_base_desc_cmd_bits {\n+\tIDPF_TX_DESC_CMD_EOP\t\t\t= 0x0001,\n+\tIDPF_TX_DESC_CMD_RS\t\t\t= 0x0002,\n+\t /* only on VFs else RSVD */\n+\tIDPF_TX_DESC_CMD_ICRC\t\t\t= 0x0004,\n+\tIDPF_TX_DESC_CMD_IL2TAG1\t\t= 0x0008,\n+\tIDPF_TX_DESC_CMD_RSVD1\t\t\t= 0x0010,\n+\tIDPF_TX_DESC_CMD_IIPT_NONIP\t\t= 0x0000, /* 2 BITS */\n+\tIDPF_TX_DESC_CMD_IIPT_IPV6\t\t= 0x0020, /* 2 BITS */\n+\tIDPF_TX_DESC_CMD_IIPT_IPV4\t\t= 0x0040, /* 2 BITS */\n+\tIDPF_TX_DESC_CMD_IIPT_IPV4_CSUM\t\t= 0x0060, /* 2 BITS */\n+\tIDPF_TX_DESC_CMD_RSVD2\t\t\t= 0x0080,\n+\tIDPF_TX_DESC_CMD_L4T_EOFT_UNK\t\t= 0x0000, /* 2 BITS */\n+\tIDPF_TX_DESC_CMD_L4T_EOFT_TCP\t\t= 0x0100, /* 2 BITS */\n+\tIDPF_TX_DESC_CMD_L4T_EOFT_SCTP\t\t= 0x0200, /* 2 BITS */\n+\tIDPF_TX_DESC_CMD_L4T_EOFT_UDP\t\t= 0x0300, /* 2 BITS */\n+\tIDPF_TX_DESC_CMD_RSVD3\t\t\t= 0x0400,\n+\tIDPF_TX_DESC_CMD_RSVD4\t\t\t= 0x0800,\n+};\n+\n+/* Transmit descriptors  */\n+/* splitq tx buf, singleq tx buf and singleq compl desc */\n+struct idpf_base_tx_desc {\n+\t__le64 buf_addr; /* Address of descriptor's data buf */\n+\t__le64 qw1; /* type_cmd_offset_bsz_l2tag1 */\n+};/* read used with buffer queues*/\n+\n+struct idpf_splitq_tx_compl_desc {\n+\t/* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */\n+\t__le16 qid_comptype_gen;\n+\tunion {\n+\t\t__le16 q_head; /* Queue head */\n+\t\t__le16 compl_tag; /* Completion tag */\n+\t} q_head_compl_tag;\n+\tu32 rsvd;\n+\n+};/* writeback used with completion queues*/\n+\n+/* Context descriptors */\n+struct idpf_base_tx_ctx_desc {\n+\tstruct {\n+\t\t__le32 tunneling_params;\n+\t\t__le16 l2tag2;\n+\t\t__le16 rsvd1;\n+\t} qw0;\n+\t__le64 qw1; /* type_cmd_tlen_mss/rt_hint */\n+};\n+\n+/* Common cmd field defines for all desc except Flex Flow Scheduler (0x0C) */\n+enum idpf_tx_flex_desc_cmd_bits {\n+\tIDPF_TX_FLEX_DESC_CMD_EOP\t\t\t= 0x01,\n+\tIDPF_TX_FLEX_DESC_CMD_RS\t\t\t= 0x02,\n+\tIDPF_TX_FLEX_DESC_CMD_RE\t\t\t= 0x04,\n+\tIDPF_TX_FLEX_DESC_CMD_IL2TAG1\t\t\t= 0x08,\n+\tIDPF_TX_FLEX_DESC_CMD_DUMMY\t\t\t= 0x10,\n+\tIDPF_TX_FLEX_DESC_CMD_CS_EN\t\t\t= 0x20,\n+\tIDPF_TX_FLEX_DESC_CMD_FILT_AU_EN\t\t= 0x40,\n+\tIDPF_TX_FLEX_DESC_CMD_FILT_AU_EVICT\t\t= 0x80,\n+};\n+\n+struct idpf_flex_tx_desc {\n+\t__le64 buf_addr;\t/* Packet buffer address */\n+\tstruct {\n+\t\t__le16 cmd_dtype;\n+#define IDPF_FLEX_TXD_QW1_DTYPE_S\t\t0\n+#define IDPF_FLEX_TXD_QW1_DTYPE_M\t\t\\\n+\t\tMAKEMASK(0x1FUL, IDPF_FLEX_TXD_QW1_DTYPE_S)\n+#define IDPF_FLEX_TXD_QW1_CMD_S\t\t5\n+#define IDPF_FLEX_TXD_QW1_CMD_M\t\tMAKEMASK(0x7FFUL, IDPF_TXD_QW1_CMD_S)\n+\t\tunion {\n+\t\t\t/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_DATA_(0x03) */\n+\t\t\tu8 raw[4];\n+\n+\t\t\t/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 (0x06) */\n+\t\t\tstruct {\n+\t\t\t\t__le16 l2tag1;\n+\t\t\t\tu8 flex;\n+\t\t\t\tu8 tsync;\n+\t\t\t} tsync;\n+\n+\t\t\t/* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */\n+\t\t\tstruct {\n+\t\t\t\t__le16 l2tag1;\n+\t\t\t\t__le16 l2tag2;\n+\t\t\t} l2tags;\n+\t\t} flex;\n+\t\t__le16 buf_size;\n+\t} qw1;\n+};\n+\n+struct idpf_flex_tx_sched_desc {\n+\t__le64 buf_addr;\t/* Packet buffer address */\n+\n+\t/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE_16B (0x0C) */\n+\tstruct {\n+\t\tu8 cmd_dtype;\n+#define IDPF_TXD_FLEX_FLOW_DTYPE_M\t0x1F\n+#define IDPF_TXD_FLEX_FLOW_CMD_EOP\t0x20\n+#define IDPF_TXD_FLEX_FLOW_CMD_CS_EN\t0x40\n+#define IDPF_TXD_FLEX_FLOW_CMD_RE\t0x80\n+\n+\t\tu8 
rsvd[3];\n+\n+\t\t__le16 compl_tag;\n+\t\t__le16 rxr_bufsize;\n+#define IDPF_TXD_FLEX_FLOW_RXR\t\t0x4000\n+#define IDPF_TXD_FLEX_FLOW_BUFSIZE_M\t0x3FFF\n+\t} qw1;\n+};\n+\n+/* Common cmd fields for all flex context descriptors\n+ * Note: these defines already account for the 5 bit dtype in the cmd_dtype\n+ * field\n+ */\n+enum idpf_tx_flex_ctx_desc_cmd_bits {\n+\tIDPF_TX_FLEX_CTX_DESC_CMD_TSO\t\t\t= 0x0020,\n+\tIDPF_TX_FLEX_CTX_DESC_CMD_TSYN_EN\t\t= 0x0040,\n+\tIDPF_TX_FLEX_CTX_DESC_CMD_L2TAG2\t\t= 0x0080,\n+\tIDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_UPLNK\t\t= 0x0200, /* 2 bits */\n+\tIDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_LOCAL\t\t= 0x0400, /* 2 bits */\n+\tIDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_TARGETVSI\t= 0x0600, /* 2 bits */\n+};\n+\n+/* Standard flex descriptor TSO context quad word */\n+struct idpf_flex_tx_tso_ctx_qw {\n+\t__le32 flex_tlen;\n+#define IDPF_TXD_FLEX_CTX_TLEN_M\t0x3FFFF\n+#define IDPF_TXD_FLEX_TSO_CTX_FLEX_S\t24\n+\t__le16 mss_rt;\n+#define IDPF_TXD_FLEX_CTX_MSS_RT_M\t0x3FFF\n+\tu8 hdr_len;\n+\tu8 flex;\n+};\n+\n+union idpf_flex_tx_ctx_desc {\n+\t/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_CTX (0x04) */\n+\tstruct {\n+\t\tu8 qw0_flex[8];\n+\t\tstruct {\n+\t\t\t__le16 cmd_dtype;\n+\t\t\t__le16 l2tag1;\n+\t\t\tu8 qw1_flex[4];\n+\t\t} qw1;\n+\t} gen;\n+\n+\t/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */\n+\tstruct {\n+\t\tstruct idpf_flex_tx_tso_ctx_qw qw0;\n+\t\tstruct {\n+\t\t\t__le16 cmd_dtype;\n+\t\t\tu8 flex[6];\n+\t\t} qw1;\n+\t} tso;\n+\n+\t/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX (0x08) */\n+\tstruct {\n+\t\tstruct idpf_flex_tx_tso_ctx_qw qw0;\n+\t\tstruct {\n+\t\t\t__le16 cmd_dtype;\n+\t\t\t__le16 l2tag2;\n+\t\t\tu8 flex0;\n+\t\t\tu8 ptag;\n+\t\t\tu8 flex1[2];\n+\t\t} qw1;\n+\t} tso_l2tag2_ptag;\n+\n+\t/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX (0x0B) */\n+\tstruct {\n+\t\tu8 qw0_flex[8];\n+\t\tstruct {\n+\t\t\t__le16 cmd_dtype;\n+\t\t\t__le16 l2tag2;\n+\t\t\tu8 flex[4];\n+\t\t} qw1;\n+\t} l2tag2;\n+\n+\t/* DTYPE = IDPF_TX_DESC_DTYPE_REINJECT_CTX (0x02) */\n+\tstruct {\n+\t\tstruct {\n+\t\t\t__le32 sa_domain;\n+#define IDPF_TXD_FLEX_CTX_SA_DOM_M\t0xFFFF\n+#define IDPF_TXD_FLEX_CTX_SA_DOM_VAL\t0x10000\n+\t\t\t__le32 sa_idx;\n+#define IDPF_TXD_FLEX_CTX_SAIDX_M\t0x1FFFFF\n+\t\t} qw0;\n+\t\tstruct {\n+\t\t\t__le16 cmd_dtype;\n+\t\t\t__le16 txr2comp;\n+#define IDPF_TXD_FLEX_CTX_TXR2COMP\t0x1\n+\t\t\t__le16 miss_txq_comp_tag;\n+\t\t\t__le16 miss_txq_id;\n+\t\t} qw1;\n+\t} reinjection_pkt;\n+};\n+\n+/* Host Split Context Descriptors */\n+struct idpf_flex_tx_hs_ctx_desc {\n+\tunion {\n+\t\tstruct {\n+\t\t\t__le32 host_fnum_tlen;\n+#define IDPF_TXD_FLEX_CTX_TLEN_S\t0\n+/* see IDPF_TXD_FLEX_CTX_TLEN_M for mask definition */\n+#define IDPF_TXD_FLEX_CTX_FNUM_S\t18\n+#define IDPF_TXD_FLEX_CTX_FNUM_M\t0x7FF\n+#define IDPF_TXD_FLEX_CTX_HOST_S\t29\n+#define IDPF_TXD_FLEX_CTX_HOST_M\t0x7\n+\t\t\t__le16 ftype_mss_rt;\n+#define IDPF_TXD_FLEX_CTX_MSS_RT_0\t0\n+#define IDPF_TXD_FLEX_CTX_MSS_RT_M\t0x3FFF\n+#define IDPF_TXD_FLEX_CTX_FTYPE_S\t14\n+#define IDPF_TXD_FLEX_CTX_FTYPE_VF\tMAKEMASK(0x0, IDPF_TXD_FLEX_CTX_FTYPE_S)\n+#define IDPF_TXD_FLEX_CTX_FTYPE_VDEV\tMAKEMASK(0x1, IDPF_TXD_FLEX_CTX_FTYPE_S)\n+#define IDPF_TXD_FLEX_CTX_FTYPE_PF\tMAKEMASK(0x2, IDPF_TXD_FLEX_CTX_FTYPE_S)\n+\t\t\tu8 hdr_len;\n+\t\t\tu8 ptag;\n+\t\t} tso;\n+\t\tstruct {\n+\t\t\tu8 flex0[2];\n+\t\t\t__le16 host_fnum_ftype;\n+\t\t\tu8 flex1[3];\n+\t\t\tu8 ptag;\n+\t\t} no_tso;\n+\t} qw0;\n+\n+\t__le64 qw1_cmd_dtype;\n+#define IDPF_TXD_FLEX_CTX_QW1_PASID_S\t\t16\n+#define 
IDPF_TXD_FLEX_CTX_QW1_PASID_M\t\t0xFFFFF\n+#define IDPF_TXD_FLEX_CTX_QW1_PASID_VALID_S\t36\n+#define IDPF_TXD_FLEX_CTX_QW1_PASID_VALID\t\\\n+\t\tMAKEMASK(0x1, IDPF_TXD_FLEX_CTX_QW1_PASID_VALID_S)\n+#define IDPF_TXD_FLEX_CTX_QW1_TPH_S\t\t37\n+#define IDPF_TXD_FLEX_CTX_QW1_TPH \\\n+\t\tMAKEMASK(0x1, IDPF_TXD_FLEX_CTX_QW1_TPH_S)\n+#define IDPF_TXD_FLEX_CTX_QW1_PFNUM_S\t\t38\n+#define IDPF_TXD_FLEX_CTX_QW1_PFNUM_M\t\t0xF\n+/* The following are only valid for DTYPE = 0x09 and DTYPE = 0x0A */\n+#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_S\t\t42\n+#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_M\t\t0x1FFFFF\n+#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_VAL_S\t63\n+#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_VALID\t\\\n+\t\tMAKEMASK(0x1, IDPF_TXD_FLEX_CTX_QW1_SAIDX_VAL_S)\n+/* The following are only valid for DTYPE = 0x0D and DTYPE = 0x0E */\n+#define IDPF_TXD_FLEX_CTX_QW1_FLEX0_S\t\t48\n+#define IDPF_TXD_FLEX_CTX_QW1_FLEX0_M\t\t0xFF\n+#define IDPF_TXD_FLEX_CTX_QW1_FLEX1_S\t\t56\n+#define IDPF_TXD_FLEX_CTX_QW1_FLEX1_M\t\t0xFF\n+};\n+#endif /* _IDPF_LAN_TXRX_H_ */\ndiff --git a/drivers/common/idpf/base/idpf_lan_vf_regs.h b/drivers/common/idpf/base/idpf_lan_vf_regs.h\nnew file mode 100644\nindex 0000000000..9cd4f757d9\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_lan_vf_regs.h\n@@ -0,0 +1,114 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_LAN_VF_REGS_H_\n+#define _IDPF_LAN_VF_REGS_H_\n+\n+\n+/* Reset */\n+#define VFGEN_RSTAT\t\t\t0x00008800\n+#define VFGEN_RSTAT_VFR_STATE_S\t\t0\n+#define VFGEN_RSTAT_VFR_STATE_M\t\tMAKEMASK(0x3, VFGEN_RSTAT_VFR_STATE_S)\n+\n+/* Control(VF Mailbox) Queue */\n+#define VF_BASE\t\t\t\t0x00006000\n+\n+#define VF_ATQBAL\t\t\t(VF_BASE + 0x1C00)\n+#define VF_ATQBAH\t\t\t(VF_BASE + 0x1800)\n+#define VF_ATQLEN\t\t\t(VF_BASE + 0x0800)\n+#define VF_ATQLEN_ATQLEN_S\t\t0\n+#define VF_ATQLEN_ATQLEN_M\t\tMAKEMASK(0x3FF, VF_ATQLEN_ATQLEN_S)\n+#define VF_ATQLEN_ATQVFE_S\t\t28\n+#define VF_ATQLEN_ATQVFE_M\t\tBIT(VF_ATQLEN_ATQVFE_S)\n+#define VF_ATQLEN_ATQOVFL_S\t\t29\n+#define VF_ATQLEN_ATQOVFL_M\t\tBIT(VF_ATQLEN_ATQOVFL_S)\n+#define VF_ATQLEN_ATQCRIT_S\t\t30\n+#define VF_ATQLEN_ATQCRIT_M\t\tBIT(VF_ATQLEN_ATQCRIT_S)\n+#define VF_ATQLEN_ATQENABLE_S\t\t31\n+#define VF_ATQLEN_ATQENABLE_M\t\tBIT(VF_ATQLEN_ATQENABLE_S)\n+#define VF_ATQH\t\t\t\t(VF_BASE + 0x0400)\n+#define VF_ATQH_ATQH_S\t\t\t0\n+#define VF_ATQH_ATQH_M\t\t\tMAKEMASK(0x3FF, VF_ATQH_ATQH_S)\n+#define VF_ATQT\t\t\t\t(VF_BASE + 0x2400)\n+\n+#define VF_ARQBAL\t\t\t(VF_BASE + 0x0C00)\n+#define VF_ARQBAH\t\t\t(VF_BASE)\n+#define VF_ARQLEN\t\t\t(VF_BASE + 0x2000)\n+#define VF_ARQLEN_ARQLEN_S\t\t0\n+#define VF_ARQLEN_ARQLEN_M\t\tMAKEMASK(0x3FF, VF_ARQLEN_ARQLEN_S)\n+#define VF_ARQLEN_ARQVFE_S\t\t28\n+#define VF_ARQLEN_ARQVFE_M\t\tBIT(VF_ARQLEN_ARQVFE_S)\n+#define VF_ARQLEN_ARQOVFL_S\t\t29\n+#define VF_ARQLEN_ARQOVFL_M\t\tBIT(VF_ARQLEN_ARQOVFL_S)\n+#define VF_ARQLEN_ARQCRIT_S\t\t30\n+#define VF_ARQLEN_ARQCRIT_M\t\tBIT(VF_ARQLEN_ARQCRIT_S)\n+#define VF_ARQLEN_ARQENABLE_S\t\t31\n+#define VF_ARQLEN_ARQENABLE_M\t\tBIT(VF_ARQLEN_ARQENABLE_S)\n+#define VF_ARQH\t\t\t\t(VF_BASE + 0x1400)\n+#define VF_ARQH_ARQH_S\t\t\t0\n+#define VF_ARQH_ARQH_M\t\t\tMAKEMASK(0x1FFF, VF_ARQH_ARQH_S)\n+#define VF_ARQT\t\t\t\t(VF_BASE + 0x1000)\n+\n+/* Transmit queues */\n+#define VF_QTX_TAIL_BASE\t\t0x00000000\n+#define VF_QTX_TAIL(_QTX)\t\t(VF_QTX_TAIL_BASE + (_QTX) * 0x4)\n+#define VF_QTX_TAIL_EXT_BASE\t\t0x00040000\n+#define VF_QTX_TAIL_EXT(_QTX)\t\t(VF_QTX_TAIL_EXT_BASE + ((_QTX) * 4))\n+\n+/* Receive queues */\n+#define 
VF_QRX_TAIL_BASE\t\t0x00002000\n+#define VF_QRX_TAIL(_QRX)\t\t(VF_QRX_TAIL_BASE + ((_QRX) * 4))\n+#define VF_QRX_TAIL_EXT_BASE\t\t0x00050000\n+#define VF_QRX_TAIL_EXT(_QRX)\t\t(VF_QRX_TAIL_EXT_BASE + ((_QRX) * 4))\n+#define VF_QRXB_TAIL_BASE\t\t0x00060000\n+#define VF_QRXB_TAIL(_QRX)\t\t(VF_QRXB_TAIL_BASE + ((_QRX) * 4))\n+\n+/* Interrupts */\n+#define VF_INT_DYN_CTL0\t\t\t0x00005C00\n+#define VF_INT_DYN_CTL0_INTENA_S\t0\n+#define VF_INT_DYN_CTL0_INTENA_M\tBIT(VF_INT_DYN_CTL0_INTENA_S)\n+#define VF_INT_DYN_CTL0_ITR_INDX_S\t3\n+#define VF_INT_DYN_CTL0_ITR_INDX_M\tMAKEMASK(0x3, VF_INT_DYN_CTL0_ITR_INDX_S)\n+#define VF_INT_DYN_CTLN(_INT)\t\t(0x00003800 + ((_INT) * 4))\n+#define VF_INT_DYN_CTLN_EXT(_INT)\t(0x00070000 + ((_INT) * 4))\n+#define VF_INT_DYN_CTLN_INTENA_S\t0\n+#define VF_INT_DYN_CTLN_INTENA_M\tBIT(VF_INT_DYN_CTLN_INTENA_S)\n+#define VF_INT_DYN_CTLN_CLEARPBA_S\t1\n+#define VF_INT_DYN_CTLN_CLEARPBA_M\tBIT(VF_INT_DYN_CTLN_CLEARPBA_S)\n+#define VF_INT_DYN_CTLN_SWINT_TRIG_S\t2\n+#define VF_INT_DYN_CTLN_SWINT_TRIG_M\tBIT(VF_INT_DYN_CTLN_SWINT_TRIG_S)\n+#define VF_INT_DYN_CTLN_ITR_INDX_S\t3\n+#define VF_INT_DYN_CTLN_ITR_INDX_M\tMAKEMASK(0x3, VF_INT_DYN_CTLN_ITR_INDX_S)\n+#define VF_INT_DYN_CTLN_INTERVAL_S\t5\n+#define VF_INT_DYN_CTLN_INTERVAL_M\tBIT(VF_INT_DYN_CTLN_INTERVAL_S)\n+#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S\t24\n+#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M\tBIT(VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S)\n+#define VF_INT_DYN_CTLN_SW_ITR_INDX_S\t25\n+#define VF_INT_DYN_CTLN_SW_ITR_INDX_M\tBIT(VF_INT_DYN_CTLN_SW_ITR_INDX_S)\n+#define VF_INT_DYN_CTLN_WB_ON_ITR_S\t30\n+#define VF_INT_DYN_CTLN_WB_ON_ITR_M\tBIT(VF_INT_DYN_CTLN_WB_ON_ITR_S)\n+#define VF_INT_DYN_CTLN_INTENA_MSK_S\t31\n+#define VF_INT_DYN_CTLN_INTENA_MSK_M\tBIT(VF_INT_DYN_CTLN_INTENA_MSK_S)\n+#define VF_INT_ITR0(_i)\t\t\t(0x00004C00 + ((_i) * 4))\n+#define VF_INT_ITRN_V2(_i, _reg_start)\t((_reg_start) + (((_i)) * 4))\n+#define VF_INT_ITRN(_i, _INT)\t\t(0x00002800 + ((_i) * 4) + ((_INT) * 0x40))\n+#define VF_INT_ITRN_64(_i, _INT)\t(0x00002C00 + ((_i) * 4) + ((_INT) * 0x100))\n+#define VF_INT_ITRN_2K(_i, _INT)\t(0x00072000 + ((_i) * 4) + ((_INT) * 0x100))\n+#define VF_INT_ITRN_MAX_INDEX\t\t2\n+#define VF_INT_ITRN_INTERVAL_S\t\t0\n+#define VF_INT_ITRN_INTERVAL_M\t\tMAKEMASK(0xFFF, VF_INT_ITRN_INTERVAL_S)\n+#define VF_INT_PBA_CLEAR\t\t0x00008900\n+\n+#define VF_INT_ICR0_ENA1\t\t0x00005000\n+#define VF_INT_ICR0_ENA1_ADMINQ_S\t30\n+#define VF_INT_ICR0_ENA1_ADMINQ_M\tBIT(VF_INT_ICR0_ENA1_ADMINQ_S)\n+#define VF_INT_ICR0_ENA1_RSVD_S\t\t31\n+#define VF_INT_ICR01\t\t\t0x00004800\n+#define VF_QF_HENA(_i)\t\t\t(0x0000C400 + ((_i) * 4))\n+#define VF_QF_HENA_MAX_INDX\t\t1\n+#define VF_QF_HKEY(_i)\t\t\t(0x0000CC00 + ((_i) * 4))\n+#define VF_QF_HKEY_MAX_INDX\t\t12\n+#define VF_QF_HLUT(_i)\t\t\t(0x0000D000 + ((_i) * 4))\n+#define VF_QF_HLUT_MAX_INDX\t\t15\n+#endif\ndiff --git a/drivers/common/idpf/base/idpf_osdep.h b/drivers/common/idpf/base/idpf_osdep.h\nnew file mode 100644\nindex 0000000000..99ae9cf60a\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_osdep.h\n@@ -0,0 +1,364 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_OSDEP_H_\n+#define _IDPF_OSDEP_H_\n+\n+#include <string.h>\n+#include <stdint.h>\n+#include <stdio.h>\n+#include <stdarg.h>\n+#include <inttypes.h>\n+#include <sys/queue.h>\n+#include <stdbool.h>\n+\n+#include <rte_common.h>\n+#include <rte_memcpy.h>\n+#include <rte_malloc.h>\n+#include <rte_memzone.h>\n+#include <rte_byteorder.h>\n+#include 
<rte_cycles.h>\n+#include <rte_spinlock.h>\n+#include <rte_log.h>\n+#include <rte_random.h>\n+#include <rte_io.h>\n+\n+#define INLINE inline\n+#define STATIC static\n+\n+typedef uint8_t\t\tu8;\n+typedef int8_t\t\ts8;\n+typedef uint16_t\tu16;\n+typedef int16_t\t\ts16;\n+typedef uint32_t\tu32;\n+typedef int32_t\t\ts32;\n+typedef uint64_t\tu64;\n+typedef int64_t\t\ts64;\n+\n+typedef struct idpf_lock idpf_lock;\n+\n+#define __iomem\n+#define hw_dbg(hw, S, A...)\tdo {} while (0)\n+#define upper_32_bits(n)\t((u32)(((n) >> 16) >> 16))\n+#define lower_32_bits(n)\t((u32)(n))\n+#define low_16_bits(x)\t\t((x) & 0xFFFF)\n+#define high_16_bits(x)\t\t(((x) & 0xFFFF0000) >> 16)\n+\n+#ifndef ETH_ADDR_LEN\n+#define ETH_ADDR_LEN\t\t6\n+#endif\n+\n+#ifndef __le16\n+#define __le16\tuint16_t\n+#endif\n+#ifndef __le32\n+#define __le32\tuint32_t\n+#endif\n+#ifndef __le64\n+#define __le64\tuint64_t\n+#endif\n+#ifndef __be16\n+#define __be16\tuint16_t\n+#endif\n+#ifndef __be32\n+#define __be32\tuint32_t\n+#endif\n+#ifndef __be64\n+#define __be64\tuint64_t\n+#endif\n+\n+#ifndef BIT_ULL\n+#define BIT_ULL(a) RTE_BIT64(a)\n+#endif\n+\n+#ifndef BIT\n+#define BIT(a) RTE_BIT32(a)\n+#endif\n+\n+#define FALSE\t0\n+#define TRUE\t1\n+#define false\t0\n+#define true\t1\n+\n+/* Avoid macro redefinition warning on Windows */\n+#ifdef RTE_EXEC_ENV_WINDOWS\n+#ifdef min\n+#undef min\n+#endif\n+#ifdef max\n+#undef max\n+#endif\n+#endif\n+\n+#define min(a, b) RTE_MIN(a, b)\n+#define max(a, b) RTE_MAX(a, b)\n+\n+#define ARRAY_SIZE(arr)  RTE_DIM(arr)\n+#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))\n+#define MAKEMASK(m, s) ((m) << (s))\n+\n+extern int idpf_common_logger;\n+\n+#define DEBUGOUT(S)\t\trte_log(RTE_LOG_DEBUG, idpf_common_logger, S)\n+#define DEBUGOUT2(S, A...)\trte_log(RTE_LOG_DEBUG, idpf_common_logger, S, ##A)\n+#define DEBUGFUNC(F)\t\tDEBUGOUT(F \"\\n\")\n+\n+#define idpf_debug(h, m, s, ...)\t\t\t\t\t\\\n+\tdo {\t\t\t\t\t\t\t\t\\\n+\t\tif (((m) & (h)->debug_mask))\t\t\t\t\\\n+\t\t\tPMD_DRV_LOG_RAW(DEBUG, \"idpf %02x.%x \" s,       \\\n+\t\t\t\t\t(h)->bus.device, (h)->bus.func,\t\\\n+\t\t\t\t\t##__VA_ARGS__);\t\t\t\\\n+\t} while (0)\n+\n+#define idpf_info(hw, fmt, args...) idpf_debug(hw, IDPF_DBG_ALL, fmt, ##args)\n+#define idpf_warn(hw, fmt, args...) 
idpf_debug(hw, IDPF_DBG_ALL, fmt, ##args)\n+#define idpf_debug_array(hw, type, rowsize, groupsize, buf, len)\t\\\n+\tdo {\t\t\t\t\t\t\t\t\\\n+\t\tstruct idpf_hw *hw_l = hw;\t\t\t\t\\\n+\t\tu16 len_l = len;\t\t\t\t\t\\\n+\t\tu8 *buf_l = buf;\t\t\t\t\t\\\n+\t\tint i;\t\t\t\t\t\t\t\\\n+\t\tfor (i = 0; i < len_l; i += 8)\t\t\t\t\\\n+\t\t\tidpf_debug(hw_l, type,\t\t\t\t\\\n+\t\t\t\t   \"0x%04X  0x%016\"PRIx64\"\\n\",\t\t\\\n+\t\t\t\t   i, *((u64 *)((buf_l) + i)));\t\t\\\n+\t} while (0)\n+#define idpf_snprintf snprintf\n+#ifndef SNPRINTF\n+#define SNPRINTF idpf_snprintf\n+#endif\n+\n+#define IDPF_PCI_REG(reg)     rte_read32(reg)\n+#define IDPF_PCI_REG_ADDR(a, reg)\t\t\t\t\\\n+\t((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))\n+#define IDPF_PCI_REG64(reg)     rte_read64(reg)\n+#define IDPF_PCI_REG_ADDR64(a, reg)\t\t\t\t\\\n+\t((volatile uint64_t *)((char *)(a)->hw_addr + (reg)))\n+\n+#define idpf_wmb() rte_io_wmb()\n+#define idpf_rmb() rte_io_rmb()\n+#define idpf_mb() rte_io_mb()\n+\n+static inline uint32_t idpf_read_addr(volatile void *addr)\n+{\n+\treturn rte_le_to_cpu_32(IDPF_PCI_REG(addr));\n+}\n+\n+static inline uint64_t idpf_read_addr64(volatile void *addr)\n+{\n+\treturn rte_le_to_cpu_64(IDPF_PCI_REG64(addr));\n+}\n+\n+#define IDPF_PCI_REG_WRITE(reg, value)\t\t\t\\\n+\trte_write32((rte_cpu_to_le_32(value)), reg)\n+\n+#define IDPF_PCI_REG_WRITE64(reg, value)\t\t\\\n+\trte_write64((rte_cpu_to_le_64(value)), reg)\n+\n+#define IDPF_READ_REG(hw, reg) idpf_read_addr(IDPF_PCI_REG_ADDR((hw), (reg)))\n+#define IDPF_WRITE_REG(hw, reg, value)\t\t\t\t\t\\\n+\tIDPF_PCI_REG_WRITE(IDPF_PCI_REG_ADDR((hw), (reg)), (value))\n+\n+#define rd32(a, reg) idpf_read_addr(IDPF_PCI_REG_ADDR((a), (reg)))\n+#define wr32(a, reg, value)\t\t\t\t\t\t\\\n+\tIDPF_PCI_REG_WRITE(IDPF_PCI_REG_ADDR((a), (reg)), (value))\n+#define div64_long(n, d) ((n) / (d))\n+#define rd64(a, reg) idpf_read_addr64(IDPF_PCI_REG_ADDR64((a), (reg)))\n+\n+#define BITS_PER_BYTE       8\n+\n+/* memory allocation tracking */\n+struct idpf_dma_mem {\n+\tvoid *va;\n+\tu64 pa;\n+\tu32 size;\n+\tconst void *zone;\n+} __rte_packed;\n+\n+struct idpf_virt_mem {\n+\tvoid *va;\n+\tu32 size;\n+} __rte_packed;\n+\n+#define idpf_malloc(h, s)\trte_zmalloc(NULL, s, 0)\n+#define idpf_calloc(h, c, s)\trte_zmalloc(NULL, (c) * (s), 0)\n+#define idpf_free(h, m)\t\trte_free(m)\n+\n+#define idpf_memset(a, b, c, d)\tmemset((a), (b), (c))\n+#define idpf_memcpy(a, b, c, d)\trte_memcpy((a), (b), (c))\n+#define idpf_memdup(a, b, c, d)\trte_memcpy(idpf_malloc(a, c), b, c)\n+\n+#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)\n+#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)\n+#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)\n+#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)\n+#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)\n+#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)\n+#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)\n+#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)\n+#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)\n+\n+#define NTOHS(a) rte_be_to_cpu_16(a)\n+#define NTOHL(a) rte_be_to_cpu_32(a)\n+#define HTONS(a) rte_cpu_to_be_16(a)\n+#define HTONL(a) rte_cpu_to_be_32(a)\n+\n+/* SW spinlock */\n+struct idpf_lock {\n+\trte_spinlock_t spinlock;\n+};\n+\n+static inline void\n+idpf_init_lock(struct idpf_lock *sp)\n+{\n+\trte_spinlock_init(&sp->spinlock);\n+}\n+\n+static inline void\n+idpf_acquire_lock(struct idpf_lock *sp)\n+{\n+\trte_spinlock_lock(&sp->spinlock);\n+}\n+\n+static inline void\n+idpf_release_lock(struct idpf_lock *sp)\n+{\n+\trte_spinlock_unlock(&sp->spinlock);\n+}\n+\n+static inline 
void\n+idpf_destroy_lock(__rte_unused struct idpf_lock *sp)\n+{\n+}\n+\n+struct idpf_hw;\n+\n+static inline void *\n+idpf_alloc_dma_mem(__rte_unused struct idpf_hw *hw,\n+\t\t   struct idpf_dma_mem *mem, u64 size)\n+{\n+\tconst struct rte_memzone *mz = NULL;\n+\tchar z_name[RTE_MEMZONE_NAMESIZE];\n+\n+\tif (!mem)\n+\t\treturn NULL;\n+\n+\tsnprintf(z_name, sizeof(z_name), \"idpf_dma_%\"PRIu64, rte_rand());\n+\tmz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,\n+\t\t\t\t\t RTE_MEMZONE_IOVA_CONTIG, RTE_PGSIZE_4K);\n+\tif (!mz)\n+\t\treturn NULL;\n+\n+\tmem->size = size;\n+\tmem->va = mz->addr;\n+\tmem->pa = mz->iova;\n+\tmem->zone = (const void *)mz;\n+\tmemset(mem->va, 0, size);\n+\n+\treturn mem->va;\n+}\n+\n+static inline void\n+idpf_free_dma_mem(__rte_unused struct idpf_hw *hw,\n+\t\t  struct idpf_dma_mem *mem)\n+{\n+\trte_memzone_free((const struct rte_memzone *)mem->zone);\n+\tmem->size = 0;\n+\tmem->va = NULL;\n+\tmem->pa = 0;\n+}\n+\n+static inline u8\n+idpf_hweight8(u32 num)\n+{\n+\tu8 bits = 0;\n+\tu32 i;\n+\n+\tfor (i = 0; i < 8; i++) {\n+\t\tbits += (u8)(num & 0x1);\n+\t\tnum >>= 1;\n+\t}\n+\n+\treturn bits;\n+}\n+\n+static inline u8\n+idpf_hweight32(u32 num)\n+{\n+\tu8 bits = 0;\n+\tu32 i;\n+\n+\tfor (i = 0; i < 32; i++) {\n+\t\tbits += (u8)(num & 0x1);\n+\t\tnum >>= 1;\n+\t}\n+\n+\treturn bits;\n+}\n+\n+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))\n+#define DELAY(x) rte_delay_us(x)\n+#define idpf_usec_delay(x) rte_delay_us(x)\n+#define idpf_msec_delay(x, y) rte_delay_us(1000 * (x))\n+#define udelay(x) DELAY(x)\n+#define msleep(x) DELAY(1000 * (x))\n+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))\n+\n+#ifndef IDPF_DBG_TRACE\n+#define IDPF_DBG_TRACE\t  BIT_ULL(0)\n+#endif\n+\n+#ifndef DIVIDE_AND_ROUND_UP\n+#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))\n+#endif\n+\n+#ifndef IDPF_INTEL_VENDOR_ID\n+#define IDPF_INTEL_VENDOR_ID\t    0x8086\n+#endif\n+\n+#ifndef IS_UNICAST_ETHER_ADDR\n+#define IS_UNICAST_ETHER_ADDR(addr)\t\t\t\\\n+\t((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 0))\n+#endif\n+\n+#ifndef IS_MULTICAST_ETHER_ADDR\n+#define IS_MULTICAST_ETHER_ADDR(addr)\t\t\t\\\n+\t((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 1))\n+#endif\n+\n+#ifndef IS_BROADCAST_ETHER_ADDR\n+/* Check whether an address is broadcast. 
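Note that the macro below only\n+ * tests the first 16 bits of the address against 0xffff. 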
*/\n+#define IS_BROADCAST_ETHER_ADDR(addr)\t\t\t\\\n+\t((bool)((((u16 *)(addr))[0] == ((u16)0xffff))))\n+#endif\n+\n+#ifndef IS_ZERO_ETHER_ADDR\n+#define IS_ZERO_ETHER_ADDR(addr)\t\t\t\t\\\n+\t(((bool)((((u16 *)(addr))[0] == ((u16)0x0)))) &&\t\\\n+\t ((bool)((((u16 *)(addr))[1] == ((u16)0x0)))) &&\t\\\n+\t ((bool)((((u16 *)(addr))[2] == ((u16)0x0)))))\n+#endif\n+\n+#ifndef LIST_HEAD_TYPE\n+#define LIST_HEAD_TYPE(list_name, type) LIST_HEAD(list_name, type)\n+#endif\n+\n+#ifndef LIST_ENTRY_TYPE\n+#define LIST_ENTRY_TYPE(type)\t   LIST_ENTRY(type)\n+#endif\n+\n+#ifndef LIST_FOR_EACH_ENTRY_SAFE\n+#define LIST_FOR_EACH_ENTRY_SAFE(pos, temp, head, entry_type, list)\t\\\n+\tLIST_FOREACH(pos, head, list)\n+\n+#endif\n+\n+#ifndef LIST_FOR_EACH_ENTRY\n+#define LIST_FOR_EACH_ENTRY(pos, head, entry_type, list)\t\t\\\n+\tLIST_FOREACH(pos, head, list)\n+\n+#endif\n+\n+#endif /* _IDPF_OSDEP_H_ */\ndiff --git a/drivers/common/idpf/base/idpf_prototype.h b/drivers/common/idpf/base/idpf_prototype.h\nnew file mode 100644\nindex 0000000000..529b62212d\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_prototype.h\n@@ -0,0 +1,45 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_PROTOTYPE_H_\n+#define _IDPF_PROTOTYPE_H_\n+\n+/* Include generic macros and types first */\n+#include \"idpf_osdep.h\"\n+#include \"idpf_controlq.h\"\n+#include \"idpf_type.h\"\n+#include \"idpf_alloc.h\"\n+#include \"idpf_devids.h\"\n+#include \"idpf_controlq_api.h\"\n+#include \"idpf_lan_pf_regs.h\"\n+#include \"idpf_lan_vf_regs.h\"\n+#include \"idpf_lan_txrx.h\"\n+#include \"virtchnl.h\"\n+\n+#define APF\n+\n+int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size);\n+int idpf_deinit_hw(struct idpf_hw *hw);\n+\n+int idpf_clean_arq_element(struct idpf_hw *hw,\n+\t\t\t   struct idpf_arq_event_info *e,\n+\t\t\t   u16 *events_pending);\n+bool idpf_asq_done(struct idpf_hw *hw);\n+bool idpf_check_asq_alive(struct idpf_hw *hw);\n+\n+int idpf_get_rss_lut(struct idpf_hw *hw, u16 seid, bool pf_lut,\n+\t\t     u8 *lut, u16 lut_size);\n+int idpf_set_rss_lut(struct idpf_hw *hw, u16 seid, bool pf_lut,\n+\t\t     u8 *lut, u16 lut_size);\n+int idpf_get_rss_key(struct idpf_hw *hw, u16 seid,\n+\t\t     struct idpf_get_set_rss_key_data *key);\n+int idpf_set_rss_key(struct idpf_hw *hw, u16 seid,\n+\t\t     struct idpf_get_set_rss_key_data *key);\n+\n+int idpf_set_mac_type(struct idpf_hw *hw);\n+\n+int idpf_reset(struct idpf_hw *hw);\n+int idpf_send_msg_to_cp(struct idpf_hw *hw, enum virtchnl_ops v_opcode,\n+\t\t\tint v_retval, u8 *msg, u16 msglen);\n+#endif /* _IDPF_PROTOTYPE_H_ */\ndiff --git a/drivers/common/idpf/base/idpf_type.h b/drivers/common/idpf/base/idpf_type.h\nnew file mode 100644\nindex 0000000000..3b46536287\n--- /dev/null\n+++ b/drivers/common/idpf/base/idpf_type.h\n@@ -0,0 +1,106 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _IDPF_TYPE_H_\n+#define _IDPF_TYPE_H_\n+\n+#include \"idpf_controlq.h\"\n+\n+#define UNREFERENCED_XPARAMETER\n+#define UNREFERENCED_1PARAMETER(_p)\n+#define UNREFERENCED_2PARAMETER(_p, _q)\n+#define UNREFERENCED_3PARAMETER(_p, _q, _r)\n+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)\n+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t)\n+\n+#define MAKEMASK(m, s)\t((m) << (s))\n+\n+struct idpf_eth_stats {\n+\tu64 rx_bytes;\t\t\t/* gorc */\n+\tu64 rx_unicast;\t\t\t/* uprc */\n+\tu64 rx_multicast;\t\t/* mprc */\n+\tu64 rx_broadcast;\t\t/* bprc */\n+\tu64 rx_discards;\t\t/* 
rdpc */\n+\tu64 rx_unknown_protocol;\t/* rupp */\n+\tu64 tx_bytes;\t\t\t/* gotc */\n+\tu64 tx_unicast;\t\t\t/* uptc */\n+\tu64 tx_multicast;\t\t/* mptc */\n+\tu64 tx_broadcast;\t\t/* bptc */\n+\tu64 tx_discards;\t\t/* tdpc */\n+\tu64 tx_errors;\t\t\t/* tepc */\n+};\n+\n+/* Statistics collected by the MAC */\n+struct idpf_hw_port_stats {\n+\t/* eth stats collected by the port */\n+\tstruct idpf_eth_stats eth;\n+\n+\t/* additional port specific stats */\n+\tu64 tx_dropped_link_down;\t/* tdold */\n+\tu64 crc_errors;\t\t\t/* crcerrs */\n+\tu64 illegal_bytes;\t\t/* illerrc */\n+\tu64 error_bytes;\t\t/* errbc */\n+\tu64 mac_local_faults;\t\t/* mlfc */\n+\tu64 mac_remote_faults;\t\t/* mrfc */\n+\tu64 rx_length_errors;\t\t/* rlec */\n+\tu64 link_xon_rx;\t\t/* lxonrxc */\n+\tu64 link_xoff_rx;\t\t/* lxoffrxc */\n+\tu64 priority_xon_rx[8];\t\t/* pxonrxc[8] */\n+\tu64 priority_xoff_rx[8];\t/* pxoffrxc[8] */\n+\tu64 link_xon_tx;\t\t/* lxontxc */\n+\tu64 link_xoff_tx;\t\t/* lxofftxc */\n+\tu64 priority_xon_tx[8];\t\t/* pxontxc[8] */\n+\tu64 priority_xoff_tx[8];\t/* pxofftxc[8] */\n+\tu64 priority_xon_2_xoff[8];\t/* pxon2offc[8] */\n+\tu64 rx_size_64;\t\t\t/* prc64 */\n+\tu64 rx_size_127;\t\t/* prc127 */\n+\tu64 rx_size_255;\t\t/* prc255 */\n+\tu64 rx_size_511;\t\t/* prc511 */\n+\tu64 rx_size_1023;\t\t/* prc1023 */\n+\tu64 rx_size_1522;\t\t/* prc1522 */\n+\tu64 rx_size_big;\t\t/* prc9522 */\n+\tu64 rx_undersize;\t\t/* ruc */\n+\tu64 rx_fragments;\t\t/* rfc */\n+\tu64 rx_oversize;\t\t/* roc */\n+\tu64 rx_jabber;\t\t\t/* rjc */\n+\tu64 tx_size_64;\t\t\t/* ptc64 */\n+\tu64 tx_size_127;\t\t/* ptc127 */\n+\tu64 tx_size_255;\t\t/* ptc255 */\n+\tu64 tx_size_511;\t\t/* ptc511 */\n+\tu64 tx_size_1023;\t\t/* ptc1023 */\n+\tu64 tx_size_1522;\t\t/* ptc1522 */\n+\tu64 tx_size_big;\t\t/* ptc9522 */\n+\tu64 mac_short_packet_dropped;\t/* mspdc */\n+\tu64 checksum_error;\t\t/* xec */\n+};\n+/* Static buffer size to initialize control queue */\n+struct idpf_ctlq_size {\n+\tu16 asq_buf_size;\n+\tu16 asq_ring_size;\n+\tu16 arq_buf_size;\n+\tu16 arq_ring_size;\n+};\n+\n+/* Temporary definition to compile - TBD if needed */\n+struct idpf_arq_event_info {\n+\tstruct idpf_ctlq_desc desc;\n+\tu16 msg_len;\n+\tu16 buf_len;\n+\tu8 *msg_buf;\n+};\n+\n+struct idpf_get_set_rss_key_data {\n+\tu8 standard_rss_key[0x28];\n+\tu8 extended_hash_key[0xc];\n+};\n+\n+struct idpf_aq_get_phy_abilities_resp {\n+\t__le32 phy_type;\n+};\n+\n+struct idpf_filter_program_desc {\n+\t__le32 qid;\n+};\n+\n+#endif /* _IDPF_TYPE_H_ */\ndiff --git a/drivers/common/idpf/base/meson.build b/drivers/common/idpf/base/meson.build\nnew file mode 100644\nindex 0000000000..ce2b5346e4\n--- /dev/null\n+++ b/drivers/common/idpf/base/meson.build\n@@ -0,0 +1,14 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2022 Intel Corporation\n+\n+sources = files(\n+        'idpf_common.c',\n+        'idpf_controlq.c',\n+        'idpf_controlq_setup.c',\n+)\n+\n+cflags += ['-Wno-unused-value']\n+cflags += ['-Wno-unused-variable']\n+cflags += ['-Wno-unused-parameter']\n+cflags += ['-Wno-implicit-fallthrough']\n+cflags += ['-Wno-strict-aliasing']\n\\ No newline at end of file\ndiff --git a/drivers/common/idpf/base/siov_regs.h b/drivers/common/idpf/base/siov_regs.h\nnew file mode 100644\nindex 0000000000..3ac4f8f177\n--- /dev/null\n+++ b/drivers/common/idpf/base/siov_regs.h\n@@ -0,0 +1,47 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+#ifndef _SIOV_REGS_H_\n+#define _SIOV_REGS_H_\n+#define VDEV_MBX_START\t\t\t0x20000 
/* Begin at 128KB */\n+#define VDEV_MBX_ATQBAL\t\t\t(VDEV_MBX_START + 0x0000)\n+#define VDEV_MBX_ATQBAH\t\t\t(VDEV_MBX_START + 0x0004)\n+#define VDEV_MBX_ATQLEN\t\t\t(VDEV_MBX_START + 0x0008)\n+#define VDEV_MBX_ATQH\t\t\t(VDEV_MBX_START + 0x000C)\n+#define VDEV_MBX_ATQT\t\t\t(VDEV_MBX_START + 0x0010)\n+#define VDEV_MBX_ARQBAL\t\t\t(VDEV_MBX_START + 0x0014)\n+#define VDEV_MBX_ARQBAH\t\t\t(VDEV_MBX_START + 0x0018)\n+#define VDEV_MBX_ARQLEN\t\t\t(VDEV_MBX_START + 0x001C)\n+#define VDEV_MBX_ARQH\t\t\t(VDEV_MBX_START + 0x0020)\n+#define VDEV_MBX_ARQT\t\t\t(VDEV_MBX_START + 0x0024)\n+#define VDEV_GET_RSTAT\t\t\t0x21000 /* 132KB for RSTAT */\n+\n+/* Begin at offset after 1MB (after 256 4k pages) */\n+#define VDEV_QRX_TAIL_START\t\t0x100000\n+#define VDEV_QRX_TAIL(_i)\t\t(VDEV_QRX_TAIL_START + ((_i) * 0x1000)) /* 2k Rx queues */\n+\n+/* Begin at offset of 9MB for Rx buffer queue tail register pages */\n+#define VDEV_QRX_BUFQ_TAIL_START\t0x900000\n+/* 2k Rx buffer queues */\n+#define VDEV_QRX_BUFQ_TAIL(_i)\t\t(VDEV_QRX_BUFQ_TAIL_START + ((_i) * 0x1000))\n+\n+/* Begin at offset of 17MB for 2k Tx queues */\n+#define VDEV_QTX_TAIL_START\t\t0x1100000\n+#define VDEV_QTX_TAIL(_i)\t\t(VDEV_QTX_TAIL_START + ((_i) * 0x1000)) /* 2k Tx queues */\n+\n+/* Begin at offset of 25MB for 2k Tx completion queues */\n+#define VDEV_QTX_COMPL_TAIL_START\t0x1900000\n+/* 2k Tx completion queues */\n+#define VDEV_QTX_COMPL_TAIL(_i)\t\t(VDEV_QTX_COMPL_TAIL_START + ((_i) * 0x1000))\n+\n+#define VDEV_INT_DYN_CTL01\t\t0x2100000 /* Begin at offset 33MB */\n+\n+/* Begin at offset of 33MB + 4k to accommodate CTL01 register */\n+#define VDEV_INT_DYN_START\t\t(VDEV_INT_DYN_CTL01 + 0x1000)\n+#define VDEV_INT_DYN_CTL(_i)\t\t(VDEV_INT_DYN_START + ((_i) * 0x1000))\n+#define VDEV_INT_ITR_0(_i)\t\t(VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x04)\n+#define VDEV_INT_ITR_1(_i)\t\t(VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x08)\n+#define VDEV_INT_ITR_2(_i)\t\t(VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x0C)\n+\n+/* Next offset to begin at 42MB (0x2A00000) */\n+#endif /* _SIOV_REGS_H_ */\ndiff --git a/drivers/common/idpf/base/virtchnl.h b/drivers/common/idpf/base/virtchnl.h\nnew file mode 100644\nindex 0000000000..ea798e3971\n--- /dev/null\n+++ b/drivers/common/idpf/base/virtchnl.h\n@@ -0,0 +1,2866 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _VIRTCHNL_H_\n+#define _VIRTCHNL_H_\n+\n+/* Description:\n+ * This header file describes the Virtual Function (VF) - Physical Function\n+ * (PF) communication protocol used by the drivers for all devices starting\n+ * from our 40G product line\n+ *\n+ * Admin queue buffer usage:\n+ * desc->opcode is always aqc_opc_send_msg_to_pf\n+ * flags, retval, datalen, and data addr are all used normally.\n+ * The Firmware copies the cookie fields when sending messages between the\n+ * PF and VF, but uses all other fields internally. Due to this limitation,\n+ * we must send all messages as \"indirect\", i.e. using an external buffer.\n+ *\n+ * All the VSI indexes are relative to the VF. Each VF can have maximum of\n+ * three VSIs. All the queue indexes are relative to the VSI.  Each VF can\n+ * have a maximum of sixteen queues for all of its VSIs.\n+ *\n+ * The PF is required to return a status code in v_retval for all messages\n+ * except RESET_VF, which does not require any response. The returned value\n+ * is of virtchnl_status_code type, defined here.\n+ *\n+ * In general, VF driver initialization should roughly follow the order of\n+ * these opcodes. 
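In terms of the opcode values\n+ * defined below, a typical bring-up is (informational outline only):\n+ *\tVIRTCHNL_OP_VERSION -> VIRTCHNL_OP_RESET_VF ->\n+ *\tVIRTCHNL_OP_GET_VF_RESOURCES -> VIRTCHNL_OP_CONFIG_VSI_QUEUES ->\n+ *\tVIRTCHNL_OP_CONFIG_IRQ_MAP -> VIRTCHNL_OP_ENABLE_QUEUES,\n+ * optionally followed by VIRTCHNL_OP_ADD_ETH_ADDR and VIRTCHNL_OP_ADD_VLAN.\n+ * 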
The VF driver must first validate the API version of the\n+ * PF driver, then request a reset, then get resources, then configure\n+ * queues and interrupts. After these operations are complete, the VF\n+ * driver may start its queues, optionally add MAC and VLAN filters, and\n+ * process traffic.\n+ */\n+\n+/* START GENERIC DEFINES\n+ * Need to ensure the following enums and defines hold the same meaning and\n+ * value in current and future projects\n+ */\n+\n+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS\t6\n+\n+/* These macros are used to generate compilation errors if a structure/union\n+ * is not exactly the correct length. It gives a divide by zero error if the\n+ * structure/union is not of the correct size, otherwise it creates an enum\n+ * that is never used.\n+ */\n+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \\\n+\t{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }\n+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \\\n+\t{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }\n+\n+\n+/* Error Codes\n+ * Note that many older versions of various iAVF drivers convert the reported\n+ * status code directly into an iavf_status enumeration. For this reason, it\n+ * is important that the values of these enumerations line up.\n+ */\n+enum virtchnl_status_code {\n+\tVIRTCHNL_STATUS_SUCCESS\t\t\t\t= 0,\n+\tVIRTCHNL_STATUS_ERR_PARAM\t\t\t= -5,\n+\tVIRTCHNL_STATUS_ERR_NO_MEMORY\t\t\t= -18,\n+\tVIRTCHNL_STATUS_ERR_OPCODE_MISMATCH\t\t= -38,\n+\tVIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR\t\t= -39,\n+\tVIRTCHNL_STATUS_ERR_INVALID_VF_ID\t\t= -40,\n+\tVIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR\t\t= -53,\n+\tVIRTCHNL_STATUS_ERR_NOT_SUPPORTED\t\t= -64,\n+};\n+\n+/* Backward compatibility */\n+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM\n+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED\n+\n+#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT\t\t0x0\n+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT\t\t0x1\n+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT\t0x2\n+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT\t\t0x3\n+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT\t\t0x4\n+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT\t\t0x5\n+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT\t\t0x6\n+#define VIRTCHNL_LINK_SPEED_5GB_SHIFT\t\t0x7\n+\n+enum virtchnl_link_speed {\n+\tVIRTCHNL_LINK_SPEED_UNKNOWN\t= 0,\n+\tVIRTCHNL_LINK_SPEED_100MB\t= BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),\n+\tVIRTCHNL_LINK_SPEED_1GB\t\t= BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),\n+\tVIRTCHNL_LINK_SPEED_10GB\t= BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),\n+\tVIRTCHNL_LINK_SPEED_40GB\t= BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),\n+\tVIRTCHNL_LINK_SPEED_20GB\t= BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),\n+\tVIRTCHNL_LINK_SPEED_25GB\t= BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),\n+\tVIRTCHNL_LINK_SPEED_2_5GB\t= BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),\n+\tVIRTCHNL_LINK_SPEED_5GB\t\t= BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),\n+};\n+\n+/* for hsplit_0 field of Rx HMC context */\n+/* deprecated with AVF 1.0 */\n+enum virtchnl_rx_hsplit {\n+\tVIRTCHNL_RX_HSPLIT_NO_SPLIT      = 0,\n+\tVIRTCHNL_RX_HSPLIT_SPLIT_L2      = 1,\n+\tVIRTCHNL_RX_HSPLIT_SPLIT_IP      = 2,\n+\tVIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,\n+\tVIRTCHNL_RX_HSPLIT_SPLIT_SCTP    = 8,\n+};\n+\n+enum virtchnl_bw_limit_type {\n+\tVIRTCHNL_BW_SHAPER = 0,\n+};\n+/* END GENERIC DEFINES */\n+\n+/* Opcodes for VF-PF communication. 
These are placed in the v_opcode field\n+ * of the virtchnl_msg structure.\n+ */\n+enum virtchnl_ops {\n+/* The PF sends status change events to VFs using\n+ * the VIRTCHNL_OP_EVENT opcode.\n+ * VFs send requests to the PF using the other ops.\n+ * Use of \"advanced opcode\" features must be negotiated as part of capabilities\n+ * exchange and are not considered part of base mode feature set.\n+ *\n+ */\n+\tVIRTCHNL_OP_UNKNOWN = 0,\n+\tVIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */\n+\tVIRTCHNL_OP_RESET_VF = 2,\n+\tVIRTCHNL_OP_GET_VF_RESOURCES = 3,\n+\tVIRTCHNL_OP_CONFIG_TX_QUEUE = 4,\n+\tVIRTCHNL_OP_CONFIG_RX_QUEUE = 5,\n+\tVIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,\n+\tVIRTCHNL_OP_CONFIG_IRQ_MAP = 7,\n+\tVIRTCHNL_OP_ENABLE_QUEUES = 8,\n+\tVIRTCHNL_OP_DISABLE_QUEUES = 9,\n+\tVIRTCHNL_OP_ADD_ETH_ADDR = 10,\n+\tVIRTCHNL_OP_DEL_ETH_ADDR = 11,\n+\tVIRTCHNL_OP_ADD_VLAN = 12,\n+\tVIRTCHNL_OP_DEL_VLAN = 13,\n+\tVIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,\n+\tVIRTCHNL_OP_GET_STATS = 15,\n+\tVIRTCHNL_OP_RSVD = 16,\n+\tVIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */\n+\t/* opcode 19 is reserved */\n+\t/* opcodes 20, 21, and 22 are reserved */\n+\tVIRTCHNL_OP_CONFIG_RSS_KEY = 23,\n+\tVIRTCHNL_OP_CONFIG_RSS_LUT = 24,\n+\tVIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,\n+\tVIRTCHNL_OP_SET_RSS_HENA = 26,\n+\tVIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,\n+\tVIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,\n+\tVIRTCHNL_OP_REQUEST_QUEUES = 29,\n+\tVIRTCHNL_OP_ENABLE_CHANNELS = 30,\n+\tVIRTCHNL_OP_DISABLE_CHANNELS = 31,\n+\tVIRTCHNL_OP_ADD_CLOUD_FILTER = 32,\n+\tVIRTCHNL_OP_DEL_CLOUD_FILTER = 33,\n+\t/* opcode 34 is reserved */\n+\t/* opcodes 38, 39, 40, 41, 42 and 43 are reserved */\n+\t/* opcode 44 is reserved */\n+\tVIRTCHNL_OP_ADD_RSS_CFG = 45,\n+\tVIRTCHNL_OP_DEL_RSS_CFG = 46,\n+\tVIRTCHNL_OP_ADD_FDIR_FILTER = 47,\n+\tVIRTCHNL_OP_DEL_FDIR_FILTER = 48,\n+\tVIRTCHNL_OP_GET_MAX_RSS_QREGION = 50,\n+\tVIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,\n+\tVIRTCHNL_OP_ADD_VLAN_V2 = 52,\n+\tVIRTCHNL_OP_DEL_VLAN_V2 = 53,\n+\tVIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,\n+\tVIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,\n+\tVIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,\n+\tVIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,\n+\tVIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,\n+\tVIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,\n+\tVIRTCHNL_OP_1588_PTP_GET_CAPS = 60,\n+\tVIRTCHNL_OP_1588_PTP_GET_TIME = 61,\n+\tVIRTCHNL_OP_1588_PTP_SET_TIME = 62,\n+\tVIRTCHNL_OP_1588_PTP_ADJ_TIME = 63,\n+\tVIRTCHNL_OP_1588_PTP_ADJ_FREQ = 64,\n+\tVIRTCHNL_OP_1588_PTP_TX_TIMESTAMP = 65,\n+\tVIRTCHNL_OP_GET_QOS_CAPS = 66,\n+\tVIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,\n+\tVIRTCHNL_OP_1588_PTP_GET_PIN_CFGS = 68,\n+\tVIRTCHNL_OP_1588_PTP_SET_PIN_CFG = 69,\n+\tVIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP = 70,\n+\tVIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,\n+\tVIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,\n+\tVIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,\n+\tVIRTCHNL_OP_CONFIG_QUEUE_BW = 112,\n+\tVIRTCHNL_OP_CONFIG_QUANTA = 113,\n+\tVIRTCHNL_OP_MAX,\n+};\n+\n+static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)\n+{\n+\tswitch (v_opcode) {\n+\tcase VIRTCHNL_OP_UNKNOWN:\n+\t\treturn \"VIRTCHNL_OP_UNKNOWN\";\n+\tcase VIRTCHNL_OP_VERSION:\n+\t\treturn \"VIRTCHNL_OP_VERSION\";\n+\tcase VIRTCHNL_OP_RESET_VF:\n+\t\treturn \"VIRTCHNL_OP_RESET_VF\";\n+\tcase VIRTCHNL_OP_GET_VF_RESOURCES:\n+\t\treturn \"VIRTCHNL_OP_GET_VF_RESOURCES\";\n+\tcase VIRTCHNL_OP_CONFIG_TX_QUEUE:\n+\t\treturn \"VIRTCHNL_OP_CONFIG_TX_QUEUE\";\n+\tcase VIRTCHNL_OP_CONFIG_RX_QUEUE:\n+\t\treturn \"VIRTCHNL_OP_CONFIG_RX_QUEUE\";\n+\tcase 
VIRTCHNL_OP_CONFIG_VSI_QUEUES:\n+\t\treturn \"VIRTCHNL_OP_CONFIG_VSI_QUEUES\";\n+\tcase VIRTCHNL_OP_CONFIG_IRQ_MAP:\n+\t\treturn \"VIRTCHNL_OP_CONFIG_IRQ_MAP\";\n+\tcase VIRTCHNL_OP_ENABLE_QUEUES:\n+\t\treturn \"VIRTCHNL_OP_ENABLE_QUEUES\";\n+\tcase VIRTCHNL_OP_DISABLE_QUEUES:\n+\t\treturn \"VIRTCHNL_OP_DISABLE_QUEUES\";\n+\tcase VIRTCHNL_OP_ADD_ETH_ADDR:\n+\t\treturn \"VIRTCHNL_OP_ADD_ETH_ADDR\";\n+\tcase VIRTCHNL_OP_DEL_ETH_ADDR:\n+\t\treturn \"VIRTCHNL_OP_DEL_ETH_ADDR\";\n+\tcase VIRTCHNL_OP_ADD_VLAN:\n+\t\treturn \"VIRTCHNL_OP_ADD_VLAN\";\n+\tcase VIRTCHNL_OP_DEL_VLAN:\n+\t\treturn \"VIRTCHNL_OP_DEL_VLAN\";\n+\tcase VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:\n+\t\treturn \"VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE\";\n+\tcase VIRTCHNL_OP_GET_STATS:\n+\t\treturn \"VIRTCHNL_OP_GET_STATS\";\n+\tcase VIRTCHNL_OP_RSVD:\n+\t\treturn \"VIRTCHNL_OP_RSVD\";\n+\tcase VIRTCHNL_OP_EVENT:\n+\t\treturn \"VIRTCHNL_OP_EVENT\";\n+\tcase VIRTCHNL_OP_CONFIG_RSS_KEY:\n+\t\treturn \"VIRTCHNL_OP_CONFIG_RSS_KEY\";\n+\tcase VIRTCHNL_OP_CONFIG_RSS_LUT:\n+\t\treturn \"VIRTCHNL_OP_CONFIG_RSS_LUT\";\n+\tcase VIRTCHNL_OP_GET_RSS_HENA_CAPS:\n+\t\treturn \"VIRTCHNL_OP_GET_RSS_HENA_CAPS\";\n+\tcase VIRTCHNL_OP_SET_RSS_HENA:\n+\t\treturn \"VIRTCHNL_OP_SET_RSS_HENA\";\n+\tcase VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:\n+\t\treturn \"VIRTCHNL_OP_ENABLE_VLAN_STRIPPING\";\n+\tcase VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:\n+\t\treturn \"VIRTCHNL_OP_DISABLE_VLAN_STRIPPING\";\n+\tcase VIRTCHNL_OP_REQUEST_QUEUES:\n+\t\treturn \"VIRTCHNL_OP_REQUEST_QUEUES\";\n+\tcase VIRTCHNL_OP_ENABLE_CHANNELS:\n+\t\treturn \"VIRTCHNL_OP_ENABLE_CHANNELS\";\n+\tcase VIRTCHNL_OP_DISABLE_CHANNELS:\n+\t\treturn \"VIRTCHNL_OP_DISABLE_CHANNELS\";\n+\tcase VIRTCHNL_OP_ADD_CLOUD_FILTER:\n+\t\treturn \"VIRTCHNL_OP_ADD_CLOUD_FILTER\";\n+\tcase VIRTCHNL_OP_DEL_CLOUD_FILTER:\n+\t\treturn \"VIRTCHNL_OP_DEL_CLOUD_FILTER\";\n+\tcase VIRTCHNL_OP_ADD_RSS_CFG:\n+\t\treturn \"VIRTCHNL_OP_ADD_RSS_CFG\";\n+\tcase VIRTCHNL_OP_DEL_RSS_CFG:\n+\t\treturn \"VIRTCHNL_OP_DEL_RSS_CFG\";\n+\tcase VIRTCHNL_OP_ADD_FDIR_FILTER:\n+\t\treturn \"VIRTCHNL_OP_ADD_FDIR_FILTER\";\n+\tcase VIRTCHNL_OP_DEL_FDIR_FILTER:\n+\t\treturn \"VIRTCHNL_OP_DEL_FDIR_FILTER\";\n+\tcase VIRTCHNL_OP_GET_MAX_RSS_QREGION:\n+\t\treturn \"VIRTCHNL_OP_GET_MAX_RSS_QREGION\";\n+\tcase VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:\n+\t\treturn \"VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS\";\n+\tcase VIRTCHNL_OP_ADD_VLAN_V2:\n+\t\treturn \"VIRTCHNL_OP_ADD_VLAN_V2\";\n+\tcase VIRTCHNL_OP_DEL_VLAN_V2:\n+\t\treturn \"VIRTCHNL_OP_DEL_VLAN_V2\";\n+\tcase VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:\n+\t\treturn \"VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2\";\n+\tcase VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:\n+\t\treturn \"VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2\";\n+\tcase VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:\n+\t\treturn \"VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2\";\n+\tcase VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:\n+\t\treturn \"VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2\";\n+\tcase VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:\n+\t\treturn \"VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2\";\n+\tcase VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:\n+\t\treturn \"VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2\";\n+\tcase VIRTCHNL_OP_1588_PTP_GET_CAPS:\n+\t\treturn \"VIRTCHNL_OP_1588_PTP_GET_CAPS\";\n+\tcase VIRTCHNL_OP_1588_PTP_GET_TIME:\n+\t\treturn \"VIRTCHNL_OP_1588_PTP_GET_TIME\";\n+\tcase VIRTCHNL_OP_1588_PTP_SET_TIME:\n+\t\treturn \"VIRTCHNL_OP_1588_PTP_SET_TIME\";\n+\tcase VIRTCHNL_OP_1588_PTP_ADJ_TIME:\n+\t\treturn \"VIRTCHNL_OP_1588_PTP_ADJ_TIME\";\n+\tcase VIRTCHNL_OP_1588_PTP_ADJ_FREQ:\n+\t\treturn 
\"VIRTCHNL_OP_1588_PTP_ADJ_FREQ\";\n+\tcase VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP:\n+\t\treturn \"VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP\";\n+\tcase VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS:\n+\t\treturn \"VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS\";\n+\tcase VIRTCHNL_OP_1588_PTP_SET_PIN_CFG:\n+\t\treturn \"VIRTCHNL_OP_1588_PTP_SET_PIN_CFG\";\n+\tcase VIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP:\n+\t\treturn \"VIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP\";\n+\tcase VIRTCHNL_OP_ENABLE_QUEUES_V2:\n+\t\treturn \"VIRTCHNL_OP_ENABLE_QUEUES_V2\";\n+\tcase VIRTCHNL_OP_DISABLE_QUEUES_V2:\n+\t\treturn \"VIRTCHNL_OP_DISABLE_QUEUES_V2\";\n+\tcase VIRTCHNL_OP_MAP_QUEUE_VECTOR:\n+\t\treturn \"VIRTCHNL_OP_MAP_QUEUE_VECTOR\";\n+\tcase VIRTCHNL_OP_MAX:\n+\t\treturn \"VIRTCHNL_OP_MAX\";\n+\tdefault:\n+\t\treturn \"Unsupported (update virtchnl.h)\";\n+\t}\n+}\n+\n+static inline const char *virtchnl_stat_str(enum virtchnl_status_code v_status)\n+{\n+\tswitch (v_status) {\n+\tcase VIRTCHNL_STATUS_SUCCESS:\n+\t\treturn \"VIRTCHNL_STATUS_SUCCESS\";\n+\tcase VIRTCHNL_STATUS_ERR_PARAM:\n+\t\treturn \"VIRTCHNL_STATUS_ERR_PARAM\";\n+\tcase VIRTCHNL_STATUS_ERR_NO_MEMORY:\n+\t\treturn \"VIRTCHNL_STATUS_ERR_NO_MEMORY\";\n+\tcase VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:\n+\t\treturn \"VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH\";\n+\tcase VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:\n+\t\treturn \"VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR\";\n+\tcase VIRTCHNL_STATUS_ERR_INVALID_VF_ID:\n+\t\treturn \"VIRTCHNL_STATUS_ERR_INVALID_VF_ID\";\n+\tcase VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:\n+\t\treturn \"VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR\";\n+\tcase VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:\n+\t\treturn \"VIRTCHNL_STATUS_ERR_NOT_SUPPORTED\";\n+\tdefault:\n+\t\treturn \"Unknown status code (update virtchnl.h)\";\n+\t}\n+}\n+\n+/* Virtual channel message descriptor. This overlays the admin queue\n+ * descriptor. All other data is passed in external buffers.\n+ */\n+\n+struct virtchnl_msg {\n+\tu8 pad[8];\t\t\t /* AQ flags/opcode/len/retval fields */\n+\n+\t/* avoid confusion with desc->opcode */\n+\tenum virtchnl_ops v_opcode;\n+\n+\t/* ditto for desc->retval */\n+\tenum virtchnl_status_code v_retval;\n+\tu32 vfid;\t\t\t /* used by PF when sending to VF */\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);\n+\n+/* Message descriptions and data structures. */\n+\n+/* VIRTCHNL_OP_VERSION\n+ * VF posts its version number to the PF. PF responds with its version number\n+ * in the same format, along with a return code.\n+ * Reply from PF has its major/minor versions also in param0 and param1.\n+ * If there is a major version mismatch, then the VF cannot operate.\n+ * If there is a minor version mismatch, then the VF can operate but should\n+ * add a warning to the system log.\n+ *\n+ * This enum element MUST always be specified as == 1, regardless of other\n+ * changes in the API. 
The PF must always respond to this message without\n+ * error regardless of version mismatch.\n+ */\n+#define VIRTCHNL_VERSION_MAJOR\t\t1\n+#define VIRTCHNL_VERSION_MINOR\t\t1\n+#define VIRTCHNL_VERSION_MAJOR_2\t2\n+#define VIRTCHNL_VERSION_MINOR_0\t0\n+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS\t0\n+\n+struct virtchnl_version_info {\n+\tu32 major;\n+\tu32 minor;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);\n+\n+#define VF_IS_V10(_ver) (((_ver)->major == 1) && ((_ver)->minor == 0))\n+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))\n+#define VF_IS_V20(_ver) (((_ver)->major == 2) && ((_ver)->minor == 0))\n+\n+/* VIRTCHNL_OP_RESET_VF\n+ * VF sends this request to PF with no parameters\n+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register\n+ * until reset completion is indicated. The admin queue must be reinitialized\n+ * after this operation.\n+ *\n+ * When reset is complete, PF must ensure that all queues in all VSIs associated\n+ * with the VF are stopped, all queue configurations in the HMC are set to 0,\n+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs\n+ * are cleared.\n+ */\n+\n+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV\n+ * vsi_type should always be 6 for backward compatibility. Add other fields\n+ * as needed.\n+ */\n+enum virtchnl_vsi_type {\n+\tVIRTCHNL_VSI_TYPE_INVALID = 0,\n+\tVIRTCHNL_VSI_SRIOV = 6,\n+};\n+\n+/* VIRTCHNL_OP_GET_VF_RESOURCES\n+ * Version 1.0 VF sends this request to PF with no parameters\n+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities\n+ * PF responds with an indirect message containing\n+ * virtchnl_vf_resource and one or more\n+ * virtchnl_vsi_resource structures.\n+ */\n+\n+struct virtchnl_vsi_resource {\n+\tu16 vsi_id;\n+\tu16 num_queue_pairs;\n+\n+\t/* see enum virtchnl_vsi_type */\n+\ts32 vsi_type;\n+\tu16 qset_handle;\n+\tu8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);\n+\n+/* VF capability flags\n+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including\n+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.\n+ */\n+#define VIRTCHNL_VF_OFFLOAD_L2\t\t\tBIT(0)\n+#define VIRTCHNL_VF_OFFLOAD_IWARP\t\tBIT(1)\n+#define VIRTCHNL_VF_CAP_RDMA\t\t\tVIRTCHNL_VF_OFFLOAD_IWARP\n+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ\t\tBIT(3)\n+#define VIRTCHNL_VF_OFFLOAD_RSS_REG\t\tBIT(4)\n+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR\t\tBIT(5)\n+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES\t\tBIT(6)\n+/* used to negotiate communicating link speeds in Mbps */\n+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED\t\tBIT(7)\n+\t/* BIT(8) is reserved */\n+#define VIRTCHNL_VF_LARGE_NUM_QPAIRS\t\tBIT(9)\n+#define VIRTCHNL_VF_OFFLOAD_CRC\t\t\tBIT(10)\n+#define VIRTCHNL_VF_OFFLOAD_VLAN_V2\t\tBIT(15)\n+#define VIRTCHNL_VF_OFFLOAD_VLAN\t\tBIT(16)\n+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING\t\tBIT(17)\n+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2\tBIT(18)\n+#define VIRTCHNL_VF_OFFLOAD_RSS_PF\t\tBIT(19)\n+#define VIRTCHNL_VF_OFFLOAD_ENCAP\t\tBIT(20)\n+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM\t\tBIT(21)\n+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM\tBIT(22)\n+#define VIRTCHNL_VF_OFFLOAD_ADQ\t\t\tBIT(23)\n+#define VIRTCHNL_VF_OFFLOAD_ADQ_V2\t\tBIT(24)\n+#define VIRTCHNL_VF_OFFLOAD_USO\t\t\tBIT(25)\n+\t/* BIT(26) is reserved */\n+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF\t\tBIT(27)\n+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF\t\tBIT(28)\n+#define VIRTCHNL_VF_OFFLOAD_QOS\t\t\tBIT(29)\n+\t/* 
BIT(30) is reserved */\n+#define VIRTCHNL_VF_CAP_PTP\t\t\tBIT(31)\n+\n+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \\n+\t\t\t       VIRTCHNL_VF_OFFLOAD_VLAN | \\n+\t\t\t       VIRTCHNL_VF_OFFLOAD_RSS_PF)\n+\n+struct virtchnl_vf_resource {\n+\tu16 num_vsis;\n+\tu16 num_queue_pairs;\n+\tu16 max_vectors;\n+\tu16 max_mtu;\n+\n+\tu32 vf_cap_flags;\n+\tu32 rss_key_size;\n+\tu32 rss_lut_size;\n+\n+\tstruct virtchnl_vsi_resource vsi_res[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);\n+\n+/* VIRTCHNL_OP_CONFIG_TX_QUEUE\n+ * VF sends this message to set up parameters for one TX queue.\n+ * External data buffer contains one instance of virtchnl_txq_info.\n+ * PF configures requested queue and returns a status code.\n+ */\n+\n+/* Tx queue config info */\n+struct virtchnl_txq_info {\n+\tu16 vsi_id;\n+\tu16 queue_id;\n+\tu16 ring_len;\t\t/* number of descriptors, multiple of 8 */\n+\tu16 headwb_enabled; /* deprecated with AVF 1.0 */\n+\tu64 dma_ring_addr;\n+\tu64 dma_headwb_addr; /* deprecated with AVF 1.0 */\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);\n+\n+/* RX descriptor IDs (range from 0 to 63) */\n+enum virtchnl_rx_desc_ids {\n+\tVIRTCHNL_RXDID_0_16B_BASE\t\t= 0,\n+\tVIRTCHNL_RXDID_1_32B_BASE\t\t= 1,\n+\tVIRTCHNL_RXDID_2_FLEX_SQ_NIC\t\t= 2,\n+\tVIRTCHNL_RXDID_3_FLEX_SQ_SW\t\t= 3,\n+\tVIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB\t= 4,\n+\tVIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL\t= 5,\n+\tVIRTCHNL_RXDID_6_FLEX_SQ_NIC_2\t\t= 6,\n+\tVIRTCHNL_RXDID_7_HW_RSVD\t\t= 7,\n+\t/* 8 through 15 are reserved */\n+\tVIRTCHNL_RXDID_16_COMMS_GENERIC\t\t= 16,\n+\tVIRTCHNL_RXDID_17_COMMS_AUX_VLAN\t= 17,\n+\tVIRTCHNL_RXDID_18_COMMS_AUX_IPV4\t= 18,\n+\tVIRTCHNL_RXDID_19_COMMS_AUX_IPV6\t= 19,\n+\tVIRTCHNL_RXDID_20_COMMS_AUX_FLOW\t= 20,\n+\tVIRTCHNL_RXDID_21_COMMS_AUX_TCP\t\t= 21,\n+\t/* 22 through 63 are reserved */\n+};\n+\n+/* RX descriptor ID bitmasks */\n+enum virtchnl_rx_desc_id_bitmasks {\n+\tVIRTCHNL_RXDID_0_16B_BASE_M\t\t= BIT(VIRTCHNL_RXDID_0_16B_BASE),\n+\tVIRTCHNL_RXDID_1_32B_BASE_M\t\t= BIT(VIRTCHNL_RXDID_1_32B_BASE),\n+\tVIRTCHNL_RXDID_2_FLEX_SQ_NIC_M\t\t= BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC),\n+\tVIRTCHNL_RXDID_3_FLEX_SQ_SW_M\t\t= BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW),\n+\tVIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M\t= BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB),\n+\tVIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M\t= BIT(VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL),\n+\tVIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M\t= BIT(VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2),\n+\tVIRTCHNL_RXDID_7_HW_RSVD_M\t\t= BIT(VIRTCHNL_RXDID_7_HW_RSVD),\n+\t/* 8 through 15 are reserved */\n+\tVIRTCHNL_RXDID_16_COMMS_GENERIC_M\t= BIT(VIRTCHNL_RXDID_16_COMMS_GENERIC),\n+\tVIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M\t= BIT(VIRTCHNL_RXDID_17_COMMS_AUX_VLAN),\n+\tVIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M\t= BIT(VIRTCHNL_RXDID_18_COMMS_AUX_IPV4),\n+\tVIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M\t= BIT(VIRTCHNL_RXDID_19_COMMS_AUX_IPV6),\n+\tVIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M\t= BIT(VIRTCHNL_RXDID_20_COMMS_AUX_FLOW),\n+\tVIRTCHNL_RXDID_21_COMMS_AUX_TCP_M\t= BIT(VIRTCHNL_RXDID_21_COMMS_AUX_TCP),\n+\t/* 22 through 63 are reserved */\n+};\n+\n+/* virtchnl_rxq_info_flags\n+ *\n+ * Definition of bits in the flags field of the virtchnl_rxq_info structure.\n+ */\n+enum virtchnl_rxq_info_flags {\n+\t/* If the VIRTCHNL_PTP_RX_TSTAMP bit of the flag field is set, this is\n+\t * a request to enable Rx timestamp. 
Other flag bits are currently\n+\t * reserved and they may be extended in the future.\n+\t */\n+\tVIRTCHNL_PTP_RX_TSTAMP = BIT(0),\n+};\n+\n+/* VIRTCHNL_OP_CONFIG_RX_QUEUE\n+ * VF sends this message to set up parameters for one RX queue.\n+ * External data buffer contains one instance of virtchnl_rxq_info.\n+ * PF configures requested queue and returns a status code. The\n+ * crc_disable flag disables CRC stripping on the VF. Setting\n+ * the crc_disable flag to 1 will disable CRC stripping for each\n+ * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC\n+ * offload must have been set prior to sending this info or the PF\n+ * will ignore the request. This flag should be set the same for\n+ * all of the queues for a VF.\n+ */\n+\n+/* Rx queue config info */\n+struct virtchnl_rxq_info {\n+\tu16 vsi_id;\n+\tu16 queue_id;\n+\tu32 ring_len;\t\t/* number of descriptors, multiple of 32 */\n+\tu16 hdr_size;\n+\tu16 splithdr_enabled; /* deprecated with AVF 1.0 */\n+\tu32 databuffer_size;\n+\tu32 max_pkt_size;\n+\tu8 crc_disable;\n+\tu8 pad1[3];\n+\tu64 dma_ring_addr;\n+\n+\t/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */\n+\ts32 rx_split_pos;\n+\tu32 pad2;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);\n+\n+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES\n+ * VF sends this message to set parameters for active TX and RX queues\n+ * associated with the specified VSI.\n+ * PF configures queues and returns status.\n+ * If the number of queues specified is greater than the number of queues\n+ * associated with the VSI, an error is returned and no queues are configured.\n+ * NOTE: The VF is not required to configure all queues in a single request.\n+ * It may send multiple messages. PF drivers must correctly handle all VF\n+ * requests.\n+ */\n+struct virtchnl_queue_pair_info {\n+\t/* NOTE: vsi_id and queue_id should be identical for both queues. */\n+\tstruct virtchnl_txq_info txq;\n+\tstruct virtchnl_rxq_info rxq;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);\n+\n+struct virtchnl_vsi_queue_config_info {\n+\tu16 vsi_id;\n+\tu16 num_queue_pairs;\n+\tu32 pad;\n+\tstruct virtchnl_queue_pair_info qpair[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);\n+\n+/* VIRTCHNL_OP_REQUEST_QUEUES\n+ * VF sends this message to request the PF to allocate additional queues to\n+ * this VF.  Each VF gets a guaranteed number of queues on init but asking for\n+ * additional queues must be negotiated.  This is a best effort request as it\n+ * is possible the PF does not have enough queues left to support the request.\n+ * If the PF cannot support the number requested it will respond with the\n+ * maximum number it is able to support.  If the request is successful, PF will\n+ * then reset the VF to institute required changes.\n+ */\n+\n+/* VF resource request */\n+struct virtchnl_vf_res_request {\n+\tu16 num_queue_pairs;\n+};\n+\n+/* VIRTCHNL_OP_CONFIG_IRQ_MAP\n+ * VF uses this message to map vectors to queues.\n+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues\n+ * are to be associated with the specified vector.\n+ * The \"other\" causes are always mapped to vector 0. The VF may not request\n+ * that vector 0 be used for traffic.\n+ * PF configures interrupt mapping and returns status.\n+ * NOTE: due to hardware requirements, all active queues (both TX and RX)\n+ * should be mapped to interrupts, even if the driver intends to operate\n+ * only in polling mode. 
In this case the interrupt may be disabled, but\n+ * the ITR timer will still run to trigger writebacks.\n+ */\n+struct virtchnl_vector_map {\n+\tu16 vsi_id;\n+\tu16 vector_id;\n+\tu16 rxq_map;\n+\tu16 txq_map;\n+\tu16 rxitr_idx;\n+\tu16 txitr_idx;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);\n+\n+struct virtchnl_irq_map_info {\n+\tu16 num_vectors;\n+\tstruct virtchnl_vector_map vecmap[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);\n+\n+/* VIRTCHNL_OP_ENABLE_QUEUES\n+ * VIRTCHNL_OP_DISABLE_QUEUES\n+ * VF sends these messages to enable or disable TX/RX queue pairs.\n+ * The queues fields are bitmaps indicating which queues to act upon.\n+ * (Currently, we only support 16 queues per VF, but we make the field\n+ * u32 to allow for expansion.)\n+ * PF performs requested action and returns status.\n+ * NOTE: The VF is not required to enable/disable all queues in a single\n+ * request. It may send multiple messages.\n+ * PF drivers must correctly handle all VF requests.\n+ */\n+struct virtchnl_queue_select {\n+\tu16 vsi_id;\n+\tu16 pad;\n+\tu32 rx_queues;\n+\tu32 tx_queues;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);\n+\n+/* VIRTCHNL_OP_GET_MAX_RSS_QREGION\n+ *\n+ * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES\n+ * then this op must be supported.\n+ *\n+ * VF sends this message in order to query the max RSS queue region\n+ * size supported by PF, when VIRTCHNL_VF_LARGE_NUM_QPAIRS is enabled.\n+ * This information should be used when configuring the RSS LUT and/or\n+ * configuring queue region based filters.\n+ *\n+ * The maximum RSS queue region is 2^qregion_width. So, a qregion_width\n+ * of 6 would inform the VF that the PF supports a maximum RSS queue region\n+ * of 64.\n+ *\n+ * A queue region represents a range of queues that can be used to configure\n+ * an RSS LUT. For example, if a VF is given 64 queues, but only a max queue\n+ * region size of 16 (i.e. 2^qregion_width = 16) then it will only be able\n+ * to configure the RSS LUT with queue indices from 0 to 15. However, other\n+ * filters can be used to direct packets to queues >15 by specifying a queue\n+ * base/offset and queue region width.\n+ */\n+struct virtchnl_max_rss_qregion {\n+\tu16 vport_id;\n+\tu16 qregion_width;\n+\tu8 pad[4];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_max_rss_qregion);\n+\n+/* VIRTCHNL_OP_ADD_ETH_ADDR\n+ * VF sends this message in order to add one or more unicast or multicast\n+ * address filters for the specified VSI.\n+ * PF adds the filters and returns status.\n+ */\n+\n+/* VIRTCHNL_OP_DEL_ETH_ADDR\n+ * VF sends this message in order to remove one or more unicast or multicast\n+ * filters for the specified VSI.\n+ * PF removes the filters and returns status.\n+ */\n+\n+/* VIRTCHNL_ETHER_ADDR_LEGACY\n+ * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad\n+ * bytes. Moving forward all VF drivers should not set type to\n+ * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy\n+ * behavior. The control plane function (i.e. PF) can use a best effort method\n+ * of tracking the primary/device unicast in this case, but there is no\n+ * guarantee and functionality depends on the implementation of the PF.\n+ */\n+\n+/* VIRTCHNL_ETHER_ADDR_PRIMARY\n+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the\n+ * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and\n+ * VIRTCHNL_OP_DEL_ETH_ADDR. 
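For\n+ * illustration only (a sketch, where \"list\" points to a VF-allocated\n+ * struct virtchnl_ether_addr_list buffer), adding the device unicast MAC\n+ * could be expressed as:\n+ *\n+ *\tlist->list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;\n+ *\n+ * 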
This allows for the underlying control plane\n+ * function (i.e. PF) to accurately track and use this MAC address for\n+ * displaying on the host and for VM/function reset.\n+ */\n+\n+/* VIRTCHNL_ETHER_ADDR_EXTRA\n+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra\n+ * unicast and/or multicast filters that are being added/deleted via\n+ * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.\n+ */\n+struct virtchnl_ether_addr {\n+\tu8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];\n+\tu8 type;\n+#define VIRTCHNL_ETHER_ADDR_LEGACY\t0\n+#define VIRTCHNL_ETHER_ADDR_PRIMARY\t1\n+#define VIRTCHNL_ETHER_ADDR_EXTRA\t2\n+#define VIRTCHNL_ETHER_ADDR_TYPE_MASK\t3 /* first two bits of type are valid */\n+\tu8 pad;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);\n+\n+struct virtchnl_ether_addr_list {\n+\tu16 vsi_id;\n+\tu16 num_elements;\n+\tstruct virtchnl_ether_addr list[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);\n+\n+/* VIRTCHNL_OP_ADD_VLAN\n+ * VF sends this message to add one or more VLAN tag filters for receives.\n+ * PF adds the filters and returns status.\n+ * If a port VLAN is configured by the PF, this operation will return an\n+ * error to the VF.\n+ */\n+\n+/* VIRTCHNL_OP_DEL_VLAN\n+ * VF sends this message to remove one or more VLAN tag filters for receives.\n+ * PF removes the filters and returns status.\n+ * If a port VLAN is configured by the PF, this operation will return an\n+ * error to the VF.\n+ */\n+\n+struct virtchnl_vlan_filter_list {\n+\tu16 vsi_id;\n+\tu16 num_elements;\n+\tu16 vlan_id[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);\n+\n+/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related\n+ * structures and opcodes.\n+ *\n+ * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver\n+ * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.\n+ *\n+ * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.\n+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.\n+ * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.\n+ *\n+ * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported\n+ * by the PF concurrently. For example, if the PF can support\n+ * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it\n+ * would OR the following bits:\n+ *\n+ *\tVIRTCHNL_VLAN_ETHERTYPE_8100 |\n+ *\tVIRTCHNL_VLAN_ETHERTYPE_88A8 |\n+ *\tVIRTCHNL_VLAN_ETHERTYPE_AND;\n+ *\n+ * The VF would interpret this as VLAN filtering can be supported on both 0x8100\n+ * and 0x88A8 VLAN ethertypes.\n+ *\n+ * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be supported\n+ * by the PF concurrently. For example if the PF can support\n+ * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping\n+ * offload it would OR the following bits:\n+ *\n+ *\tVIRTCHNL_VLAN_ETHERTYPE_8100 |\n+ *\tVIRTCHNL_VLAN_ETHERTYPE_88A8 |\n+ *\tVIRTCHNL_VLAN_ETHERTYPE_XOR;\n+ *\n+ * The VF would interpret this as VLAN stripping can be supported on either\n+ * 0x8100 or 0x88a8 VLAN ethertypes. 
So when requesting VLAN stripping via\n+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override\n+ * the previously set value.\n+ *\n+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or\n+ * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.\n+ *\n+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware\n+ * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.\n+ *\n+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware\n+ * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.\n+ *\n+ * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for\n+ * VLAN filtering if the underlying PF supports it.\n+ *\n+ * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a\n+ * certain VLAN capability can be toggled. For example if the underlying PF/CP\n+ * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should\n+ * set this bit along with the supported ethertypes.\n+ */\n+enum virtchnl_vlan_support {\n+\tVIRTCHNL_VLAN_UNSUPPORTED =\t\t0,\n+\tVIRTCHNL_VLAN_ETHERTYPE_8100 =\t\t0x00000001,\n+\tVIRTCHNL_VLAN_ETHERTYPE_88A8 =\t\t0x00000002,\n+\tVIRTCHNL_VLAN_ETHERTYPE_9100 =\t\t0x00000004,\n+\tVIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 =\t0x00000100,\n+\tVIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 =\t0x00000200,\n+\tVIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 =\t0x00000400,\n+\tVIRTCHNL_VLAN_PRIO =\t\t\t0x01000000,\n+\tVIRTCHNL_VLAN_FILTER_MASK =\t\t0x10000000,\n+\tVIRTCHNL_VLAN_ETHERTYPE_AND =\t\t0x20000000,\n+\tVIRTCHNL_VLAN_ETHERTYPE_XOR =\t\t0x40000000,\n+\tVIRTCHNL_VLAN_TOGGLE =\t\t\t0x80000000\n+};\n+\n+/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS\n+ * for filtering, insertion, and stripping capabilities.\n+ *\n+ * If only outer capabilities are supported (for filtering, insertion, and/or\n+ * stripping) then this refers to the outer most or single VLAN from the VF's\n+ * perspective.\n+ *\n+ * If only inner capabilities are supported (for filtering, insertion, and/or\n+ * stripping) then this refers to the outer most or single VLAN from the VF's\n+ * perspective. Functionally this is the same as if only outer capabilities are\n+ * supported. The VF driver is just forced to use the inner fields when\n+ * adding/deleting filters and enabling/disabling offloads (if supported).\n+ *\n+ * If both outer and inner capabilities are supported (for filtering, insertion,\n+ * and/or stripping) then outer refers to the outer most or single VLAN and\n+ * inner refers to the second VLAN, if it exists, in the packet.\n+ *\n+ * There is no support for tunneled VLAN offloads, so outer or inner are never\n+ * referring to a tunneled packet from the VF's perspective.\n+ */\n+struct virtchnl_vlan_supported_caps {\n+\tu32 outer;\n+\tu32 inner;\n+};\n+\n+/* The PF populates these fields based on the supported VLAN filtering. If a\n+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will\n+ * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using\n+ * the unsupported fields.\n+ *\n+ * Also, a VF is only allowed to toggle its VLAN filtering setting if the\n+ * VIRTCHNL_VLAN_TOGGLE bit is set.\n+ *\n+ * The ethertype(s) specified in the ethertype_init field are the ethertypes\n+ * enabled for VLAN filtering. VLAN filtering in this case refers to the outer\n+ * most VLAN from the VF's perspective. 
If both inner and outer filtering are\n+ * allowed then ethertype_init only refers to the outer most VLAN as the only\n+ * VLAN ethertype supported for inner VLAN filtering is\n+ * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled\n+ * when both inner and outer filtering are allowed.\n+ *\n+ * The max_filters field tells the VF how many VLAN filters it's allowed to have\n+ * at any one time. If it exceeds this amount and tries to add another filter,\n+ * then the request will be rejected by the PF. To prevent failures, the VF\n+ * should keep track of how many VLAN filters it has added and not attempt to\n+ * add more than max_filters.\n+ */\n+struct virtchnl_vlan_filtering_caps {\n+\tstruct virtchnl_vlan_supported_caps filtering_support;\n+\tu32 ethertype_init;\n+\tu16 max_filters;\n+\tu8 pad[2];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);\n+\n+/* This enum is used for the virtchnl_vlan_offload_caps structure to specify\n+ * if the PF supports a different ethertype for stripping and insertion.\n+ *\n+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified\n+ * for stripping affect the ethertype(s) specified for insertion and vice versa\n+ * as well. If the VF tries to configure VLAN stripping via\n+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then\n+ * that will be the ethertype for both stripping and insertion.\n+ *\n+ * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for\n+ * stripping do not affect the ethertype(s) specified for insertion and vice\n+ * versa.\n+ */\n+enum virtchnl_vlan_ethertype_match {\n+\tVIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,\n+\tVIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,\n+};\n+\n+/* The PF populates these fields based on the supported VLAN offloads. If a\n+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will\n+ * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or\n+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.\n+ *\n+ * Also, a VF is only allowed to toggle its VLAN offload setting if the\n+ * VIRTCHNL_VLAN_TOGGLE bit is set.\n+ *\n+ * The VF driver needs to be aware of how the tags are stripped by hardware and\n+ * inserted by the VF driver based on the level of offload support. The PF will\n+ * populate these fields based on where the VLAN tags are expected to be\n+ * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to\n+ * interpret these fields. See the definition of the\n+ * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support\n+ * enumeration.\n+ */\n+struct virtchnl_vlan_offload_caps {\n+\tstruct virtchnl_vlan_supported_caps stripping_support;\n+\tstruct virtchnl_vlan_supported_caps insertion_support;\n+\tu32 ethertype_init;\n+\tu8 ethertype_match;\n+\tu8 pad[3];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);\n+\n+/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS\n+ * VF sends this message to determine its VLAN capabilities.\n+ *\n+ * PF will mark which capabilities it supports based on hardware support and\n+ * current configuration. 
For example, if a port VLAN is configured, the PF will\n+ * not allow outer VLAN filtering, stripping, or insertion to be configured so\n+ * it will block these features from the VF.\n+ *\n+ * The VF will need to cross-reference its capabilities with the PF's\n+ * capabilities in the response message from the PF to determine the VLAN\n+ * support.\n+ */\n+struct virtchnl_vlan_caps {\n+\tstruct virtchnl_vlan_filtering_caps filtering;\n+\tstruct virtchnl_vlan_offload_caps offloads;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);\n+\n+struct virtchnl_vlan {\n+\tu16 tci;\t/* tci[15:13] = PCP and tci[11:0] = VID */\n+\tu16 tci_mask;\t/* only valid if VIRTCHNL_VLAN_FILTER_MASK set in\n+\t\t\t * filtering caps\n+\t\t\t */\n+\tu16 tpid;\t/* 0x8100, 0x88a8, etc. and only type(s) set in\n+\t\t\t * filtering caps. Note that tpid here does not refer to\n+\t\t\t * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the\n+\t\t\t * actual 2-byte VLAN TPID\n+\t\t\t */\n+\tu8 pad[2];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);\n+\n+struct virtchnl_vlan_filter {\n+\tstruct virtchnl_vlan inner;\n+\tstruct virtchnl_vlan outer;\n+\tu8 pad[16];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);\n+\n+/* VIRTCHNL_OP_ADD_VLAN_V2\n+ * VIRTCHNL_OP_DEL_VLAN_V2\n+ *\n+ * VF sends these messages to add/del one or more VLAN tag filters for Rx\n+ * traffic.\n+ *\n+ * The PF attempts to add the filters and returns status.\n+ *\n+ * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the\n+ * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.\n+ */\n+struct virtchnl_vlan_filter_list_v2 {\n+\tu16 vport_id;\n+\tu16 num_elements;\n+\tu8 pad[4];\n+\tstruct virtchnl_vlan_filter filters[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);\n+\n+/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2\n+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2\n+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2\n+ * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2\n+ *\n+ * VF sends this message to enable or disable VLAN stripping or insertion. It\n+ * also needs to specify an ethertype. The VF knows which VLAN ethertypes are\n+ * allowed and whether or not it's allowed to enable/disable the specific\n+ * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to\n+ * parse the virtchnl_vlan_caps.offloads fields to determine which offload\n+ * messages are allowed.\n+ *\n+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the\n+ * following manner the VF will be allowed to enable and/or disable 0x8100 inner\n+ * VLAN insertion and/or stripping via the opcodes listed above. Inner in this\n+ * case means the outer most or single VLAN from the VF's perspective. This is\n+ * because no outer offloads are supported. 
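For\n+ * illustration only (a sketch using the caps structure defined above), the\n+ * VF could gate such a request on the negotiated capabilities:\n+ *\n+ *\tif (caps.offloads.stripping_support.inner & VIRTCHNL_VLAN_TOGGLE)\n+ *\t\t(send VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2)\n+ *\n+ * 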
See the comments above the\n+ * virtchnl_vlan_supported_caps structure for more details.\n+ *\n+ * virtchnl_vlan_caps.offloads.stripping_support.inner =\n+ *\t\t\tVIRTCHNL_VLAN_TOGGLE |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_8100;\n+ *\n+ * virtchnl_vlan_caps.offloads.insertion_support.inner =\n+ *\t\t\tVIRTCHNL_VLAN_TOGGLE |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_8100;\n+ *\n+ * In order to enable inner (again note that in this case inner is the outer\n+ * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100\n+ * VLANs, the VF would populate the virtchnl_vlan_setting structure in the\n+ * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.\n+ *\n+ * virtchnl_vlan_setting.inner_ethertype_setting =\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_8100;\n+ *\n+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on\n+ * initialization.\n+ *\n+ * The reason that VLAN TPID(s) are not being used for the\n+ * outer_ethertype_setting and inner_ethertype_setting fields is because it's\n+ * possible a device could support VLAN insertion and/or stripping offload on\n+ * multiple ethertypes concurrently, so this method allows a VF to request\n+ * multiple ethertypes in one message using the virtchnl_vlan_support\n+ * enumeration.\n+ *\n+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the\n+ * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer\n+ * VLAN insertion and stripping simultaneously. The\n+ * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be\n+ * populated based on what the PF can support.\n+ *\n+ * virtchnl_vlan_caps.offloads.stripping_support.outer =\n+ *\t\t\tVIRTCHNL_VLAN_TOGGLE |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_8100 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_88A8 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_AND;\n+ *\n+ * virtchnl_vlan_caps.offloads.insertion_support.outer =\n+ *\t\t\tVIRTCHNL_VLAN_TOGGLE |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_8100 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_88A8 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_AND;\n+ *\n+ * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF\n+ * would populate the virtchnl_vlan_setting structure in the following manner\n+ * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.\n+ *\n+ * virtchnl_vlan_setting.outer_ethertype_setting =\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_8100 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_88A8;\n+ *\n+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on\n+ * initialization.\n+ *\n+ * There is also the case where a PF and the underlying hardware can support\n+ * VLAN offloads on multiple ethertypes, but not concurrently. For example, if\n+ * the PF populates the virtchnl_vlan_caps.offloads in the following manner the\n+ * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN\n+ * offloads. 
The ethertypes must match for stripping and insertion.\n+ *\n+ * virtchnl_vlan_caps.offloads.stripping_support.outer =\n+ *\t\t\tVIRTCHNL_VLAN_TOGGLE |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_8100 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_88A8 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_XOR;\n+ *\n+ * virtchnl_vlan_caps.offloads.insertion_support.outer =\n+ *\t\t\tVIRTCHNL_VLAN_TOGGLE |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_8100 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_88A8 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_XOR;\n+ *\n+ * virtchnl_vlan_caps.offloads.ethertype_match =\n+ *\t\t\tVIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;\n+ *\n+ * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would\n+ * populate the virtchnl_vlan_setting structure in the following manner and send\n+ * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the\n+ * ethertype for VLAN insertion if it's enabled. So, for completeness, a\n+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.\n+ *\n+ * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;\n+ *\n+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on\n+ * initialization.\n+ *\n+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2\n+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2\n+ *\n+ * VF sends this message to enable or disable VLAN filtering. It also needs to\n+ * specify an ethertype. The VF knows which VLAN ethertypes are allowed and\n+ * whether or not it's allowed to enable/disable filtering via the\n+ * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to\n+ * parse the virtchnl_vlan_caps.filtering fields to determine which, if any,\n+ * filtering messages are allowed.\n+ *\n+ * For example, if the PF populates the virtchnl_vlan_caps.filtering in the\n+ * following manner the VF will be allowed to enable/disable 0x8100 and 0x88a8\n+ * outer VLAN filtering together. Note that the VIRTCHNL_VLAN_ETHERTYPE_AND\n+ * means that all filtering ethertypes will be enabled and disabled together\n+ * regardless of the request from the VF. This means that the underlying\n+ * hardware only supports VLAN filtering for all of the specified VLAN\n+ * ethertypes or none of them.\n+ *\n+ * virtchnl_vlan_caps.filtering.filtering_support.outer =\n+ *\t\t\tVIRTCHNL_VLAN_TOGGLE |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_8100 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_88A8 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_9100 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_AND;\n+ *\n+ * In order to enable outer VLAN filtering for 0x88a8 and 0x8100 VLANs (0x9100\n+ * VLANs aren't supported by the VF driver), the VF would populate the\n+ * virtchnl_vlan_setting structure in the following manner and send the\n+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2. 
The same message format would be used\n+ * to disable outer VLAN filtering for 0x88a8 and 0x8100 VLANs, but the\n+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 opcode is used.\n+ *\n+ * virtchnl_vlan_setting.outer_ethertype_setting =\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_8100 |\n+ *\t\t\tVIRTCHNL_VLAN_ETHERTYPE_88A8;\n+ *\n+ */\n+struct virtchnl_vlan_setting {\n+\tu32 outer_ethertype_setting;\n+\tu32 inner_ethertype_setting;\n+\tu16 vport_id;\n+\tu8 pad[6];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);\n+\n+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE\n+ * VF sends VSI id and flags.\n+ * PF returns status code in retval.\n+ * Note: we assume that broadcast accept mode is always enabled.\n+ */\n+struct virtchnl_promisc_info {\n+\tu16 vsi_id;\n+\tu16 flags;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);\n+\n+#define FLAG_VF_UNICAST_PROMISC\t0x00000001\n+#define FLAG_VF_MULTICAST_PROMISC\t0x00000002\n+\n+/* VIRTCHNL_OP_GET_STATS\n+ * VF sends this message to request stats for the selected VSI. VF uses\n+ * the virtchnl_queue_select struct to specify the VSI. The queue_id\n+ * field is ignored by the PF.\n+ *\n+ * PF replies with struct virtchnl_eth_stats in an external buffer.\n+ */\n+\n+struct virtchnl_eth_stats {\n+\tu64 rx_bytes;\t\t\t/* received bytes */\n+\tu64 rx_unicast;\t\t\t/* received unicast pkts */\n+\tu64 rx_multicast;\t\t/* received multicast pkts */\n+\tu64 rx_broadcast;\t\t/* received broadcast pkts */\n+\tu64 rx_discards;\n+\tu64 rx_unknown_protocol;\n+\tu64 tx_bytes;\t\t\t/* transmitted bytes */\n+\tu64 tx_unicast;\t\t\t/* transmitted unicast pkts */\n+\tu64 tx_multicast;\t\t/* transmitted multicast pkts */\n+\tu64 tx_broadcast;\t\t/* transmitted broadcast pkts */\n+\tu64 tx_discards;\n+\tu64 tx_errors;\n+};\n+\n+/* VIRTCHNL_OP_CONFIG_RSS_KEY\n+ * VIRTCHNL_OP_CONFIG_RSS_LUT\n+ * VF sends these messages to configure RSS. Only supported if both PF\n+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during\n+ * configuration negotiation. If this is the case, then the RSS fields in\n+ * the VF resource struct are valid.\n+ * Both the key and LUT are initialized to 0 by the PF, meaning that\n+ * RSS is effectively disabled until set up by the VF.\n+ */\n+struct virtchnl_rss_key {\n+\tu16 vsi_id;\n+\tu16 key_len;\n+\tu8 key[1];         /* RSS hash key, packed bytes */\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);\n+\n+struct virtchnl_rss_lut {\n+\tu16 vsi_id;\n+\tu16 lut_entries;\n+\tu8 lut[1];        /* RSS lookup table */\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);\n+\n+/* enum virtchnl_hash_filter\n+ *\n+ * Bits defining the hash filters in the hena field of the virtchnl_rss_hena\n+ * structure. Each bit indicates a specific hash filter for RSS.\n+ *\n+ * Note that not all bits are supported on all hardware. 
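For\n+ * illustration, enabling hashing for TCP and UDP over IPv4 would correspond\n+ * to a hena value of:\n+ *\n+ *\t(1ULL << VIRTCHNL_HASH_FILTER_IPV4_TCP) |\n+ *\t(1ULL << VIRTCHNL_HASH_FILTER_IPV4_UDP)\n+ *\n+ * 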
The VF should use\n+ * VIRTCHNL_OP_GET_RSS_HENA_CAPS to determine which bits the PF is capable of\n+ * before using VIRTCHNL_OP_SET_RSS_HENA to enable specific filters.\n+ */\n+enum virtchnl_hash_filter {\n+\t/* Bits 0 through 28 are reserved for future use */\n+\t/* Bits 29, 30, and 32 are not supported on XL710 and X710 */\n+\tVIRTCHNL_HASH_FILTER_UNICAST_IPV4_UDP\t\t= 29,\n+\tVIRTCHNL_HASH_FILTER_MULTICAST_IPV4_UDP\t\t= 30,\n+\tVIRTCHNL_HASH_FILTER_IPV4_UDP\t\t\t= 31,\n+\tVIRTCHNL_HASH_FILTER_IPV4_TCP_SYN_NO_ACK\t= 32,\n+\tVIRTCHNL_HASH_FILTER_IPV4_TCP\t\t\t= 33,\n+\tVIRTCHNL_HASH_FILTER_IPV4_SCTP\t\t\t= 34,\n+\tVIRTCHNL_HASH_FILTER_IPV4_OTHER\t\t\t= 35,\n+\tVIRTCHNL_HASH_FILTER_FRAG_IPV4\t\t\t= 36,\n+\t/* Bits 37 and 38 are reserved for future use */\n+\t/* Bits 39, 40, and 42 are not supported on XL710 and X710 */\n+\tVIRTCHNL_HASH_FILTER_UNICAST_IPV6_UDP\t\t= 39,\n+\tVIRTCHNL_HASH_FILTER_MULTICAST_IPV6_UDP\t\t= 40,\n+\tVIRTCHNL_HASH_FILTER_IPV6_UDP\t\t\t= 41,\n+\tVIRTCHNL_HASH_FILTER_IPV6_TCP_SYN_NO_ACK\t= 42,\n+\tVIRTCHNL_HASH_FILTER_IPV6_TCP\t\t\t= 43,\n+\tVIRTCHNL_HASH_FILTER_IPV6_SCTP\t\t\t= 44,\n+\tVIRTCHNL_HASH_FILTER_IPV6_OTHER\t\t\t= 45,\n+\tVIRTCHNL_HASH_FILTER_FRAG_IPV6\t\t\t= 46,\n+\t/* Bit 47 is reserved for future use */\n+\tVIRTCHNL_HASH_FILTER_FCOE_OX\t\t\t= 48,\n+\tVIRTCHNL_HASH_FILTER_FCOE_RX\t\t\t= 49,\n+\tVIRTCHNL_HASH_FILTER_FCOE_OTHER\t\t\t= 50,\n+\t/* Bits 51 through 62 are reserved for future use */\n+\tVIRTCHNL_HASH_FILTER_L2_PAYLOAD\t\t\t= 63,\n+};\n+\n+#define VIRTCHNL_HASH_FILTER_INVALID\t(0)\n+\n+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS\n+ * VIRTCHNL_OP_SET_RSS_HENA\n+ * VF sends these messages to get and set the hash filter enable bits for RSS.\n+ * By default, the PF sets these to all possible traffic types that the\n+ * hardware supports. The VF can query this value if it wants to change the\n+ * traffic types that are hashed by the hardware.\n+ */\n+struct virtchnl_rss_hena {\n+\t/* see enum virtchnl_hash_filter */\n+\tu64 hena;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);\n+\n+/* Type of RSS algorithm */\n+enum virtchnl_rss_algorithm {\n+\tVIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC\t= 0,\n+\tVIRTCHNL_RSS_ALG_R_ASYMMETRIC\t\t= 1,\n+\tVIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC\t= 2,\n+\tVIRTCHNL_RSS_ALG_XOR_SYMMETRIC\t\t= 3,\n+};\n+\n+/* This is used by PF driver to enforce how many channels can be supported.\n+ * When the ADQ_V2 capability is negotiated, it will allow 16 channels;\n+ * otherwise the PF driver will allow only a maximum of 4 channels\n+ */\n+#define VIRTCHNL_MAX_ADQ_CHANNELS 4\n+#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16\n+\n+/* VIRTCHNL_OP_ENABLE_CHANNELS\n+ * VIRTCHNL_OP_DISABLE_CHANNELS\n+ * VF sends these messages to enable or disable channels based on\n+ * the user specified queue count and queue offset for each traffic class.\n+ * This struct encompasses all the information that the PF needs from\n+ * VF to create a channel.\n+ */\n+struct virtchnl_channel_info {\n+\tu16 count; /* number of queues in a channel */\n+\tu16 offset; /* queues in a channel start from 'offset' */\n+\tu32 pad;\n+\tu64 max_tx_rate;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);\n+\n+struct virtchnl_tc_info {\n+\tu32\tnum_tc;\n+\tu32\tpad;\n+\tstruct\tvirtchnl_channel_info list[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);\n+\n+/* VIRTCHNL_OP_ADD_CLOUD_FILTER\n+ * VIRTCHNL_OP_DEL_CLOUD_FILTER\n+ * VF sends these messages to add or delete a cloud filter based on the\n+ * user specified match and action filters. 
These structures encompass\n+ * all the information that the PF needs from the VF to add/delete a\n+ * cloud filter.\n+ */\n+\n+struct virtchnl_l4_spec {\n+\tu8\tsrc_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];\n+\tu8\tdst_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];\n+\t/* vlan_prio is part of this 16 bit field even from the OS perspective:\n+\t * bits 11..0 carry the actual vlan_id and bits 14..12 carry vlan_prio.\n+\t * In the future, if vlan_prio offload is added, that information will\n+\t * be passed as part of the \"vlan_id\" field, bits 14..12.\n+\t */\n+\t__be16\tvlan_id;\n+\t__be16\tpad; /* reserved for future use */\n+\t__be32\tsrc_ip[4];\n+\t__be32\tdst_ip[4];\n+\t__be16\tsrc_port;\n+\t__be16\tdst_port;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);\n+\n+union virtchnl_flow_spec {\n+\tstruct\tvirtchnl_l4_spec tcp_spec;\n+\tu8\tbuffer[128]; /* reserved for future use */\n+};\n+\n+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);\n+\n+enum virtchnl_action {\n+\t/* action types */\n+\tVIRTCHNL_ACTION_DROP = 0,\n+\tVIRTCHNL_ACTION_TC_REDIRECT,\n+\tVIRTCHNL_ACTION_PASSTHRU,\n+\tVIRTCHNL_ACTION_QUEUE,\n+\tVIRTCHNL_ACTION_Q_REGION,\n+\tVIRTCHNL_ACTION_MARK,\n+\tVIRTCHNL_ACTION_COUNT,\n+};\n+\n+enum virtchnl_flow_type {\n+\t/* flow types */\n+\tVIRTCHNL_TCP_V4_FLOW = 0,\n+\tVIRTCHNL_TCP_V6_FLOW,\n+\tVIRTCHNL_UDP_V4_FLOW,\n+\tVIRTCHNL_UDP_V6_FLOW,\n+};\n+\n+struct virtchnl_filter {\n+\tunion\tvirtchnl_flow_spec data;\n+\tunion\tvirtchnl_flow_spec mask;\n+\n+\t/* see enum virtchnl_flow_type */\n+\ts32\tflow_type;\n+\n+\t/* see enum virtchnl_action */\n+\ts32\taction;\n+\tu32\taction_meta;\n+\tu8\tfield_flags;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);\n+\n+struct virtchnl_shaper_bw {\n+\t/* Unit is Kbps */\n+\tu32 committed;\n+\tu32 peak;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);\n+\n+\n+\n+/* VIRTCHNL_OP_EVENT\n+ * PF sends this message to inform the VF driver of events that may affect it.\n+ * No direct response is expected from the VF, though it may generate other\n+ * messages in response to this one.\n+ */\n+enum virtchnl_event_codes {\n+\tVIRTCHNL_EVENT_UNKNOWN = 0,\n+\tVIRTCHNL_EVENT_LINK_CHANGE,\n+\tVIRTCHNL_EVENT_RESET_IMPENDING,\n+\tVIRTCHNL_EVENT_PF_DRIVER_CLOSE,\n+};\n+\n+#define PF_EVENT_SEVERITY_INFO\t\t0\n+#define PF_EVENT_SEVERITY_ATTENTION\t1\n+#define PF_EVENT_SEVERITY_ACTION_REQUIRED\t2\n+#define PF_EVENT_SEVERITY_CERTAIN_DOOM\t255\n+\n+struct virtchnl_pf_event {\n+\t/* see enum virtchnl_event_codes */\n+\ts32 event;\n+\tunion {\n+\t\t/* If the PF driver does not support the new speed reporting\n+\t\t * capabilities, use link_event; otherwise use link_event_adv to\n+\t\t * get the speed and link information. 
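For illustration (a\n+\t\t * sketch; \"res\" and \"event\" name the VF's copies of\n+\t\t * virtchnl_vf_resource and virtchnl_pf_event):\n+\t\t *\n+\t\t *\tif (res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)\n+\t\t *\t\tspeed = event->event_data.link_event_adv.link_speed;\n+\t\t *\n+\t\t * 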
The ability to understand\n+\t\t * new speeds is indicated by setting the capability flag\n+\t\t * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter\n+\t\t * in virtchnl_vf_resource struct and can be used to determine\n+\t\t * which link event struct to use below.\n+\t\t */\n+\t\tstruct {\n+\t\t\tenum virtchnl_link_speed link_speed;\n+\t\t\tbool link_status;\n+\t\t\tu8 pad[3];\n+\t\t} link_event;\n+\t\tstruct {\n+\t\t\t/* link_speed provided in Mbps */\n+\t\t\tu32 link_speed;\n+\t\t\tu8 link_status;\n+\t\t\tu8 pad[3];\n+\t\t} link_event_adv;\n+\t} event_data;\n+\n+\ts32 severity;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);\n+\n+\n+/* VF reset states - these are written into the RSTAT register:\n+ * VFGEN_RSTAT on the VF\n+ * When the PF initiates a reset, it writes 0\n+ * When the reset is complete, it writes 1\n+ * When the PF detects that the VF has recovered, it writes 2\n+ * VF checks this register periodically to determine if a reset has occurred,\n+ * then polls it to know when the reset is complete.\n+ * If either the PF or VF reads the register while the hardware\n+ * is in a reset state, it will return DEADBEEF, which, when masked,\n+ * will result in 3.\n+ */\n+enum virtchnl_vfr_states {\n+\tVIRTCHNL_VFR_INPROGRESS = 0,\n+\tVIRTCHNL_VFR_COMPLETED,\n+\tVIRTCHNL_VFR_VFACTIVE,\n+};\n+\n+#define VIRTCHNL_MAX_NUM_PROTO_HDRS\t32\n+#define VIRTCHNL_MAX_SIZE_RAW_PACKET\t1024\n+#define PROTO_HDR_SHIFT\t\t\t5\n+#define PROTO_HDR_FIELD_START(proto_hdr_type) \\n+\t\t\t\t\t(proto_hdr_type << PROTO_HDR_SHIFT)\n+#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)\n+\n+/* The VF uses these macros to configure each protocol header.\n+ * Specify which protocol headers and protocol header fields based on\n+ * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.\n+ * @param hdr: a pointer to a struct virtchnl_proto_hdr\n+ * @param hdr_type: ETH/IPV4/TCP, etc\n+ * @param field: SRC/DST/TEID/SPI, etc\n+ */\n+#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \\n+\t((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))\n+#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \\n+\t((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))\n+#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \\n+\t((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))\n+#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr)\t((hdr)->field_selector)\n+\n+#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \\n+\t(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \\n+\t\tVIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))\n+#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \\n+\t(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \\n+\t\tVIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))\n+\n+#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \\n+\t((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)\n+#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \\n+\t(((hdr)->type) >> PROTO_HDR_SHIFT)\n+#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \\n+\t((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))\n+#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \\n+\t(VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) && \\n+\t VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val))\n+\n+/* Protocol header type within a packet segment. A segment consists of one or\n+ * more protocol headers that make up a logical group of protocol headers. 
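As an\n+ * illustration of the configuration macros defined above (a sketch, where\n+ * \"hdr\" points to one struct virtchnl_proto_hdr), selecting the IPv4 source\n+ * address field of a header would be:\n+ *\n+ *\tVIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);\n+ *\tVIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);\n+ *\n+ * 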
Each\n+ * logical group of protocol headers encapsulates or is encapsulated using/by\n+ * tunneling or encapsulation protocols for network virtualization.\n+ */\n+enum virtchnl_proto_hdr_type {\n+\tVIRTCHNL_PROTO_HDR_NONE,\n+\tVIRTCHNL_PROTO_HDR_ETH,\n+\tVIRTCHNL_PROTO_HDR_S_VLAN,\n+\tVIRTCHNL_PROTO_HDR_C_VLAN,\n+\tVIRTCHNL_PROTO_HDR_IPV4,\n+\tVIRTCHNL_PROTO_HDR_IPV6,\n+\tVIRTCHNL_PROTO_HDR_TCP,\n+\tVIRTCHNL_PROTO_HDR_UDP,\n+\tVIRTCHNL_PROTO_HDR_SCTP,\n+\tVIRTCHNL_PROTO_HDR_GTPU_IP,\n+\tVIRTCHNL_PROTO_HDR_GTPU_EH,\n+\tVIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,\n+\tVIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,\n+\tVIRTCHNL_PROTO_HDR_PPPOE,\n+\tVIRTCHNL_PROTO_HDR_L2TPV3,\n+\tVIRTCHNL_PROTO_HDR_ESP,\n+\tVIRTCHNL_PROTO_HDR_AH,\n+\tVIRTCHNL_PROTO_HDR_PFCP,\n+\tVIRTCHNL_PROTO_HDR_GTPC,\n+\tVIRTCHNL_PROTO_HDR_ECPRI,\n+\tVIRTCHNL_PROTO_HDR_L2TPV2,\n+\tVIRTCHNL_PROTO_HDR_PPP,\n+\t/* IPv4 and IPv6 Fragment header types are only associated with\n+\t * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively,\n+\t * and cannot be used independently.\n+\t */\n+\tVIRTCHNL_PROTO_HDR_IPV4_FRAG,\n+\tVIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,\n+\tVIRTCHNL_PROTO_HDR_GRE,\n+};\n+\n+/* Protocol header field within a protocol header. */\n+enum virtchnl_proto_hdr_field {\n+\t/* ETHER */\n+\tVIRTCHNL_PROTO_HDR_ETH_SRC =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),\n+\tVIRTCHNL_PROTO_HDR_ETH_DST,\n+\tVIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,\n+\t/* S-VLAN */\n+\tVIRTCHNL_PROTO_HDR_S_VLAN_ID =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),\n+\t/* C-VLAN */\n+\tVIRTCHNL_PROTO_HDR_C_VLAN_ID =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),\n+\t/* IPV4 */\n+\tVIRTCHNL_PROTO_HDR_IPV4_SRC =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),\n+\tVIRTCHNL_PROTO_HDR_IPV4_DST,\n+\tVIRTCHNL_PROTO_HDR_IPV4_DSCP,\n+\tVIRTCHNL_PROTO_HDR_IPV4_TTL,\n+\tVIRTCHNL_PROTO_HDR_IPV4_PROT,\n+\tVIRTCHNL_PROTO_HDR_IPV4_CHKSUM,\n+\t/* IPV6 */\n+\tVIRTCHNL_PROTO_HDR_IPV6_SRC =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),\n+\tVIRTCHNL_PROTO_HDR_IPV6_DST,\n+\tVIRTCHNL_PROTO_HDR_IPV6_TC,\n+\tVIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PROT,\n+\t/* IPV6 Prefix */\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC,\n+\tVIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST,\n+\t/* TCP */\n+\tVIRTCHNL_PROTO_HDR_TCP_SRC_PORT =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),\n+\tVIRTCHNL_PROTO_HDR_TCP_DST_PORT,\n+\tVIRTCHNL_PROTO_HDR_TCP_CHKSUM,\n+\t/* UDP */\n+\tVIRTCHNL_PROTO_HDR_UDP_SRC_PORT =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),\n+\tVIRTCHNL_PROTO_HDR_UDP_DST_PORT,\n+\tVIRTCHNL_PROTO_HDR_UDP_CHKSUM,\n+\t/* SCTP */\n+\tVIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),\n+\tVIRTCHNL_PROTO_HDR_SCTP_DST_PORT,\n+\tVIRTCHNL_PROTO_HDR_SCTP_CHKSUM,\n+\t/* GTPU_IP */\n+\tVIRTCHNL_PROTO_HDR_GTPU_IP_TEID =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),\n+\t/* GTPU_EH */\n+\tVIRTCHNL_PROTO_HDR_GTPU_EH_PDU =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),\n+\tVIRTCHNL_PROTO_HDR_GTPU_EH_QFI,\n+\t/* PPPOE */\n+\tVIRTCHNL_PROTO_HDR_PPPOE_SESS_ID 
=\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),\n+\t/* L2TPV3 */\n+\tVIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),\n+\t/* ESP */\n+\tVIRTCHNL_PROTO_HDR_ESP_SPI =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),\n+\t/* AH */\n+\tVIRTCHNL_PROTO_HDR_AH_SPI =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),\n+\t/* PFCP */\n+\tVIRTCHNL_PROTO_HDR_PFCP_S_FIELD =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),\n+\tVIRTCHNL_PROTO_HDR_PFCP_SEID,\n+\t/* GTPC */\n+\tVIRTCHNL_PROTO_HDR_GTPC_TEID =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),\n+\t/* ECPRI */\n+\tVIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),\n+\tVIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,\n+\t/* IPv4 Dummy Fragment */\n+\tVIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),\n+\t/* IPv6 Extension Fragment */\n+\tVIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),\n+\t/* GTPU_DWN/UP */\n+\tVIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),\n+\tVIRTCHNL_PROTO_HDR_GTPU_UP_QFI =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),\n+\t/* L2TPv2 */\n+\tVIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =\n+\t\tPROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),\n+\tVIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,\n+};\n+\n+struct virtchnl_proto_hdr {\n+\t/* see enum virtchnl_proto_hdr_type */\n+\ts32 type;\n+\tu32 field_selector; /* a bit mask to select field for header type */\n+\tu8 buffer[64];\n+\t/**\n+\t * binary buffer in network order for specific header type.\n+\t * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4\n+\t * header is expected to be copied into the buffer.\n+\t */\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);\n+\n+struct virtchnl_proto_hdrs {\n+\tu8 tunnel_level;\n+\t/**\n+\t * specifies where the protocol headers start from.\n+\t * Must be 0 when sending a raw packet request.\n+\t * 0 - from the outer layer\n+\t * 1 - from the first inner layer\n+\t * 2 - from the second inner layer\n+\t * ....\n+\t */\n+\tint count;\n+\t/**\n+\t * number of proto layers; must be < VIRTCHNL_MAX_NUM_PROTO_HDRS.\n+\t * Must be 0 for a raw packet request.\n+\t */\n+\tunion {\n+\t\tstruct virtchnl_proto_hdr\n+\t\t\tproto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];\n+\t\tstruct {\n+\t\t\tu16 pkt_len;\n+\t\t\tu8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];\n+\t\t\tu8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];\n+\t\t} raw;\n+\t};\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);\n+\n+struct virtchnl_rss_cfg {\n+\tstruct virtchnl_proto_hdrs proto_hdrs;\t   /* protocol headers */\n+\n+\t/* see enum virtchnl_rss_algorithm; rss algorithm type */\n+\ts32 rss_algorithm;\n+\tu8 reserved[128];                          /* reserved for future use */\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);\n+\n+/* action configuration for FDIR */\n+struct virtchnl_filter_action {\n+\t/* see enum virtchnl_action type */\n+\ts32 type;\n+\tunion {\n+\t\t/* used for queue and qgroup action */\n+\t\tstruct {\n+\t\t\tu16 index;\n+\t\t\tu8 region;\n+\t\t} queue;\n+\t\t/* used for count action */\n+\t\tstruct {\n+\t\t\t/* share counter ID with other flow rules */\n+\t\t\tu8 shared;\n+\t\t\tu32 id; /* counter ID */\n+\t\t} count;\n+\t\t/* used for mark action */\n+\t\tu32 mark_id;\n+\t\tu8 reserve[32];\n+\t} act_conf;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);\n+\n+#define VIRTCHNL_MAX_NUM_ACTIONS  8\n+\n+struct 
virtchnl_filter_action_set {\n+\t/* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */\n+\tint count;\n+\tstruct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);\n+\n+/* pattern and action for FDIR rule */\n+struct virtchnl_fdir_rule {\n+\tstruct virtchnl_proto_hdrs proto_hdrs;\n+\tstruct virtchnl_filter_action_set action_set;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);\n+\n+/* Status returned to VF after VF requests FDIR commands\n+ * VIRTCHNL_FDIR_SUCCESS\n+ * The VF FDIR related request was successfully completed by the PF.\n+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.\n+ *\n+ * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE\n+ * OP_ADD_FDIR_FILTER request failed due to lack of hardware resources.\n+ *\n+ * VIRTCHNL_FDIR_FAILURE_RULE_EXIST\n+ * OP_ADD_FDIR_FILTER request failed because the rule already exists.\n+ *\n+ * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT\n+ * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.\n+ *\n+ * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST\n+ * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.\n+ *\n+ * VIRTCHNL_FDIR_FAILURE_RULE_INVALID\n+ * OP_ADD_FDIR_FILTER request failed due to parameter validation\n+ * or missing HW support.\n+ *\n+ * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT\n+ * OP_ADD/DEL_FDIR_FILTER request failed due to a programming timeout.\n+ *\n+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID\n+ * OP_QUERY_FDIR_FILTER request failed due to parameter validation,\n+ * for example, the VF queried the counter of a rule that has no counter\n+ * action.\n+ */\n+enum virtchnl_fdir_prgm_status {\n+\tVIRTCHNL_FDIR_SUCCESS = 0,\n+\tVIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,\n+\tVIRTCHNL_FDIR_FAILURE_RULE_EXIST,\n+\tVIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,\n+\tVIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,\n+\tVIRTCHNL_FDIR_FAILURE_RULE_INVALID,\n+\tVIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,\n+\tVIRTCHNL_FDIR_FAILURE_QUERY_INVALID,\n+};\n+\n+/* VIRTCHNL_OP_ADD_FDIR_FILTER\n+ * VF sends this request to PF by filling out vsi_id,\n+ * validate_only and rule_cfg. PF will return flow_id\n+ * if the request is successful, and returns add_status to VF.\n+ */\n+struct virtchnl_fdir_add {\n+\tu16 vsi_id;  /* INPUT */\n+\t/*\n+\t * 1 for validating a fdir rule, 0 for creating a fdir rule.\n+\t * Validate and create share one op: VIRTCHNL_OP_ADD_FDIR_FILTER.\n+\t */\n+\tu16 validate_only; /* INPUT */\n+\tu32 flow_id;       /* OUTPUT */\n+\tstruct virtchnl_fdir_rule rule_cfg; /* INPUT */\n+\n+\t/* see enum virtchnl_fdir_prgm_status; OUTPUT */\n+\ts32 status;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);\n+\n+/* VIRTCHNL_OP_DEL_FDIR_FILTER\n+ * VF sends this request to PF by filling out vsi_id\n+ * and flow_id. 
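The flow_id must be one previously\n+ * returned by a successful VIRTCHNL_OP_ADD_FDIR_FILTER reply, e.g. (a\n+ * sketch):\n+ *\n+ *\tdel.vsi_id = add.vsi_id;\n+ *\tdel.flow_id = add.flow_id;\n+ *\n+ * 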
PF will return del_status to VF.\n+ */\n+struct virtchnl_fdir_del {\n+\tu16 vsi_id;  /* INPUT */\n+\tu16 pad;\n+\tu32 flow_id; /* INPUT */\n+\n+\t/* see enum virtchnl_fdir_prgm_status; OUTPUT */\n+\ts32 status;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);\n+\n+/* VIRTCHNL_OP_GET_QOS_CAPS\n+ * VF sends this message to get its QoS Caps, such as\n+ * TC number, Arbiter and Bandwidth.\n+ */\n+struct virtchnl_qos_cap_elem {\n+\tu8 tc_num;\n+\tu8 tc_prio;\n+#define VIRTCHNL_ABITER_STRICT      0\n+#define VIRTCHNL_ABITER_ETS         2\n+\tu8 arbiter;\n+#define VIRTCHNL_STRICT_WEIGHT      1\n+\tu8 weight;\n+\tenum virtchnl_bw_limit_type type;\n+\tunion {\n+\t\tstruct virtchnl_shaper_bw shaper;\n+\t\tu8 pad2[32];\n+\t};\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);\n+\n+struct virtchnl_qos_cap_list {\n+\tu16 vsi_id;\n+\tu16 num_elem;\n+\tstruct virtchnl_qos_cap_elem cap[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_qos_cap_list);\n+\n+/* VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP\n+ * VF sends message virtchnl_queue_tc_mapping to set queue to tc\n+ * mapping for all the Tx and Rx queues with a specified VSI, and\n+ * would get response about bitmap of valid user priorities\n+ * associated with queues.\n+ */\n+struct virtchnl_queue_tc_mapping {\n+\tu16 vsi_id;\n+\tu16 num_tc;\n+\tu16 num_queue_pairs;\n+\tu8 pad[2];\n+\tunion {\n+\t\tstruct {\n+\t\t\tu16 start_queue_id;\n+\t\t\tu16 queue_count;\n+\t\t} req;\n+\t\tstruct {\n+#define VIRTCHNL_USER_PRIO_TYPE_UP\t0\n+#define VIRTCHNL_USER_PRIO_TYPE_DSCP\t1\n+\t\t\tu16 prio_type;\n+\t\t\tu16 valid_prio_bitmap;\n+\t\t} resp;\n+\t} tc[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);\n+\n+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */\n+struct virtchnl_queue_bw {\n+\tu16 queue_id;\n+\tu8 tc;\n+\tu8 pad;\n+\tstruct virtchnl_shaper_bw shaper;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);\n+\n+struct virtchnl_queues_bw_cfg {\n+\tu16 vsi_id;\n+\tu16 num_queues;\n+\tstruct virtchnl_queue_bw cfg[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg);\n+\n+/* queue types */\n+enum virtchnl_queue_type {\n+\tVIRTCHNL_QUEUE_TYPE_TX\t\t\t= 0,\n+\tVIRTCHNL_QUEUE_TYPE_RX\t\t\t= 1,\n+};\n+\n+/* structure to specify a chunk of contiguous queues */\n+struct virtchnl_queue_chunk {\n+\t/* see enum virtchnl_queue_type */\n+\ts32 type;\n+\tu16 start_queue_id;\n+\tu16 num_queues;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);\n+\n+/* structure to specify several chunks of contiguous queues */\n+struct virtchnl_queue_chunks {\n+\tu16 num_chunks;\n+\tu16 rsvd;\n+\tstruct virtchnl_queue_chunk chunks[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks);\n+\n+/* VIRTCHNL_OP_ENABLE_QUEUES_V2\n+ * VIRTCHNL_OP_DISABLE_QUEUES_V2\n+ *\n+ * These opcodes can be used if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in\n+ * VIRTCHNL_OP_GET_VF_RESOURCES\n+ *\n+ * VF sends virtchnl_ena_dis_queues struct to specify the queues to be\n+ * enabled/disabled in chunks. Also applicable to single queue RX or\n+ * TX. 
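For example (values are illustrative only), enabling Tx queues 0-31\n+ * of a vport could be expressed as a single chunk with\n+ * type = VIRTCHNL_QUEUE_TYPE_TX, start_queue_id = 0 and\n+ * num_queues = 32. 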
PF performs requested action and returns status.\n+ */\n+struct virtchnl_del_ena_dis_queues {\n+\tu16 vport_id;\n+\tu16 pad;\n+\tstruct virtchnl_queue_chunks chunks;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_del_ena_dis_queues);\n+\n+/* Virtchannel interrupt throttling rate index */\n+enum virtchnl_itr_idx {\n+\tVIRTCHNL_ITR_IDX_0\t= 0,\n+\tVIRTCHNL_ITR_IDX_1\t= 1,\n+\tVIRTCHNL_ITR_IDX_NO_ITR\t= 3,\n+};\n+\n+/* Queue to vector mapping */\n+struct virtchnl_queue_vector {\n+\tu16 queue_id;\n+\tu16 vector_id;\n+\tu8 pad[4];\n+\n+\t/* see enum virtchnl_itr_idx */\n+\ts32 itr_idx;\n+\n+\t/* see enum virtchnl_queue_type */\n+\ts32 queue_type;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);\n+\n+/* VIRTCHNL_OP_MAP_QUEUE_VECTOR\n+ *\n+ * This opcode can be used only if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated\n+ * in VIRTCHNL_OP_GET_VF_RESOURCES\n+ *\n+ * VF sends this message to map queues to vectors and ITR index registers.\n+ * External data buffer contains virtchnl_queue_vector_maps structure\n+ * that contains num_qv_maps of virtchnl_queue_vector structures.\n+ * PF maps the requested queue vector maps after validating the queue and vector\n+ * ids and returns a status code.\n+ */\n+struct virtchnl_queue_vector_maps {\n+\tu16 vport_id;\n+\tu16 num_qv_maps;\n+\tu8 pad[4];\n+\tstruct virtchnl_queue_vector qv_maps[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);\n+\n+struct virtchnl_quanta_cfg {\n+\tu16 quanta_size;\n+\tstruct virtchnl_queue_chunk queue_select;\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);\n+\n+/* VIRTCHNL_VF_CAP_PTP\n+ *   VIRTCHNL_OP_1588_PTP_GET_CAPS\n+ *   VIRTCHNL_OP_1588_PTP_GET_TIME\n+ *   VIRTCHNL_OP_1588_PTP_SET_TIME\n+ *   VIRTCHNL_OP_1588_PTP_ADJ_TIME\n+ *   VIRTCHNL_OP_1588_PTP_ADJ_FREQ\n+ *   VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP\n+ *   VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS\n+ *   VIRTCHNL_OP_1588_PTP_SET_PIN_CFG\n+ *   VIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP\n+ *\n+ * Support for offloading control of the device PTP hardware clock (PHC) is enabled\n+ * by VIRTCHNL_VF_CAP_PTP. This capability allows a VF to request that PF\n+ * enable Tx and Rx timestamps, and request access to read and/or write the\n+ * PHC on the device, as well as query if the VF has direct access to the PHC\n+ * time registers.\n+ *\n+ * The VF must set VIRTCHNL_VF_CAP_PTP in its capabilities when requesting\n+ * resources. If the capability is set in reply, the VF must then send\n+ * a VIRTCHNL_OP_1588_PTP_GET_CAPS request during initialization. The VF indicates\n+ * what extended capabilities it wants by setting the appropriate flags in the\n+ * caps field. The PF reply will indicate what features are enabled for\n+ * that VF.\n+ */\n+#define VIRTCHNL_1588_PTP_CAP_TX_TSTAMP\t\tBIT(0)\n+#define VIRTCHNL_1588_PTP_CAP_RX_TSTAMP\t\tBIT(1)\n+#define VIRTCHNL_1588_PTP_CAP_READ_PHC\t\tBIT(2)\n+#define VIRTCHNL_1588_PTP_CAP_WRITE_PHC\t\tBIT(3)\n+#define VIRTCHNL_1588_PTP_CAP_PHC_REGS\t\tBIT(4)\n+#define VIRTCHNL_1588_PTP_CAP_PIN_CFG\t\tBIT(5)\n+\n+/**\n+ * virtchnl_phc_regs\n+ *\n+ * Structure defines how the VF should access PHC related registers. The VF\n+ * must request VIRTCHNL_1588_PTP_CAP_PHC_REGS. If the VF has access to PHC\n+ * registers, the PF will reply with the capability flag set, and with this\n+ * structure detailing what PCIe region and what offsets to use. 
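As a minimal sketch (bar and read32() are placeholders for the VF's\n+ * mapping of the indicated pcie_region, and the exact register\n+ * latching behavior is device specific), the VF could compose the\n+ * clock time as:\n+ *\n+ *\tu64 now = ((u64)read32(bar + regs->clock_hi) << 32) |\n+ *\t\t  read32(bar + regs->clock_lo);\n+ *\n+ * 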
If direct\n+ * access is not available, this entire structure is reserved and the fields\n+ * will be zero.\n+ *\n+ * If necessary in a future extension, a separate capability mutually\n+ * exclusive with VIRTCHNL_1588_PTP_CAP_PHC_REGS might be used to change the\n+ * entire format of this structure within virtchnl_ptp_caps.\n+ *\n+ * @clock_hi: Register offset of the high 32 bits of clock time\n+ * @clock_lo: Register offset of the low 32 bits of clock time\n+ * @pcie_region: The PCIe region the registers are located in.\n+ * @rsvd: Reserved bits for future extension\n+ */\n+struct virtchnl_phc_regs {\n+\tu32 clock_hi;\n+\tu32 clock_lo;\n+\tu8 pcie_region;\n+\tu8 rsvd[15];\n+};\n+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_phc_regs);\n+\n+/* timestamp format enumeration\n+ *\n+ * VIRTCHNL_1588_PTP_TSTAMP_40BIT\n+ *\n+ *   This format indicates a timestamp that uses the 40bit format from the\n+ *   flexible Rx descriptors. It is also the default Tx timestamp format used\n+ *   today.\n+ *\n+ *   Such a timestamp has the following 40bit format:\n+ *\n+ *   *--------------------------------*-------------------------------*-----------*\n+ *   | 32 bits of time in nanoseconds | 7 bits of sub-nanosecond time | valid bit |\n+ *   *--------------------------------*-------------------------------*-----------*\n+ *\n+ *   The timestamp is passed in a u64, with the upper 24bits of the field\n+ *   reserved as zero.\n+ *\n+ *   With this format, in order to report a full 64bit timestamp to userspace\n+ *   applications, the VF is responsible for performing timestamp extension by\n+ *   carefully comparing the timestamp with the PHC time. This can correctly\n+ *   be achieved with a recent cached copy of the PHC time by doing delta\n+ *   comparison between the 32bits of nanoseconds in the timestamp with the\n+ *   lower 32 bits of the clock time. For this to work, the cached PHC time\n+ *   must be from within 2^31 nanoseconds (~2.1 seconds) of when the timestamp\n+ *   was captured.\n+ *\n+ * VIRTCHNL_1588_PTP_TSTAMP_64BIT_NS\n+ *\n+ *   This format indicates a timestamp that is 64 bits of nanoseconds.\n+ */\n+enum virtchnl_ptp_tstamp_format {\n+\tVIRTCHNL_1588_PTP_TSTAMP_40BIT = 0,\n+\tVIRTCHNL_1588_PTP_TSTAMP_64BIT_NS = 1,\n+};\n+\n+/**\n+ * virtchnl_ptp_caps\n+ *\n+ * Structure that defines the PTP capabilities available to the VF. The VF\n+ * sends VIRTCHNL_OP_1588_PTP_GET_CAPS, and must fill in the ptp_caps field\n+ * indicating what capabilities it is requesting. The PF will respond with the\n+ * same message with the virtchnl_ptp_caps structure indicating what is\n+ * enabled for the VF.\n+ *\n+ * @phc_regs: If VIRTCHNL_1588_PTP_CAP_PHC_REGS is set, contains information\n+ *            on the PHC related registers available to the VF.\n+ * @caps: On send, VF sets what capabilities it requests. On reply, PF\n+ *        indicates what has been enabled for this VF. The PF shall not set\n+ *        bits which were not requested by the VF.\n+ * @max_adj: The maximum adjustment capable of being requested by\n+ *           VIRTCHNL_OP_1588_PTP_ADJ_FREQ, in parts per billion. Note that 1 ppb\n+ *           is approximately 65.5 scaled_ppm. 
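For example, a max_adj of\n+ *           50000000 would advertise an adjustment range of roughly\n+ *           +/- 5%. 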
The PF shall clamp any\n+ *           frequency adjustment in VIRTCHNL_OP_1588_PTP_ADJ_FREQ to\n+ *           +/- max_adj.\n+ *           Use of ppb in this field allows fitting the value into 4 bytes\n+ *           instead of potentially requiring 8 if scaled_ppm units were used.\n+ * @tx_tstamp_idx: The Tx timestamp index to set in the transmit descriptor\n+ *                 when requesting a timestamp for an outgoing packet.\n+ *                 Reserved if VIRTCHNL_1588_PTP_CAP_TX_TSTAMP is not enabled.\n+ * @n_ext_ts: Number of external timestamp functions available. Reserved\n+ *            if VIRTCHNL_1588_PTP_CAP_PIN_CFG is not enabled.\n+ * @n_per_out: Number of periodic output functions available. Reserved if\n+ *             VIRTCHNL_1588_PTP_CAP_PIN_CFG is not enabled.\n+ * @n_pins: Number of physical programmable pins able to be controlled.\n+ *          Reserved if VIRTCHNL_1588_PTP_CAP_PIN_CFG is not enabled.\n+ * @tx_tstamp_format: Format of the Tx timestamps. Valid formats are defined\n+ *                    by the virtchnl_ptp_tstamp_format enumeration. Note\n+ *                    that Rx timestamps are tied to the descriptor format,\n+ *                    and do not have a separate format field.\n+ * @rsvd: Reserved bits for future extension.\n+ *\n+ * PTP capabilities\n+ *\n+ * VIRTCHNL_1588_PTP_CAP_TX_TSTAMP indicates that the VF can request transmit\n+ * timestamps for packets in its transmit descriptors. If this is unset,\n+ * transmit timestamp requests are ignored. Note that only one outstanding Tx\n+ * timestamp request will be honored at a time. The PF shall handle receipt of\n+ * the timestamp from the hardware, and will forward this to the VF by sending\n+ * a VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP message.\n+ *\n+ * VIRTCHNL_1588_PTP_CAP_RX_TSTAMP indicates that the VF receive queues have\n+ * receive timestamps enabled in the flexible descriptors. Note that this\n+ * requires a VF to also negotiate to enable advanced flexible descriptors in\n+ * the receive path instead of the default legacy descriptor format.\n+ *\n+ * For a detailed description of the current Tx and Rx timestamp format, see\n+ * the section on virtchnl_phc_tx_tstamp. Future extensions may indicate\n+ * timestamp format in the capability structure.\n+ *\n+ * VIRTCHNL_1588_PTP_CAP_READ_PHC indicates that the VF may read the PHC time\n+ * via the VIRTCHNL_OP_1588_PTP_GET_TIME command, or by directly reading PHC\n+ * registers if VIRTCHNL_1588_PTP_CAP_PHC_REGS is also set.\n+ *\n+ * VIRTCHNL_1588_PTP_CAP_WRITE_PHC indicates that the VF may request updates\n+ * to the PHC time via VIRTCHNL_OP_1588_PTP_SET_TIME,\n+ * VIRTCHNL_OP_1588_PTP_ADJ_TIME, and VIRTCHNL_OP_1588_PTP_ADJ_FREQ.\n+ *\n+ * VIRTCHNL_1588_PTP_CAP_PHC_REGS indicates that the VF has direct access to\n+ * certain PHC related registers, primarily for lower latency access to the\n+ * PHC time. If this is set, the VF shall read the virtchnl_phc_regs section\n+ * of the capabilities to determine the location of the clock registers. If\n+ * this capability is not set, the entire 24 bytes of virtchnl_phc_regs is\n+ * reserved as zero. Future extensions may define alternative formats for this\n+ * data, in which case they will be mutually exclusive with this capability.\n+ *\n+ * VIRTCHNL_1588_PTP_CAP_PIN_CFG indicates that the VF has the capability to\n+ * control software defined pins. 
These pins can be assigned either as an\n+ * input to timestamp external events, or as an output to cause a periodic\n+ * signal output.\n+ *\n+ * Note that in the future, additional capability flags may be added which\n+ * indicate additional extended support. All fields marked as reserved by this\n+ * header will be set to zero. VF implementations should verify this to ensure\n+ * that future extensions do not break compatibility.\n+ */\n+struct virtchnl_ptp_caps {\n+\tstruct virtchnl_phc_regs phc_regs;\n+\tu32 caps;\n+\ts32 max_adj;\n+\tu8 tx_tstamp_idx;\n+\tu8 n_ext_ts;\n+\tu8 n_per_out;\n+\tu8 n_pins;\n+\t/* see enum virtchnl_ptp_tstamp_format */\n+\tu8 tx_tstamp_format;\n+\tu8 rsvd[11];\n+};\n+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_ptp_caps);\n+\n+/**\n+ * virtchnl_phc_time\n+ * @time: PHC time in nanoseconds\n+ * @rsvd: Reserved for future extension\n+ *\n+ * Structure sent with VIRTCHNL_OP_1588_PTP_SET_TIME and received with\n+ * VIRTCHNL_OP_1588_PTP_GET_TIME. Contains the 64bits of PHC clock time in\n+ * nanoseconds.\n+ *\n+ * VIRTCHNL_OP_1588_PTP_SET_TIME may be sent by the VF if\n+ * VIRTCHNL_1588_PTP_CAP_WRITE_PHC is set. This will request that the PHC time\n+ * be set to the requested value. This operation is non-atomic and thus does\n+ * not adjust for the delay between request and completion. It is recommended\n+ * that the VF use VIRTCHNL_OP_1588_PTP_ADJ_TIME and\n+ * VIRTCHNL_OP_1588_PTP_ADJ_FREQ when possible to steer the PHC clock.\n+ *\n+ * VIRTCHNL_OP_1588_PTP_GET_TIME may be sent to request the current time of\n+ * the PHC. This op is available in case direct access via the PHC registers\n+ * is not available.\n+ */\n+struct virtchnl_phc_time {\n+\tu64 time;\n+\tu8 rsvd[8];\n+};\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_time);\n+\n+/**\n+ * virtchnl_phc_adj_time\n+ * @delta: offset requested to adjust clock by\n+ * @rsvd: reserved for future extension\n+ *\n+ * Sent with VIRTCHNL_OP_1588_PTP_ADJ_TIME. Used to request an adjustment of\n+ * the clock time by the provided delta, with negative values representing\n+ * subtraction. VIRTCHNL_OP_1588_PTP_ADJ_TIME may not be sent unless\n+ * VIRTCHNL_1588_PTP_CAP_WRITE_PHC is set.\n+ *\n+ * The atomicity of this operation is not guaranteed. The PF should perform an\n+ * atomic update using appropriate mechanisms if possible. However, this is\n+ * not guaranteed.\n+ */\n+struct virtchnl_phc_adj_time {\n+\ts64 delta;\n+\tu8 rsvd[8];\n+};\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_adj_time);\n+\n+/**\n+ * virtchnl_phc_adj_freq\n+ * @scaled_ppm: frequency adjustment represented in scaled parts per million\n+ * @rsvd: Reserved for future extension\n+ *\n+ * Sent with the VIRTCHNL_OP_1588_PTP_ADJ_FREQ to request an adjustment to the\n+ * clock frequency. The adjustment is in scaled_ppm, which is parts per\n+ * million with a 16bit binary fractional portion. 1 part per billion is\n+ * approximately 65.5 scaled_ppm.\n+ *\n+ *  ppm = scaled_ppm / 2^16\n+ *\n+ *  ppb = scaled_ppm * 1000 / 2^16 or\n+ *\n+ *  ppb = scaled_ppm * 125 / 2^13\n+ *\n+ * The PF shall clamp any adjustment request to plus or minus the specified\n+ * max_adj in the PTP capabilities.\n+ *\n+ * Requests for adjustment are always based off of nominal clock frequency and\n+ * not compounding. 
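For example\n+ * (illustrative arithmetic), slowing the clock by 10 ppb corresponds to\n+ * scaled_ppm = -10 * 65536 / 1000, i.e. approximately -655.\n+ * 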
To reset clock frequency, send a request with a scaled_ppm\n+ * of 0.\n+ */\n+struct virtchnl_phc_adj_freq {\n+\ts64 scaled_ppm;\n+\tu8 rsvd[8];\n+};\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_adj_freq);\n+\n+/**\n+ * virtchnl_phc_tx_tstamp\n+ * @tstamp: timestamp value\n+ * @rsvd: Reserved for future extension\n+ *\n+ * Sent along with VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP from the PF when a Tx\n+ * timestamp for the index associated with this VF in the tx_tstamp_idx field\n+ * is captured by hardware.\n+ *\n+ * If VIRTCHNL_1588_PTP_CAP_TX_TSTAMP is set, the VF may request a timestamp\n+ * for a packet in its transmit context descriptor by setting the appropriate\n+ * flag and setting the timestamp index provided by the PF. On transmission,\n+ * the timestamp will be captured and sent to the PF. The PF will forward this\n+ * timestamp to the VF via the VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP op.\n+ *\n+ * The timestamp format is defined by the tx_tstamp_format field of the\n+ * virtchnl_ptp_caps structure.\n+ */\n+struct virtchnl_phc_tx_tstamp {\n+\tu64 tstamp;\n+\tu8 rsvd[8];\n+};\n+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_tx_tstamp);\n+\n+enum virtchnl_phc_pin_func {\n+\tVIRTCHNL_PHC_PIN_FUNC_NONE = 0, /* Not assigned to any function */\n+\tVIRTCHNL_PHC_PIN_FUNC_EXT_TS = 1, /* Assigned to external timestamp */\n+\tVIRTCHNL_PHC_PIN_FUNC_PER_OUT = 2, /* Assigned to periodic output */\n+};\n+\n+/* Length of the pin configuration data. All pin configurations belong within\n+ * the same union and *must* have this length in bytes.\n+ */\n+#define VIRTCHNL_PIN_CFG_LEN 64\n+\n+/* virtchnl_phc_ext_ts_mode\n+ *\n+ * Mode of the external timestamp, indicating which edges of the input signal\n+ * to timestamp.\n+ */\n+enum virtchnl_phc_ext_ts_mode {\n+\tVIRTCHNL_PHC_EXT_TS_NONE = 0,\n+\tVIRTCHNL_PHC_EXT_TS_RISING_EDGE = 1,\n+\tVIRTCHNL_PHC_EXT_TS_FALLING_EDGE = 2,\n+\tVIRTCHNL_PHC_EXT_TS_BOTH_EDGES = 3,\n+};\n+\n+/**\n+ * virtchnl_phc_ext_ts\n+ * @mode: mode of external timestamp request\n+ * @rsvd: reserved for future extension\n+ *\n+ * External timestamp configuration. 
Defines the configuration for this\n+ * external timestamp function.\n+ *\n+ * If mode is VIRTCHNL_PHC_EXT_TS_NONE, the function is essentially disabled,\n+ * timestamping nothing.\n+ *\n+ * If mode is VIRTCHNL_PHC_EXT_TS_RISING_EDGE, the function shall timestamp\n+ * the rising edge of the input when it transitions from low to high signal.\n+ *\n+ * If mode is VIRTCHNL_PHC_EXT_TS_FALLING_EDGE, the function shall timestamp\n+ * the falling edge of the input when it transitions from high to low signal.\n+ *\n+ * If mode is VIRTCHNL_PHC_EXT_TS_BOTH_EDGES, the function shall timestamp\n+ * both the rising and falling edge of the signal whenever it changes.\n+ *\n+ * The PF shall return an error if the requested mode cannot be implemented on\n+ * the function.\n+ */\n+struct virtchnl_phc_ext_ts {\n+\tu8 mode; /* see virtchnl_phc_ext_ts_mode */\n+\tu8 rsvd[63];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(VIRTCHNL_PIN_CFG_LEN, virtchnl_phc_ext_ts);\n+\n+/* virtchnl_phc_per_out_flags\n+ *\n+ * Flags defining periodic output functionality.\n+ */\n+enum virtchnl_phc_per_out_flags {\n+\tVIRTCHNL_PHC_PER_OUT_PHASE_START = BIT(0),\n+};\n+\n+/**\n+ * virtchnl_phc_per_out\n+ * @start: absolute start time (if VIRTCHNL_PHC_PER_OUT_PHASE_START unset)\n+ * @phase: phase offset to start (if VIRTCHNL_PHC_PER_OUT_PHASE_START set)\n+ * @period: time to complete a full clock cycle (low -> high -> low)\n+ * @on: length of time the signal should stay high\n+ * @flags: flags defining the periodic output operation.\n+ * @rsvd: reserved for future extension\n+ *\n+ * Configuration for a periodic output signal. Used to define the signal that\n+ * should be generated on a given function.\n+ *\n+ * The period field determines the full length of the clock cycle, i.e. the\n+ * sum of the time held high and the time held low, in nanoseconds.\n+ *\n+ * The on field determines how long the signal should remain high. For\n+ * a traditional square wave clock that is on for some duration and off for\n+ * the same duration, use an on length of precisely half the period. The duty\n+ * cycle of the clock is on/period.\n+ *\n+ * If VIRTCHNL_PHC_PER_OUT_PHASE_START is unset, then the request is to start\n+ * a clock at an absolute time. This means that the clock should start\n+ * precisely at the specified time in the start field. If the start time is in\n+ * the past, then the periodic output should start at the next valid multiple\n+ * of the period plus the start time:\n+ *\n+ *   new_start = (n * period) + start\n+ *     (choose n such that new start is in the future)\n+ *\n+ * Note that the PF should not reject a start time in the past because it is\n+ * possible that such a start time was valid when the request was made, but\n+ * became invalid due to delay in programming the pin.\n+ *\n+ * If VIRTCHNL_PHC_PER_OUT_PHASE_START is set, then the request is to start\n+ * the next multiple of the period plus the phase offset. The phase must be\n+ * less than the period. In this case, the clock should start as soon as\n+ * possible at the next available multiple of the period. 
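For example, a 1 Hz\n+ * pulse-per-second signal aligned to the top of a second would use\n+ * period = 1000000000 and phase = 0 (values illustrative). 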
To calculate a start time\n+ * when programming this mode, use:\n+ *\n+ *   start = (n * period) + phase\n+ *     (choose n such that start is in the future)\n+ *\n+ * A period of zero should be treated as a request to disable the clock\n+ * output.\n+ */\n+struct virtchnl_phc_per_out {\n+\tunion {\n+\t\tu64 start;\n+\t\tu64 phase;\n+\t};\n+\tu64 period;\n+\tu64 on;\n+\tu32 flags;\n+\tu8 rsvd[36];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(VIRTCHNL_PIN_CFG_LEN, virtchnl_phc_per_out);\n+\n+/* virtchnl_phc_pin_cfg_flags\n+ *\n+ * Definition of bits in the flags field of the virtchnl_phc_pin_cfg\n+ * structure.\n+ */\n+enum virtchnl_phc_pin_cfg_flags {\n+\t/* Valid for VIRTCHNL_OP_1588_PTP_SET_PIN_CFG. If set, indicates this\n+\t * is a request to verify if the function can be assigned to the\n+\t * provided pin. In this case, the ext_ts and per_out fields are\n+\t * ignored, and the PF response must be an error if the pin cannot be\n+\t * assigned to that function index.\n+\t */\n+\tVIRTCHNL_PHC_PIN_CFG_VERIFY = BIT(0),\n+};\n+\n+/**\n+ * virtchnl_phc_set_pin\n+ * @pin_index: The pin to get or set\n+ * @func: the function type the pin is assigned to\n+ * @func_index: the index of the function the pin is assigned to\n+ * @ext_ts: external timestamp configuration\n+ * @per_out: periodic output configuration\n+ * @rsvd1: Reserved for future extension\n+ * @rsvd2: Reserved for future extension\n+ *\n+ * Sent along with the VIRTCHNL_OP_1588_PTP_SET_PIN_CFG op.\n+ *\n+ * The VF issues a VIRTCHNL_OP_1588_PTP_SET_PIN_CFG to assign the pin to one\n+ * of the functions. It must set the pin_index field, the func field, and\n+ * the func_index field. The pin_index must be less than n_pins, and the\n+ * func_index must be less than the n_ext_ts or n_per_out depending on which\n+ * function type is selected. If func is for an external timestamp, the\n+ * ext_ts field must be filled in with the desired configuration. Similarly,\n+ * if the function is for a periodic output, the per_out field must be\n+ * configured.\n+ *\n+ * If the VIRTCHNL_PHC_PIN_CFG_VERIFY bit of the flag field is set, this is\n+ * a request only to verify the configuration, not to set it. In this case,\n+ * the PF should simply report an error if the requested pin cannot be\n+ * assigned to the requested function. This allows VF to determine whether or\n+ * not a given function can be assigned to a specific pin. Other flag bits are\n+ * currently reserved and must be verified as zero on both sides. 
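As an\n+ * illustrative sketch (pin and function indices are hypothetical),\n+ * assigning pin 2 to the first periodic output function could look\n+ * like:\n+ *\n+ *\tstruct virtchnl_phc_set_pin cfg = {0};\n+ *\n+ *\tcfg.pin_index = 2;\n+ *\tcfg.func = VIRTCHNL_PHC_PIN_FUNC_PER_OUT;\n+ *\tcfg.func_index = 0;\n+ *\tcfg.per_out.period = 1000000000;\t/* 1 Hz */\n+ *\tcfg.per_out.on = 500000000;\t/* 50% duty cycle */\n+ *\n+ * 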
These reserved bits may be\n+ * extended in the future.\n+ */\n+struct virtchnl_phc_set_pin {\n+\tu32 flags; /* see virtchnl_phc_pin_cfg_flags */\n+\tu8 pin_index;\n+\tu8 func; /* see virtchnl_phc_pin_func */\n+\tu8 func_index;\n+\tu8 rsvd1;\n+\tunion {\n+\t\tstruct virtchnl_phc_ext_ts ext_ts;\n+\t\tstruct virtchnl_phc_per_out per_out;\n+\t};\n+\tu8 rsvd2[8];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(80, virtchnl_phc_set_pin);\n+\n+/**\n+ * virtchnl_phc_pin\n+ * @pin_index: The pin to get or set\n+ * @func: the function type the pin is assigned to\n+ * @func_index: the index of the function the pin is assigned to\n+ * @rsvd: Reserved for future extension\n+ * @name: human readable pin name, supplied by PF on GET_PIN_CFGS\n+ *\n+ * Sent by the PF as part of the VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS response.\n+ *\n+ * The VF issues a VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS request to the PF in\n+ * order to obtain the current pin configuration for all of the pins that were\n+ * assigned to this VF.\n+ *\n+ * This structure details the pin configuration state, including a pin name\n+ * and which function is assigned to the pin currently.\n+ */\n+struct virtchnl_phc_pin {\n+\tu8 pin_index;\n+\tu8 func; /* see virtchnl_phc_pin_func */\n+\tu8 func_index;\n+\tu8 rsvd[5];\n+\tchar name[64];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_phc_pin);\n+\n+/**\n+ * virtchnl_phc_get_pins\n+ * @len: length of the variable pin config array\n+ * @pins: variable length pin configuration array\n+ *\n+ * Variable structure sent by the PF in reply to\n+ * VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS. The VF does not send this structure with\n+ * its request for this operation.\n+ *\n+ * It is possible that the PF may need to send more pin configuration data\n+ * than can be sent in one virtchnl message. To handle this, the PF should\n+ * issue multiple VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS responses. Each response\n+ * will indicate the number of pins it covers. The VF should be ready to wait\n+ * for multiple responses until it has received a total number of pins equal\n+ * to the n_pins negotiated during extended PTP capabilities exchange.\n+ */\n+struct virtchnl_phc_get_pins {\n+\tu8 len;\n+\tu8 rsvd[7];\n+\tstruct virtchnl_phc_pin pins[1];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(80, virtchnl_phc_get_pins);\n+\n+/**\n+ * virtchnl_phc_ext_tstamp\n+ * @tstamp: timestamp value\n+ * @tstamp_rsvd: Reserved for future extension of the timestamp value.\n+ * @tstamp_format: format of the timestamp\n+ * @func_index: external timestamp function this timestamp is for\n+ * @rsvd2: Reserved for future extension\n+ *\n+ * Sent along with the VIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP from the PF when an\n+ * external timestamp function is triggered.\n+ *\n+ * This will be sent only if one of the external timestamp functions is\n+ * configured by the VF, and is only valid if VIRTCHNL_1588_PTP_CAP_PIN_CFG is\n+ * negotiated with the PF.\n+ *\n+ * The timestamp format is defined by the tstamp_format field using the\n+ * virtchnl_ptp_tstamp_format enumeration. 
The tstamp_rsvd field is\n+ * exclusively reserved for possible future variants of the timestamp format,\n+ * and its access will be controlled by the tstamp_format field.\n+ */\n+struct virtchnl_phc_ext_tstamp {\n+\tu64 tstamp;\n+\tu8 tstamp_rsvd[8];\n+\tu8 tstamp_format;\n+\tu8 func_index;\n+\tu8 rsvd2[6];\n+};\n+\n+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_phc_ext_tstamp);\n+\n+/* Since VF messages are limited by u16 size, precalculate the maximum possible\n+ * values of nested elements in virtchnl structures that virtual channel can\n+ * possibly handle in a single message.\n+ */\n+enum virtchnl_vector_limits {\n+\tVIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX\t=\n+\t\t((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /\n+\t\tsizeof(struct virtchnl_queue_pair_info),\n+\n+\tVIRTCHNL_OP_CONFIG_IRQ_MAP_MAX\t\t=\n+\t\t((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /\n+\t\tsizeof(struct virtchnl_vector_map),\n+\n+\tVIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX\t=\n+\t\t((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /\n+\t\tsizeof(struct virtchnl_ether_addr),\n+\n+\tVIRTCHNL_OP_ADD_DEL_VLAN_MAX\t\t=\n+\t\t((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /\n+\t\tsizeof(u16),\n+\n+\n+\tVIRTCHNL_OP_ENABLE_CHANNELS_MAX\t\t=\n+\t\t((u16)(~0) - sizeof(struct virtchnl_tc_info)) /\n+\t\tsizeof(struct virtchnl_channel_info),\n+\n+\tVIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX\t=\n+\t\t((u16)(~0) - sizeof(struct virtchnl_del_ena_dis_queues)) /\n+\t\tsizeof(struct virtchnl_queue_chunk),\n+\n+\tVIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX\t=\n+\t\t((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) /\n+\t\tsizeof(struct virtchnl_queue_vector),\n+\n+\tVIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX\t\t=\n+\t\t((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list_v2)) /\n+\t\tsizeof(struct virtchnl_vlan_filter),\n+};\n+\n+/**\n+ * virtchnl_vc_validate_vf_msg\n+ * @ver: Virtchnl version info\n+ * @v_opcode: Opcode for the message\n+ * @msg: pointer to the msg buffer\n+ * @msglen: msg length\n+ *\n+ * validate msg format against struct for each opcode\n+ */\n+static inline int\n+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,\n+\t\t\t    u8 *msg, u16 msglen)\n+{\n+\tbool err_msg_format = false;\n+\tu32 valid_len = 0;\n+\n+\t/* Validate message length. 
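Fixed-size opcodes simply set\n+\t * valid_len to the expected structure size, while variable-length\n+\t * opcodes bound the element count (see enum virtchnl_vector_limits)\n+\t * and then add the per-element sizes. 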
*/\n+\tswitch (v_opcode) {\n+\tcase VIRTCHNL_OP_VERSION:\n+\t\tvalid_len = sizeof(struct virtchnl_version_info);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_RESET_VF:\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_GET_VF_RESOURCES:\n+\t\tif (VF_IS_V11(ver))\n+\t\t\tvalid_len = sizeof(u32);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_CONFIG_TX_QUEUE:\n+\t\tvalid_len = sizeof(struct virtchnl_txq_info);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_CONFIG_RX_QUEUE:\n+\t\tvalid_len = sizeof(struct virtchnl_rxq_info);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_CONFIG_VSI_QUEUES:\n+\t\tvalid_len = sizeof(struct virtchnl_vsi_queue_config_info);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_vsi_queue_config_info *vqc =\n+\t\t\t    (struct virtchnl_vsi_queue_config_info *)msg;\n+\n+\t\t\tif (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >\n+\t\t\t    VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += (vqc->num_queue_pairs *\n+\t\t\t\t      sizeof(struct\n+\t\t\t\t\t     virtchnl_queue_pair_info));\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_CONFIG_IRQ_MAP:\n+\t\tvalid_len = sizeof(struct virtchnl_irq_map_info);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_irq_map_info *vimi =\n+\t\t\t    (struct virtchnl_irq_map_info *)msg;\n+\n+\t\t\tif (vimi->num_vectors == 0 || vimi->num_vectors >\n+\t\t\t    VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += (vimi->num_vectors *\n+\t\t\t\t      sizeof(struct virtchnl_vector_map));\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_ENABLE_QUEUES:\n+\tcase VIRTCHNL_OP_DISABLE_QUEUES:\n+\t\tvalid_len = sizeof(struct virtchnl_queue_select);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_GET_MAX_RSS_QREGION:\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_ADD_ETH_ADDR:\n+\tcase VIRTCHNL_OP_DEL_ETH_ADDR:\n+\t\tvalid_len = sizeof(struct virtchnl_ether_addr_list);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_ether_addr_list *veal =\n+\t\t\t    (struct virtchnl_ether_addr_list *)msg;\n+\n+\t\t\tif (veal->num_elements == 0 || veal->num_elements >\n+\t\t\t    VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += veal->num_elements *\n+\t\t\t    sizeof(struct virtchnl_ether_addr);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_ADD_VLAN:\n+\tcase VIRTCHNL_OP_DEL_VLAN:\n+\t\tvalid_len = sizeof(struct virtchnl_vlan_filter_list);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_vlan_filter_list *vfl =\n+\t\t\t    (struct virtchnl_vlan_filter_list *)msg;\n+\n+\t\t\tif (vfl->num_elements == 0 || vfl->num_elements >\n+\t\t\t    VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += vfl->num_elements * sizeof(u16);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:\n+\t\tvalid_len = sizeof(struct virtchnl_promisc_info);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_GET_STATS:\n+\t\tvalid_len = sizeof(struct virtchnl_queue_select);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_CONFIG_RSS_KEY:\n+\t\tvalid_len = sizeof(struct virtchnl_rss_key);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_rss_key *vrk =\n+\t\t\t\t(struct virtchnl_rss_key *)msg;\n+\n+\t\t\tif (vrk->key_len == 0) {\n+\t\t\t\t/* zero length is allowed as input */\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += vrk->key_len - 1;\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_CONFIG_RSS_LUT:\n+\t\tvalid_len = sizeof(struct virtchnl_rss_lut);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_rss_lut *vrl 
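/* message buffer viewed as an RSS LUT */ 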
=\n+\t\t\t\t(struct virtchnl_rss_lut *)msg;\n+\n+\t\t\tif (vrl->lut_entries == 0) {\n+\t\t\t\t/* zero entries is allowed as input */\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += vrl->lut_entries - 1;\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_GET_RSS_HENA_CAPS:\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_SET_RSS_HENA:\n+\t\tvalid_len = sizeof(struct virtchnl_rss_hena);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:\n+\tcase VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_REQUEST_QUEUES:\n+\t\tvalid_len = sizeof(struct virtchnl_vf_res_request);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_ENABLE_CHANNELS:\n+\t\tvalid_len = sizeof(struct virtchnl_tc_info);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_tc_info *vti =\n+\t\t\t\t(struct virtchnl_tc_info *)msg;\n+\n+\t\t\tif (vti->num_tc == 0 || vti->num_tc >\n+\t\t\t    VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += (vti->num_tc - 1) *\n+\t\t\t\t     sizeof(struct virtchnl_channel_info);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_DISABLE_CHANNELS:\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_ADD_CLOUD_FILTER:\n+\tcase VIRTCHNL_OP_DEL_CLOUD_FILTER:\n+\t\tvalid_len = sizeof(struct virtchnl_filter);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_ADD_RSS_CFG:\n+\tcase VIRTCHNL_OP_DEL_RSS_CFG:\n+\t\tvalid_len = sizeof(struct virtchnl_rss_cfg);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_ADD_FDIR_FILTER:\n+\t\tvalid_len = sizeof(struct virtchnl_fdir_add);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_DEL_FDIR_FILTER:\n+\t\tvalid_len = sizeof(struct virtchnl_fdir_del);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_GET_QOS_CAPS:\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:\n+\t\tvalid_len = sizeof(struct virtchnl_queue_tc_mapping);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_queue_tc_mapping *q_tc =\n+\t\t\t\t(struct virtchnl_queue_tc_mapping *)msg;\n+\t\t\tif (q_tc->num_tc == 0) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tvalid_len += (q_tc->num_tc - 1) *\n+\t\t\t\t\t sizeof(q_tc->tc[0]);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_CONFIG_QUEUE_BW:\n+\t\tvalid_len = sizeof(struct virtchnl_queues_bw_cfg);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_queues_bw_cfg *q_bw =\n+\t\t\t\t(struct virtchnl_queues_bw_cfg *)msg;\n+\t\t\tif (q_bw->num_queues == 0) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tvalid_len += (q_bw->num_queues - 1) *\n+\t\t\t\t\t sizeof(q_bw->cfg[0]);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_CONFIG_QUANTA:\n+\t\tvalid_len = sizeof(struct virtchnl_quanta_cfg);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_quanta_cfg *q_quanta =\n+\t\t\t\t(struct virtchnl_quanta_cfg *)msg;\n+\t\t\tif (q_quanta->quanta_size == 0 ||\n+\t\t\t    q_quanta->queue_select.num_queues == 0) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_ADD_VLAN_V2:\n+\tcase VIRTCHNL_OP_DEL_VLAN_V2:\n+\t\tvalid_len = sizeof(struct virtchnl_vlan_filter_list_v2);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_vlan_filter_list_v2 *vfl =\n+\t\t\t    (struct virtchnl_vlan_filter_list_v2 *)msg;\n+\n+\t\t\tif (vfl->num_elements == 0 || vfl->num_elements >\n+\t\t\t    VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += (vfl->num_elements - 1) *\n+\t\t\t\tsizeof(struct virtchnl_vlan_filter);\n+\t\t}\n+\t\tbreak;\n+\tcase 
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:\n+\tcase VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:\n+\tcase VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:\n+\tcase VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:\n+\tcase VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:\n+\tcase VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:\n+\t\tvalid_len = sizeof(struct virtchnl_vlan_setting);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_1588_PTP_GET_CAPS:\n+\t\tvalid_len = sizeof(struct virtchnl_ptp_caps);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_1588_PTP_GET_TIME:\n+\tcase VIRTCHNL_OP_1588_PTP_SET_TIME:\n+\t\tvalid_len = sizeof(struct virtchnl_phc_time);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_1588_PTP_ADJ_TIME:\n+\t\tvalid_len = sizeof(struct virtchnl_phc_adj_time);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_1588_PTP_ADJ_FREQ:\n+\t\tvalid_len = sizeof(struct virtchnl_phc_adj_freq);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP:\n+\t\tvalid_len = sizeof(struct virtchnl_phc_tx_tstamp);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_1588_PTP_SET_PIN_CFG:\n+\t\tvalid_len = sizeof(struct virtchnl_phc_set_pin);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS:\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP:\n+\t\tvalid_len = sizeof(struct virtchnl_phc_ext_tstamp);\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_ENABLE_QUEUES_V2:\n+\tcase VIRTCHNL_OP_DISABLE_QUEUES_V2:\n+\t\tvalid_len = sizeof(struct virtchnl_del_ena_dis_queues);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_del_ena_dis_queues *qs =\n+\t\t\t\t(struct virtchnl_del_ena_dis_queues *)msg;\n+\t\t\tif (qs->chunks.num_chunks == 0 ||\n+\t\t\t    qs->chunks.num_chunks > VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tvalid_len += (qs->chunks.num_chunks - 1) *\n+\t\t\t\t      sizeof(struct virtchnl_queue_chunk);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL_OP_MAP_QUEUE_VECTOR:\n+\t\tvalid_len = sizeof(struct virtchnl_queue_vector_maps);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl_queue_vector_maps *v_qp =\n+\t\t\t\t(struct virtchnl_queue_vector_maps *)msg;\n+\t\t\tif (v_qp->num_qv_maps == 0 ||\n+\t\t\t    v_qp->num_qv_maps > VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tvalid_len += (v_qp->num_qv_maps - 1) *\n+\t\t\t\t      sizeof(struct virtchnl_queue_vector);\n+\t\t}\n+\t\tbreak;\n+\t/* These are always errors coming from the VF. */\n+\tcase VIRTCHNL_OP_EVENT:\n+\tcase VIRTCHNL_OP_UNKNOWN:\n+\tdefault:\n+\t\treturn VIRTCHNL_STATUS_ERR_PARAM;\n+\t}\n+\t/* few more checks */\n+\tif (err_msg_format || valid_len != msglen)\n+\t\treturn VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;\n+\n+\treturn 0;\n+}\n+#endif /* _VIRTCHNL_H_ */\ndiff --git a/drivers/common/idpf/base/virtchnl2.h b/drivers/common/idpf/base/virtchnl2.h\nnew file mode 100644\nindex 0000000000..d496f2388e\n--- /dev/null\n+++ b/drivers/common/idpf/base/virtchnl2.h\n@@ -0,0 +1,1462 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _VIRTCHNL2_H_\n+#define _VIRTCHNL2_H_\n+\n+/* All opcodes associated with virtchnl 2 are prefixed with virtchnl2 or\n+ * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,\n+ * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.\n+ */\n+\n+#include \"virtchnl2_lan_desc.h\"\n+\n+/* Error Codes\n+ * Note that many older versions of various iAVF drivers convert the reported\n+ * status code directly into an iavf_status enumeration. 
For this reason, it\n+ * is important that the values of these enumerations line up.\n+ */\n+#define\t\tVIRTCHNL2_STATUS_SUCCESS\t\t0\n+#define\t\tVIRTCHNL2_STATUS_ERR_PARAM\t\t-5\n+#define\t\tVIRTCHNL2_STATUS_ERR_OPCODE_MISMATCH\t-38\n+\n+/* These macros are used to generate compilation errors if a structure/union\n+ * is not exactly the correct length. It gives a divide by zero error if the\n+ * structure/union is not of the correct size, otherwise it creates an enum\n+ * that is never used.\n+ */\n+#define VIRTCHNL2_CHECK_STRUCT_LEN(n, X) enum virtchnl2_static_assert_enum_##X \\\n+\t{ virtchnl2_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }\n+#define VIRTCHNL2_CHECK_UNION_LEN(n, X) enum virtchnl2_static_asset_enum_##X \\\n+\t{ virtchnl2_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }\n+\n+/* New major set of opcodes introduced and so leaving room for\n+ * old misc opcodes to be added in future. Also these opcodes may only\n+ * be used if both the PF and VF have successfully negotiated the\n+ * VIRTCHNL version as 2.0 during VIRTCHNL22_OP_VERSION exchange.\n+ */\n+#define\t\tVIRTCHNL2_OP_UNKNOWN\t\t\t0\n+#define\t\tVIRTCHNL2_OP_VERSION\t\t\t1\n+#define\t\tVIRTCHNL2_OP_GET_CAPS\t\t\t500\n+#define\t\tVIRTCHNL2_OP_CREATE_VPORT\t\t501\n+#define\t\tVIRTCHNL2_OP_DESTROY_VPORT\t\t502\n+#define\t\tVIRTCHNL2_OP_ENABLE_VPORT\t\t503\n+#define\t\tVIRTCHNL2_OP_DISABLE_VPORT\t\t504\n+#define\t\tVIRTCHNL2_OP_CONFIG_TX_QUEUES\t\t505\n+#define\t\tVIRTCHNL2_OP_CONFIG_RX_QUEUES\t\t506\n+#define\t\tVIRTCHNL2_OP_ENABLE_QUEUES\t\t507\n+#define\t\tVIRTCHNL2_OP_DISABLE_QUEUES\t\t508\n+#define\t\tVIRTCHNL2_OP_ADD_QUEUES\t\t\t509\n+#define\t\tVIRTCHNL2_OP_DEL_QUEUES\t\t\t510\n+#define\t\tVIRTCHNL2_OP_MAP_QUEUE_VECTOR\t\t511\n+#define\t\tVIRTCHNL2_OP_UNMAP_QUEUE_VECTOR\t\t512\n+#define\t\tVIRTCHNL2_OP_GET_RSS_KEY\t\t513\n+#define\t\tVIRTCHNL2_OP_SET_RSS_KEY\t\t514\n+#define\t\tVIRTCHNL2_OP_GET_RSS_LUT\t\t515\n+#define\t\tVIRTCHNL2_OP_SET_RSS_LUT\t\t516\n+#define\t\tVIRTCHNL2_OP_GET_RSS_HASH\t\t517\n+#define\t\tVIRTCHNL2_OP_SET_RSS_HASH\t\t518\n+#define\t\tVIRTCHNL2_OP_SET_SRIOV_VFS\t\t519\n+#define\t\tVIRTCHNL2_OP_ALLOC_VECTORS\t\t520\n+#define\t\tVIRTCHNL2_OP_DEALLOC_VECTORS\t\t521\n+#define\t\tVIRTCHNL2_OP_EVENT\t\t\t522\n+#define\t\tVIRTCHNL2_OP_GET_STATS\t\t\t523\n+#define\t\tVIRTCHNL2_OP_RESET_VF\t\t\t524\n+\t/* opcode 525 is reserved */\n+#define\t\tVIRTCHNL2_OP_GET_PTYPE_INFO\t\t526\n+\t/* opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and\n+\t * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW\n+\t */\n+\t/* opcodes 529, 530, and 531 are reserved */\n+#define\t\tVIRTCHNL2_OP_CREATE_ADI\t\t\t532\n+#define\t\tVIRTCHNL2_OP_DESTROY_ADI\t\t533\n+#define\t\tVIRTCHNL2_OP_LOOPBACK\t\t\t534\n+#define\t\tVIRTCHNL2_OP_ADD_MAC_ADDR\t\t535\n+#define\t\tVIRTCHNL2_OP_DEL_MAC_ADDR\t\t536\n+#define\t\tVIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE\t537\n+\n+#define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX\t0xFFFF\n+\n+/* VIRTCHNL2_VPORT_TYPE\n+ * Type of virtual port\n+ */\n+#define VIRTCHNL2_VPORT_TYPE_DEFAULT\t\t0\n+#define VIRTCHNL2_VPORT_TYPE_SRIOV\t\t1\n+#define VIRTCHNL2_VPORT_TYPE_SIOV\t\t2\n+#define VIRTCHNL2_VPORT_TYPE_SUBDEV\t\t3\n+#define VIRTCHNL2_VPORT_TYPE_MNG\t\t4\n+\n+/* VIRTCHNL2_QUEUE_MODEL\n+ * Type of queue model\n+ *\n+ * In the single queue model, the same transmit descriptor queue is used by\n+ * software to post descriptors to hardware and by hardware to post completed\n+ * descriptors to software.\n+ * Likewise, the same receive descriptor queue is used by hardware to post\n+ * completions 
to software and by software to post buffers to hardware.\n+ */\n+#define VIRTCHNL2_QUEUE_MODEL_SINGLE\t\t0\n+/* In the split queue model, hardware uses transmit completion queues to post\n+ * descriptor/buffer completions to software, while software uses transmit\n+ * descriptor queues to post descriptors to hardware.\n+ * Likewise, hardware posts descriptor completions to the receive descriptor\n+ * queue, while software uses receive buffer queues to post buffers to hardware.\n+ */\n+#define VIRTCHNL2_QUEUE_MODEL_SPLIT\t\t1\n+\n+/* VIRTCHNL2_CHECKSUM_OFFLOAD_CAPS\n+ * Checksum offload capability flags\n+ */\n+#define VIRTCHNL2_CAP_TX_CSUM_L3_IPV4\t\tBIT(0)\n+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP\tBIT(1)\n+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP\tBIT(2)\n+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP\tBIT(3)\n+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP\tBIT(4)\n+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP\tBIT(5)\n+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP\tBIT(6)\n+#define VIRTCHNL2_CAP_TX_CSUM_GENERIC\t\tBIT(7)\n+#define VIRTCHNL2_CAP_RX_CSUM_L3_IPV4\t\tBIT(8)\n+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP\tBIT(9)\n+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP\tBIT(10)\n+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP\tBIT(11)\n+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP\tBIT(12)\n+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP\tBIT(13)\n+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP\tBIT(14)\n+#define VIRTCHNL2_CAP_RX_CSUM_GENERIC\t\tBIT(15)\n+#define VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL\tBIT(16)\n+#define VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL\tBIT(17)\n+#define VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL\tBIT(18)\n+#define VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL\tBIT(19)\n+#define VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL\tBIT(20)\n+#define VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL\tBIT(21)\n+#define VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL\tBIT(22)\n+#define VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL\tBIT(23)\n+\n+/* VIRTCHNL2_SEGMENTATION_OFFLOAD_CAPS\n+ * Segmentation offload capability flags\n+ */\n+#define VIRTCHNL2_CAP_SEG_IPV4_TCP\t\tBIT(0)\n+#define VIRTCHNL2_CAP_SEG_IPV4_UDP\t\tBIT(1)\n+#define VIRTCHNL2_CAP_SEG_IPV4_SCTP\t\tBIT(2)\n+#define VIRTCHNL2_CAP_SEG_IPV6_TCP\t\tBIT(3)\n+#define VIRTCHNL2_CAP_SEG_IPV6_UDP\t\tBIT(4)\n+#define VIRTCHNL2_CAP_SEG_IPV6_SCTP\t\tBIT(5)\n+#define VIRTCHNL2_CAP_SEG_GENERIC\t\tBIT(6)\n+#define VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL\tBIT(7)\n+#define VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL\tBIT(8)\n+\n+/* VIRTCHNL2_RSS_FLOW_TYPE_CAPS\n+ * Receive Side Scaling Flow type capability flags\n+ */\n+#define VIRTCHNL2_CAP_RSS_IPV4_TCP\t\tBIT(0)\n+#define VIRTCHNL2_CAP_RSS_IPV4_UDP\t\tBIT(1)\n+#define VIRTCHNL2_CAP_RSS_IPV4_SCTP\t\tBIT(2)\n+#define VIRTCHNL2_CAP_RSS_IPV4_OTHER\t\tBIT(3)\n+#define VIRTCHNL2_CAP_RSS_IPV6_TCP\t\tBIT(4)\n+#define VIRTCHNL2_CAP_RSS_IPV6_UDP\t\tBIT(5)\n+#define VIRTCHNL2_CAP_RSS_IPV6_SCTP\t\tBIT(6)\n+#define VIRTCHNL2_CAP_RSS_IPV6_OTHER\t\tBIT(7)\n+#define VIRTCHNL2_CAP_RSS_IPV4_AH\t\tBIT(8)\n+#define VIRTCHNL2_CAP_RSS_IPV4_ESP\t\tBIT(9)\n+#define VIRTCHNL2_CAP_RSS_IPV4_AH_ESP\t\tBIT(10)\n+#define VIRTCHNL2_CAP_RSS_IPV6_AH\t\tBIT(11)\n+#define VIRTCHNL2_CAP_RSS_IPV6_ESP\t\tBIT(12)\n+#define VIRTCHNL2_CAP_RSS_IPV6_AH_ESP\t\tBIT(13)\n+\n+/* VIRTCHNL2_HEADER_SPLIT_CAPS\n+ * Header split capability flags\n+ */\n+/* for prepended metadata  */\n+#define VIRTCHNL2_CAP_RX_HSPLIT_AT_L2\t\tBIT(0)\n+/* all VLANs go into header buffer */\n+#define VIRTCHNL2_CAP_RX_HSPLIT_AT_L3\t\tBIT(1)\n+#define VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4\t\tBIT(2)\n+#define 
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6\t\tBIT(3)\n+\n+/* VIRTCHNL2_RSC_OFFLOAD_CAPS\n+ * Receive Side Coalescing offload capability flags\n+ */\n+#define VIRTCHNL2_CAP_RSC_IPV4_TCP\t\tBIT(0)\n+#define VIRTCHNL2_CAP_RSC_IPV4_SCTP\t\tBIT(1)\n+#define VIRTCHNL2_CAP_RSC_IPV6_TCP\t\tBIT(2)\n+#define VIRTCHNL2_CAP_RSC_IPV6_SCTP\t\tBIT(3)\n+\n+/* VIRTCHNL2_OTHER_CAPS\n+ * Other capability flags\n+ * SPLITQ_QSCHED: Queue based scheduling using split queue model\n+ * TX_VLAN: VLAN tag insertion\n+ * RX_VLAN: VLAN tag stripping\n+ */\n+#define VIRTCHNL2_CAP_RDMA\t\t\tBIT(0)\n+#define VIRTCHNL2_CAP_SRIOV\t\t\tBIT(1)\n+#define VIRTCHNL2_CAP_MACFILTER\t\t\tBIT(2)\n+#define VIRTCHNL2_CAP_FLOW_DIRECTOR\t\tBIT(3)\n+#define VIRTCHNL2_CAP_SPLITQ_QSCHED\t\tBIT(4)\n+#define VIRTCHNL2_CAP_CRC\t\t\tBIT(5)\n+#define VIRTCHNL2_CAP_ADQ\t\t\tBIT(6)\n+#define VIRTCHNL2_CAP_WB_ON_ITR\t\t\tBIT(7)\n+#define VIRTCHNL2_CAP_PROMISC\t\t\tBIT(8)\n+#define VIRTCHNL2_CAP_LINK_SPEED\t\tBIT(9)\n+#define VIRTCHNL2_CAP_INLINE_IPSEC\t\tBIT(10)\n+#define VIRTCHNL2_CAP_LARGE_NUM_QUEUES\t\tBIT(11)\n+/* require additional info */\n+#define VIRTCHNL2_CAP_VLAN\t\t\tBIT(12)\n+#define VIRTCHNL2_CAP_PTP\t\t\tBIT(13)\n+#define VIRTCHNL2_CAP_ADV_RSS\t\t\tBIT(15)\n+#define VIRTCHNL2_CAP_FDIR\t\t\tBIT(16)\n+#define VIRTCHNL2_CAP_RX_FLEX_DESC\t\tBIT(17)\n+#define VIRTCHNL2_CAP_PTYPE\t\t\tBIT(18)\n+#define VIRTCHNL2_CAP_LOOPBACK\t\t\tBIT(19)\n+#define VIRTCHNL2_CAP_OEM\t\t\tBIT(20)\n+\n+/* VIRTCHNL2_DEVICE_TYPE */\n+/* underlying device type */\n+#define VIRTCHNL2_MEV_DEVICE\t\t\t0\n+\n+/* VIRTCHNL2_TXQ_SCHED_MODE\n+ * Transmit Queue Scheduling Modes - Queue mode is the legacy mode i.e. inorder\n+ * completions where descriptors and buffers are completed at the same time.\n+ * Flow scheduling mode allows for out of order packet processing where\n+ * descriptors are cleaned in order, but buffers can be completed out of order.\n+ */\n+#define VIRTCHNL2_TXQ_SCHED_MODE_QUEUE\t\t0\n+#define VIRTCHNL2_TXQ_SCHED_MODE_FLOW\t\t1\n+\n+/* VIRTCHNL2_TXQ_FLAGS\n+ * Transmit Queue feature flags\n+ *\n+ * Enable rule miss completion type; packet completion for a packet\n+ * sent on exception path; only relevant in flow scheduling mode\n+ */\n+#define VIRTCHNL2_TXQ_ENABLE_MISS_COMPL\t\tBIT(0)\n+\n+/* VIRTCHNL2_PEER_TYPE\n+ * Transmit mailbox peer type\n+ */\n+#define VIRTCHNL2_RDMA_CPF\t\t\t0\n+#define VIRTCHNL2_NVME_CPF\t\t\t1\n+#define VIRTCHNL2_ATE_CPF\t\t\t2\n+#define VIRTCHNL2_LCE_CPF\t\t\t3\n+\n+/* VIRTCHNL2_RXQ_FLAGS\n+ * Receive Queue Feature flags\n+ */\n+#define VIRTCHNL2_RXQ_RSC\t\t\tBIT(0)\n+#define VIRTCHNL2_RXQ_HDR_SPLIT\t\t\tBIT(1)\n+/* When set, packet descriptors are flushed by hardware immediately after\n+ * processing each packet.\n+ */\n+#define VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK\tBIT(2)\n+#define VIRTCHNL2_RX_DESC_SIZE_16BYTE\t\tBIT(3)\n+#define VIRTCHNL2_RX_DESC_SIZE_32BYTE\t\tBIT(4)\n+\n+/* VIRTCHNL2_RSS_ALGORITHM\n+ * Type of RSS algorithm\n+ */\n+#define VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC\t\t0\n+#define VIRTCHNL2_RSS_ALG_R_ASYMMETRIC\t\t\t1\n+#define VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC\t\t2\n+#define VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC\t\t\t3\n+\n+/* VIRTCHNL2_EVENT_CODES\n+ * Type of event\n+ */\n+#define VIRTCHNL2_EVENT_UNKNOWN\t\t\t0\n+#define VIRTCHNL2_EVENT_LINK_CHANGE\t\t1\n+/* These messages are only sent to PF from CP */\n+#define VIRTCHNL2_EVENT_START_RESET_ADI\t\t2\n+#define VIRTCHNL2_EVENT_FINISH_RESET_ADI\t3\n+\n+/* VIRTCHNL2_QUEUE_TYPE\n+ * Transmit and Receive queue types are valid in legacy as well as split queue\n+ * models. 
With Split Queue model, 2 additional types are introduced -\n+ * TX_COMPLETION and RX_BUFFER. In split queue model, receive  corresponds to\n+ * the queue where hardware posts completions.\n+ */\n+#define VIRTCHNL2_QUEUE_TYPE_TX\t\t\t0\n+#define VIRTCHNL2_QUEUE_TYPE_RX\t\t\t1\n+#define VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION\t2\n+#define VIRTCHNL2_QUEUE_TYPE_RX_BUFFER\t\t3\n+#define VIRTCHNL2_QUEUE_TYPE_CONFIG_TX\t\t4\n+#define VIRTCHNL2_QUEUE_TYPE_CONFIG_RX\t\t5\n+#define VIRTCHNL2_QUEUE_TYPE_P2P_TX\t\t6\n+#define VIRTCHNL2_QUEUE_TYPE_P2P_RX\t\t7\n+#define VIRTCHNL2_QUEUE_TYPE_P2P_TX_COMPLETION\t8\n+#define VIRTCHNL2_QUEUE_TYPE_P2P_RX_BUFFER\t9\n+#define VIRTCHNL2_QUEUE_TYPE_MBX_TX\t\t10\n+#define VIRTCHNL2_QUEUE_TYPE_MBX_RX\t\t11\n+\n+/* VIRTCHNL2_ITR_IDX\n+ * Virtchannel interrupt throttling rate index\n+ */\n+#define VIRTCHNL2_ITR_IDX_0\t\t\t0\n+#define VIRTCHNL2_ITR_IDX_1\t\t\t1\n+#define VIRTCHNL2_ITR_IDX_2\t\t\t2\n+#define VIRTCHNL2_ITR_IDX_NO_ITR\t\t3\n+\n+/* VIRTCHNL2_VECTOR_LIMITS\n+ * Since PF/VF messages are limited by __le16 size, precalculate the maximum\n+ * possible values of nested elements in virtchnl structures that virtual\n+ * channel can possibly handle in a single message.\n+ */\n+\n+#define VIRTCHNL2_OP_DEL_ENABLE_DISABLE_QUEUES_MAX (\\\n+\t\t((__le16)(~0) - sizeof(struct virtchnl2_del_ena_dis_queues)) / \\\n+\t\tsizeof(struct virtchnl2_queue_chunk))\n+\n+#define VIRTCHNL2_OP_MAP_UNMAP_QUEUE_VECTOR_MAX (\\\n+\t\t((__le16)(~0) - sizeof(struct virtchnl2_queue_vector_maps)) / \\\n+\t\tsizeof(struct virtchnl2_queue_vector))\n+\n+/* VIRTCHNL2_MAC_TYPE\n+ * VIRTCHNL2_MAC_ADDR_PRIMARY\n+ * PF/VF driver should set @type to VIRTCHNL2_MAC_ADDR_PRIMARY for the\n+ * primary/device unicast MAC address filter for VIRTCHNL2_OP_ADD_MAC_ADDR and\n+ * VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the underlying control plane\n+ * function to accurately track the MAC address and for VM/function reset.\n+ *\n+ * VIRTCHNL2_MAC_ADDR_EXTRA\n+ * PF/VF driver should set @type to VIRTCHNL2_MAC_ADDR_EXTRA for any extra\n+ * unicast and/or multicast filters that are being added/deleted via\n+ * VIRTCHNL2_OP_ADD_MAC_ADDR/VIRTCHNL2_OP_DEL_MAC_ADDR respectively.\n+ */\n+#define VIRTCHNL2_MAC_ADDR_PRIMARY\t\t1\n+#define VIRTCHNL2_MAC_ADDR_EXTRA\t\t2\n+\n+/* VIRTCHNL2_PROMISC_FLAGS\n+ * Flags used for promiscuous mode\n+ */\n+#define VIRTCHNL2_UNICAST_PROMISC\t\tBIT(0)\n+#define VIRTCHNL2_MULTICAST_PROMISC\t\tBIT(1)\n+\n+/* VIRTCHNL2_PROTO_HDR_TYPE\n+ * Protocol header type within a packet segment. A segment consists of one or\n+ * more protocol headers that make up a logical group of protocol headers. 
Each\n+ * logical group of protocol headers encapsulates or is encapsulated using/by\n+ * tunneling or encapsulation protocols for network virtualization.\n+ */\n+/* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_ANY\t\t\t0\n+#define VIRTCHNL2_PROTO_HDR_PRE_MAC\t\t1\n+/* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_MAC\t\t\t2\n+#define VIRTCHNL2_PROTO_HDR_POST_MAC\t\t3\n+#define VIRTCHNL2_PROTO_HDR_ETHERTYPE\t\t4\n+#define VIRTCHNL2_PROTO_HDR_VLAN\t\t5\n+#define VIRTCHNL2_PROTO_HDR_SVLAN\t\t6\n+#define VIRTCHNL2_PROTO_HDR_CVLAN\t\t7\n+#define VIRTCHNL2_PROTO_HDR_MPLS\t\t8\n+#define VIRTCHNL2_PROTO_HDR_UMPLS\t\t9\n+#define VIRTCHNL2_PROTO_HDR_MMPLS\t\t10\n+#define VIRTCHNL2_PROTO_HDR_PTP\t\t\t11\n+#define VIRTCHNL2_PROTO_HDR_CTRL\t\t12\n+#define VIRTCHNL2_PROTO_HDR_LLDP\t\t13\n+#define VIRTCHNL2_PROTO_HDR_ARP\t\t\t14\n+#define VIRTCHNL2_PROTO_HDR_ECP\t\t\t15\n+#define VIRTCHNL2_PROTO_HDR_EAPOL\t\t16\n+#define VIRTCHNL2_PROTO_HDR_PPPOD\t\t17\n+#define VIRTCHNL2_PROTO_HDR_PPPOE\t\t18\n+/* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_IPV4\t\t19\n+/* IPv4 and IPv6 Fragment header types are only associated to\n+ * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively,\n+ * cannot be used independently.\n+ */\n+/* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_IPV4_FRAG\t\t20\n+/* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_IPV6\t\t21\n+/* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_IPV6_FRAG\t\t22\n+#define VIRTCHNL2_PROTO_HDR_IPV6_EH\t\t23\n+/* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_UDP\t\t\t24\n+/* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_TCP\t\t\t25\n+/* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_SCTP\t\t26\n+/* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_ICMP\t\t27\n+/* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_ICMPV6\t\t28\n+#define VIRTCHNL2_PROTO_HDR_IGMP\t\t29\n+#define VIRTCHNL2_PROTO_HDR_AH\t\t\t30\n+#define VIRTCHNL2_PROTO_HDR_ESP\t\t\t31\n+#define VIRTCHNL2_PROTO_HDR_IKE\t\t\t32\n+#define VIRTCHNL2_PROTO_HDR_NATT_KEEP\t\t33\n+/* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_PAY\t\t\t34\n+#define VIRTCHNL2_PROTO_HDR_L2TPV2\t\t35\n+#define VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL\t36\n+#define VIRTCHNL2_PROTO_HDR_L2TPV3\t\t37\n+#define VIRTCHNL2_PROTO_HDR_GTP\t\t\t38\n+#define VIRTCHNL2_PROTO_HDR_GTP_EH\t\t39\n+#define VIRTCHNL2_PROTO_HDR_GTPCV2\t\t40\n+#define VIRTCHNL2_PROTO_HDR_GTPC_TEID\t\t41\n+#define VIRTCHNL2_PROTO_HDR_GTPU\t\t42\n+#define VIRTCHNL2_PROTO_HDR_GTPU_UL\t\t43\n+#define VIRTCHNL2_PROTO_HDR_GTPU_DL\t\t44\n+#define VIRTCHNL2_PROTO_HDR_ECPRI\t\t45\n+#define VIRTCHNL2_PROTO_HDR_VRRP\t\t46\n+#define VIRTCHNL2_PROTO_HDR_OSPF\t\t47\n+/* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_TUN\t\t\t48\n+#define VIRTCHNL2_PROTO_HDR_GRE\t\t\t49\n+#define VIRTCHNL2_PROTO_HDR_NVGRE\t\t50\n+#define VIRTCHNL2_PROTO_HDR_VXLAN\t\t51\n+#define VIRTCHNL2_PROTO_HDR_VXLAN_GPE\t\t52\n+#define VIRTCHNL2_PROTO_HDR_GENEVE\t\t53\n+#define VIRTCHNL2_PROTO_HDR_NSH\t\t\t54\n+#define VIRTCHNL2_PROTO_HDR_QUIC\t\t55\n+#define VIRTCHNL2_PROTO_HDR_PFCP\t\t56\n+#define 
VIRTCHNL2_PROTO_HDR_PFCP_NODE\t\t57\n+#define VIRTCHNL2_PROTO_HDR_PFCP_SESSION\t58\n+#define VIRTCHNL2_PROTO_HDR_RTP\t\t\t59\n+#define VIRTCHNL2_PROTO_HDR_ROCE\t\t60\n+#define VIRTCHNL2_PROTO_HDR_ROCEV1\t\t61\n+#define VIRTCHNL2_PROTO_HDR_ROCEV2\t\t62\n+/* protocol ids up to 32767 are reserved for AVF use */\n+/* 32768 - 65534 are used for user defined protocol ids */\n+/* VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id */\n+#define VIRTCHNL2_PROTO_HDR_NO_PROTO\t\t65535\n+\n+#define VIRTCHNL2_VERSION_MAJOR_2        2\n+#define VIRTCHNL2_VERSION_MINOR_0        0\n+\n+\n+/* VIRTCHNL2_OP_VERSION\n+ * VF posts its version number to the CP. CP responds with its version number\n+ * in the same format, along with a return code.\n+ * If there is a major version mismatch, then the VF cannot operate.\n+ * If there is a minor version mismatch, then the VF can operate but should\n+ * add a warning to the system log.\n+ *\n+ * This version opcode  MUST always be specified as == 1, regardless of other\n+ * changes in the API. The CP must always respond to this message without\n+ * error regardless of version mismatch.\n+ */\n+struct virtchnl2_version_info {\n+\tu32 major;\n+\tu32 minor;\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);\n+\n+/* VIRTCHNL2_OP_GET_CAPS\n+ * Dataplane driver sends this message to CP to negotiate capabilities and\n+ * provides a virtchnl2_get_capabilities structure with its desired\n+ * capabilities, max_sriov_vfs and num_allocated_vectors.\n+ * CP responds with a virtchnl2_get_capabilities structure updated\n+ * with allowed capabilities and the other fields as below.\n+ * If PF sets max_sriov_vfs as 0, CP will respond with max number of VFs\n+ * that can be created by this PF. For any other value 'n', CP responds\n+ * with max_sriov_vfs set to min(n, x) where x is the max number of VFs\n+ * allowed by CP's policy. max_sriov_vfs is not applicable for VFs.\n+ * If dataplane driver sets num_allocated_vectors as 0, CP will respond with 1\n+ * which is default vector associated with the default mailbox. For any other\n+ * value 'n', CP responds with a value <= n based on the CP's policy of\n+ * max number of vectors for a PF.\n+ * CP will respond with the vector ID of mailbox allocated to the PF in\n+ * mailbox_vector_id and the number of itr index registers in itr_idx_map.\n+ * It also responds with default number of vports that the dataplane driver\n+ * should comeup with in default_num_vports and maximum number of vports that\n+ * can be supported in max_vports\n+ */\n+struct virtchnl2_get_capabilities {\n+\t/* see VIRTCHNL2_CHECKSUM_OFFLOAD_CAPS definitions */\n+\t__le32 csum_caps;\n+\n+\t/* see VIRTCHNL2_SEGMENTATION_OFFLOAD_CAPS definitions */\n+\t__le32 seg_caps;\n+\n+\t/* see VIRTCHNL2_HEADER_SPLIT_CAPS definitions */\n+\t__le32 hsplit_caps;\n+\n+\t/* see VIRTCHNL2_RSC_OFFLOAD_CAPS definitions */\n+\t__le32 rsc_caps;\n+\n+\t/* see VIRTCHNL2_RSS_FLOW_TYPE_CAPS definitions  */\n+\t__le64 rss_caps;\n+\n+\n+\t/* see VIRTCHNL2_OTHER_CAPS definitions  */\n+\t__le64 other_caps;\n+\n+\t/* DYN_CTL register offset and vector id for mailbox provided by CP */\n+\t__le32 mailbox_dyn_ctl;\n+\t__le16 mailbox_vector_id;\n+\t/* Maximum number of allocated vectors for the device */\n+\t__le16 num_allocated_vectors;\n+\n+\t/* Maximum number of queues that can be supported */\n+\t__le16 max_rx_q;\n+\t__le16 max_tx_q;\n+\t__le16 max_rx_bufq;\n+\t__le16 max_tx_complq;\n+\n+\t/* The PF sends the maximum VFs it is requesting. 
The CP responds with\n+\t * the maximum VFs granted.\n+\t */\n+\t__le16 max_sriov_vfs;\n+\n+\t/* maximum number of vports that can be supported */\n+\t__le16 max_vports;\n+\t/* default number of vports driver should allocate on load */\n+\t__le16 default_num_vports;\n+\n+\t/* Max header length hardware can parse/checksum, in bytes */\n+\t__le16 max_tx_hdr_size;\n+\n+\t/* Max number of scatter gather buffers that can be sent per transmit\n+\t * packet without needing to be linearized\n+\t */\n+\tu8 max_sg_bufs_per_tx_pkt;\n+\n+\t/* see VIRTCHNL2_ITR_IDX definition */\n+\tu8 itr_idx_map;\n+\n+\t__le16 pad1;\n+\n+\t/* version of Control Plane that is running */\n+\t__le16 oem_cp_ver_major;\n+\t__le16 oem_cp_ver_minor;\n+\t/* see VIRTCHNL2_DEVICE_TYPE definitions */\n+\t__le32 device_type;\n+\n+\tu8 reserved[12];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);\n+\n+struct virtchnl2_queue_reg_chunk {\n+\t/* see VIRTCHNL2_QUEUE_TYPE definitions */\n+\t__le32 type;\n+\t__le32 start_queue_id;\n+\t__le32 num_queues;\n+\t__le32 pad;\n+\n+\t/* Queue tail register offset and spacing provided by CP */\n+\t__le64 qtail_reg_start;\n+\t__le32 qtail_reg_spacing;\n+\n+\tu8 reserved[4];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);\n+\n+/* structure to specify several chunks of contiguous queues */\n+struct virtchnl2_queue_reg_chunks {\n+\t__le16 num_chunks;\n+\tu8 reserved[6];\n+\tstruct virtchnl2_queue_reg_chunk chunks[1];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_reg_chunks);\n+\n+#define VIRTCHNL2_ETH_LENGTH_OF_ADDRESS  6\n+\n+/* VIRTCHNL2_OP_CREATE_VPORT\n+ * PF sends this message to CP to create a vport by filling in required\n+ * fields of virtchnl2_create_vport structure.\n+ * CP responds with the updated virtchnl2_create_vport structure containing the\n+ * necessary fields followed by chunks which in turn will have an array of\n+ * num_chunks entries of virtchnl2_queue_chunk structures.\n+ */\n+struct virtchnl2_create_vport {\n+\t/* PF/VF populates the following fields on request */\n+\t/* see VIRTCHNL2_VPORT_TYPE definitions */\n+\t__le16 vport_type;\n+\n+\t/* see VIRTCHNL2_QUEUE_MODEL definitions */\n+\t__le16 txq_model;\n+\n+\t/* see VIRTCHNL2_QUEUE_MODEL definitions */\n+\t__le16 rxq_model;\n+\t__le16 num_tx_q;\n+\t/* valid only if txq_model is split queue */\n+\t__le16 num_tx_complq;\n+\t__le16 num_rx_q;\n+\t/* valid only if rxq_model is split queue */\n+\t__le16 num_rx_bufq;\n+\t/* relative receive queue index to be used as default */\n+\t__le16 default_rx_q;\n+\t/* used to align PF and CP in case of default multiple vports; it is\n+\t * filled by the PF and CP returns the same value, to enable the driver\n+\t * to support multiple asynchronous parallel CREATE_VPORT requests and\n+\t * associate a response to a specific request.\n+\t */\n+\t__le16 vport_index;\n+\n+\t/* CP populates the following fields on response */\n+\t__le16 max_mtu;\n+\t__le32 vport_id;\n+\tu8 default_mac_addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];\n+\t__le16 pad;\n+\t/* see VIRTCHNL2_RX_DESC_IDS definitions */\n+\t__le64 rx_desc_ids;\n+\t/* see VIRTCHNL2_TX_DESC_IDS definitions */\n+\t__le64 tx_desc_ids;\n+\n+#define MAX_Q_REGIONS 16\n+\t__le32 max_qs_per_qregion[MAX_Q_REGIONS];\n+\t__le32 qregion_total_qs;\n+\t__le16 qregion_type;\n+\t__le16 pad2;\n+\n+\t/* see VIRTCHNL2_RSS_ALGORITHM definitions */\n+\t__le32 rss_algorithm;\n+\t__le16 rss_key_size;\n+\t__le16 rss_lut_size;\n+\n+\t/* see VIRTCHNL2_HEADER_SPLIT_CAPS definitions */\n+\t__le32 rx_split_pos;\n+\n+\tu8 
reserved[20];\n+\tstruct virtchnl2_queue_reg_chunks chunks;\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(192, virtchnl2_create_vport);\n+\n+/* VIRTCHNL2_OP_DESTROY_VPORT\n+ * VIRTCHNL2_OP_ENABLE_VPORT\n+ * VIRTCHNL2_OP_DISABLE_VPORT\n+ * PF sends this message to CP to destroy, enable or disable a vport by filling\n+ * in the vport_id in virtchnl2_vport structure.\n+ * CP responds with the status of the requested operation.\n+ */\n+struct virtchnl2_vport {\n+\t__le32 vport_id;\n+\tu8 reserved[4];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport);\n+\n+/* Transmit queue config info */\n+struct virtchnl2_txq_info {\n+\t__le64 dma_ring_addr;\n+\n+\t/* see VIRTCHNL2_QUEUE_TYPE definitions */\n+\t__le32 type;\n+\n+\t__le32 queue_id;\n+\t/* valid only if queue model is split and type is transmit queue. Used\n+\t * in many to one mapping of transmit queues to completion queue\n+\t */\n+\t__le16 relative_queue_id;\n+\n+\t/* see VIRTCHNL2_QUEUE_MODEL definitions */\n+\t__le16 model;\n+\n+\t/* see VIRTCHNL2_TXQ_SCHED_MODE definitions */\n+\t__le16 sched_mode;\n+\n+\t/* see VIRTCHNL2_TXQ_FLAGS definitions */\n+\t__le16 qflags;\n+\t__le16 ring_len;\n+\n+\t/* valid only if queue model is split and type is transmit queue */\n+\t__le16 tx_compl_queue_id;\n+\t/* valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MAILBOX_TX */\n+\t/* see VIRTCHNL2_PEER_TYPE definitions */\n+\t__le16 peer_type;\n+\t/* valid only if queue type is CONFIG_TX and used to deliver messages\n+\t * for the respective CONFIG_TX queue\n+\t */\n+\t__le16 peer_rx_queue_id;\n+\n+\t/* value ranges from 0 to 15 */\n+\t__le16 qregion_id;\n+\tu8 pad[2];\n+\n+\t/* Egress pasid is used for SIOV use case */\n+\t__le32 egress_pasid;\n+\t__le32 egress_hdr_pasid;\n+\t__le32 egress_buf_pasid;\n+\n+\tu8 reserved[8];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info);\n+\n+/* VIRTCHNL2_OP_CONFIG_TX_QUEUES\n+ * PF sends this message to set up parameters for one or more transmit queues.\n+ * This message contains an array of num_qinfo instances of virtchnl2_txq_info\n+ * structures. CP configures requested queues and returns a status code. 
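As a sketch, the\n+ * expected total size for num_qinfo queues is then\n+ * sizeof(struct virtchnl2_config_tx_queues) + (num_qinfo - 1) *\n+ * sizeof(struct virtchnl2_txq_info), since the structure already embeds\n+ * one virtchnl2_txq_info; this mirrors the length check in\n+ * virtchnl2_vc_validate_vf_msg below. 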
If\n+ * num_qinfo specified is greater than the number of queues associated with the\n+ * vport, an error is returned and no queues are configured.\n+ */\n+struct virtchnl2_config_tx_queues {\n+\t__le32 vport_id;\n+\t__le16 num_qinfo;\n+\n+\tu8 reserved[10];\n+\tstruct virtchnl2_txq_info qinfo[1];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(72, virtchnl2_config_tx_queues);\n+\n+/* Receive queue config info */\n+struct virtchnl2_rxq_info {\n+\t/* see VIRTCHNL2_RX_DESC_IDS definitions */\n+\t__le64 desc_ids;\n+\t__le64 dma_ring_addr;\n+\n+\t/* see VIRTCHNL2_QUEUE_TYPE definitions */\n+\t__le32 type;\n+\t__le32 queue_id;\n+\n+\t/* see VIRTCHNL2_QUEUE_MODEL definitions */\n+\t__le16 model;\n+\n+\t__le16 hdr_buffer_size;\n+\t__le32 data_buffer_size;\n+\t__le32 max_pkt_size;\n+\n+\t__le16 ring_len;\n+\tu8 buffer_notif_stride;\n+\tu8 pad[1];\n+\n+\t/* Applicable only for receive buffer queues */\n+\t__le64 dma_head_wb_addr;\n+\n+\t/* Applicable only for receive completion queues */\n+\t/* see VIRTCHNL2_RXQ_FLAGS definitions */\n+\t__le16 qflags;\n+\n+\t__le16 rx_buffer_low_watermark;\n+\n+\t/* valid only in split queue model */\n+\t__le16 rx_bufq1_id;\n+\t/* valid only in split queue model */\n+\t__le16 rx_bufq2_id;\n+\t/* It indicates if there is a second buffer; rx_bufq2_id is valid only\n+\t * if this field is set\n+\t */\n+\tu8 bufq2_ena;\n+\tu8 pad2;\n+\n+\t/* value ranges from 0 to 15 */\n+\t__le16 qregion_id;\n+\n+\t/* Ingress pasid is used for SIOV use case */\n+\t__le32 ingress_pasid;\n+\t__le32 ingress_hdr_pasid;\n+\t__le32 ingress_buf_pasid;\n+\n+\tu8 reserved[16];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info);\n+\n+/* VIRTCHNL2_OP_CONFIG_RX_QUEUES\n+ * PF sends this message to set up parameters for one or more receive queues.\n+ * This message contains an array of num_qinfo instances of virtchnl2_rxq_info\n+ * structures. CP configures requested queues and returns a status code.\n+ * If the number of queues specified is greater than the number of queues\n+ * associated with the vport, an error is returned and no queues are configured.\n+ */\n+struct virtchnl2_config_rx_queues {\n+\t__le32 vport_id;\n+\t__le16 num_qinfo;\n+\n+\tu8 reserved[18];\n+\tstruct virtchnl2_rxq_info qinfo[1];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(112, virtchnl2_config_rx_queues);\n+\n+/* VIRTCHNL2_OP_ADD_QUEUES\n+ * PF sends this message to request additional transmit/receive queues beyond\n+ * the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues\n+ * structure is used to specify the number of each type of queues.\n+ * CP responds with the same structure with the actual number of queues assigned\n+ * followed by num_chunks of virtchnl2_queue_chunk structures.\n+ */\n+struct virtchnl2_add_queues {\n+\t__le32 vport_id;\n+\t__le16 num_tx_q;\n+\t__le16 num_tx_complq;\n+\t__le16 num_rx_q;\n+\t__le16 num_rx_bufq;\n+\tu8 reserved[4];\n+\tstruct virtchnl2_queue_reg_chunks chunks;\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_add_queues);\n+\n+/* Structure to specify a chunk of contiguous interrupt vectors */\n+struct virtchnl2_vector_chunk {\n+\t__le16 start_vector_id;\n+\t__le16 start_evv_id;\n+\t__le16 num_vectors;\n+\t__le16 pad1;\n+\n+\t/* Register offsets and spacing provided by CP.\n+\t * Dynamic control registers are used for enabling/disabling/re-enabling\n+\t * interrupts and updating interrupt rates in the hotpath. 
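As a sketch, the\n+\t * dynctl register for the i-th vector of a chunk would be expected at\n+\t * dynctl_reg_start + i * dynctl_reg_spacing, and likewise for the itrn\n+\t * registers. 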
Any changes\n+\t * to interrupt rates in the dynamic control registers will be reflected\n+\t * in the interrupt throttling rate registers.\n+\t * itrn registers are used to update interrupt rates for specific\n+\t * interrupt indices without modifying the state of the interrupt.\n+\t */\n+\t__le32 dynctl_reg_start;\n+\t__le32 dynctl_reg_spacing;\n+\n+\t__le32 itrn_reg_start;\n+\t__le32 itrn_reg_spacing;\n+\tu8 reserved[8];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);\n+\n+/* Structure to specify several chunks of contiguous interrupt vectors */\n+struct virtchnl2_vector_chunks {\n+\t__le16 num_vchunks;\n+\tu8 reserved[14];\n+\tstruct virtchnl2_vector_chunk vchunks[1];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(48, virtchnl2_vector_chunks);\n+\n+/* VIRTCHNL2_OP_ALLOC_VECTORS\n+ * PF sends this message to request additional interrupt vectors beyond the\n+ * ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors\n+ * structure is used to specify the number of vectors requested. CP responds\n+ * with the same structure with the actual number of vectors assigned followed\n+ * by virtchnl2_vector_chunks structure identifying the vector ids.\n+ */\n+struct virtchnl2_alloc_vectors {\n+\t__le16 num_vectors;\n+\tu8 reserved[14];\n+\tstruct virtchnl2_vector_chunks vchunks;\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(64, virtchnl2_alloc_vectors);\n+\n+/* VIRTCHNL2_OP_DEALLOC_VECTORS\n+ * PF sends this message to release the vectors.\n+ * PF sends virtchnl2_vector_chunks struct to specify the vectors it is giving\n+ * away. CP performs requested action and returns status.\n+ */\n+\n+/* VIRTCHNL2_OP_GET_RSS_LUT\n+ * VIRTCHNL2_OP_SET_RSS_LUT\n+ * PF sends this message to get or set the RSS lookup table. Only supported if\n+ * both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration\n+ * negotiation. Uses the virtchnl2_rss_lut structure.\n+ */\n+struct virtchnl2_rss_lut {\n+\t__le32 vport_id;\n+\t__le16 lut_entries_start;\n+\t__le16 lut_entries;\n+\tu8 reserved[4];\n+\t__le32 lut[1]; /* RSS lookup table */\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_lut);\n+\n+/* VIRTCHNL2_OP_GET_RSS_KEY\n+ * PF sends this message to get the RSS key. Only supported if both PF and CP\n+ * drivers set the VIRTCHNL2_CAP_RSS bit during configuration negotiation. Uses\n+ * the virtchnl2_rss_key structure.\n+ */\n+\n+/* VIRTCHNL2_OP_GET_RSS_HASH\n+ * VIRTCHNL2_OP_SET_RSS_HASH\n+ * PF sends these messages to get and set the hash filter enable bits for RSS.\n+ * By default, the CP sets these to all possible traffic types that the\n+ * hardware supports. The PF can query this value if it wants to change the\n+ * traffic types that are hashed by the hardware.\n+ * Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit\n+ * during configuration negotiation.\n+ */\n+struct virtchnl2_rss_hash {\n+\t/* Packet Type Groups bitmap */\n+\t__le64 ptype_groups;\n+\t__le32 vport_id;\n+\tu8 reserved[4];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash);\n+\n+/* VIRTCHNL2_OP_SET_SRIOV_VFS\n+ * This message is used to set the number of SRIOV VFs to be created. The\n+ * actual allocation of resources for the VFs in terms of vport, queues and\n+ * interrupts is done by CP. 
When this call completes, the APF driver calls\n+ * pci_enable_sriov to let the OS instantiate the SRIOV PCIE devices.\n+ * Setting the number of VFs to 0 destroys all the VFs of this function.\n+ */\n+\n+struct virtchnl2_sriov_vfs_info {\n+\t__le16 num_vfs;\n+\t__le16 pad;\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);\n+\n+/* VIRTCHNL2_OP_CREATE_ADI\n+ * PF sends this message to CP to create an ADI by filling in required\n+ * fields of virtchnl2_create_adi structure.\n+ * CP responds with the updated virtchnl2_create_adi structure containing the\n+ * necessary fields followed by chunks which in turn will have an array of\n+ * num_chunks entries of virtchnl2_queue_chunk structures.\n+ */\n+struct virtchnl2_create_adi {\n+\t/* PF sends PASID to CP */\n+\t__le32 pasid;\n+\t/*\n+\t * mbx_id is set to 1 by PF when requesting CP to provide HW mailbox\n+\t * id, else it is set to 0 by PF\n+\t */\n+\t__le16 mbx_id;\n+\t/* PF sends mailbox vector id to CP */\n+\t__le16 mbx_vec_id;\n+\t/* CP populates ADI id */\n+\t__le16 adi_id;\n+\tu8 reserved[64];\n+\tu8 pad[6];\n+\t/* CP populates queue chunks */\n+\tstruct virtchnl2_queue_reg_chunks chunks;\n+\t/* PF sends vector chunks to CP */\n+\tstruct virtchnl2_vector_chunks vchunks;\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(168, virtchnl2_create_adi);\n+\n+/* VIRTCHNL2_OP_DESTROY_ADI\n+ * PF sends this message to CP to destroy an ADI by filling\n+ * in the adi_id in virtchnl2_destroy_adi structure.\n+ * CP responds with the status of the requested operation.\n+ */\n+struct virtchnl2_destroy_adi {\n+\t__le16 adi_id;\n+\tu8 reserved[2];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_destroy_adi);\n+\n+/* Based on the descriptor type the PF supports, CP fills ptype_id_10 or\n+ * ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value\n+ * is set to 0xFFFF, PF should consider this ptype as a dummy one and it is the\n+ * last ptype.\n+ */\n+struct virtchnl2_ptype {\n+\t__le16 ptype_id_10;\n+\tu8 ptype_id_8;\n+\t/* number of protocol ids the packet supports, maximum of 32\n+\t * protocol ids are supported\n+\t */\n+\tu8 proto_id_count;\n+\t__le16 pad;\n+\t/* proto_id_count decides the allocation of protocol id array */\n+\t/* see VIRTCHNL2_PROTO_HDR_TYPE */\n+\t__le16 proto_id[1];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptype);\n+\n+/* VIRTCHNL2_OP_GET_PTYPE_INFO\n+ * PF sends this message to CP to get all supported packet types. It does so by\n+ * filling in start_ptype_id and num_ptypes. Depending on descriptor type the\n+ * PF supports, it sets num_ptypes to 1024 (10-bit ptype) for flex descriptor\n+ * and 256 (8-bit ptype) for base descriptor support. CP responds back to PF by\n+ * populating start_ptype_id, num_ptypes and array of ptypes. If all ptypes\n+ * don't fit into one mailbox buffer, CP splits ptype info into multiple\n+ * messages, where each message will have the start ptype id, number of ptypes\n+ * sent in that message and the ptype array itself. When CP is done updating\n+ * all ptype information it extracted from the package (number of ptypes\n+ * extracted might be less than what PF expects), it will append a dummy ptype\n+ * (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF) to the ptype\n+ * array. 
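As an illustrative sketch (not part of this API), a PF could walk the\n+ * variable-sized entries of one message as follows:\n+ *\n+ *\tstruct virtchnl2_ptype *p = info->ptype;\n+ *\tfor (i = 0; i < info->num_ptypes; i++) {\n+ *\t\tif (p->ptype_id_10 == 0xFFFF)\n+ *\t\t\tbreak;\n+ *\t\tp = (struct virtchnl2_ptype *)((u8 *)p +\n+ *\t\t     offsetof(struct virtchnl2_ptype, proto_id) +\n+ *\t\t     p->proto_id_count * sizeof(p->proto_id[0]));\n+ *\t}\n+ *\n+ * where each entry's size depends on its proto_id_count and the 0xFFFF\n+ * entry terminates the walk. 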
PF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO\n+ * messages.\n+ */\n+struct virtchnl2_get_ptype_info {\n+\t__le16 start_ptype_id;\n+\t__le16 num_ptypes;\n+\t__le32 pad;\n+\tstruct virtchnl2_ptype ptype[1];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_get_ptype_info);\n+\n+/* VIRTCHNL2_OP_GET_STATS\n+ * PF/VF sends this message to CP to get the updated stats by specifying the\n+ * vport_id. CP responds with stats in struct virtchnl2_vport_stats.\n+ */\n+struct virtchnl2_vport_stats {\n+\t__le32 vport_id;\n+\tu8 pad[4];\n+\n+\t__le64 rx_bytes;\t\t/* received bytes */\n+\t__le64 rx_unicast;\t\t/* received unicast pkts */\n+\t__le64 rx_multicast;\t\t/* received multicast pkts */\n+\t__le64 rx_broadcast;\t\t/* received broadcast pkts */\n+\t__le64 rx_discards;\n+\t__le64 rx_errors;\n+\t__le64 rx_unknown_protocol;\n+\t__le64 tx_bytes;\t\t/* transmitted bytes */\n+\t__le64 tx_unicast;\t\t/* transmitted unicast pkts */\n+\t__le64 tx_multicast;\t\t/* transmitted multicast pkts */\n+\t__le64 tx_broadcast;\t\t/* transmitted broadcast pkts */\n+\t__le64 tx_discards;\n+\t__le64 tx_errors;\n+\t__le64 rx_invalid_frame_length;\n+\t__le64 rx_overflow_drop;\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);\n+\n+/* VIRTCHNL2_OP_EVENT\n+ * CP sends this message to inform the PF/VF driver of events that may affect\n+ * it. No direct response is expected from the driver, though it may generate\n+ * other messages in response to this one.\n+ */\n+struct virtchnl2_event {\n+\t/* see VIRTCHNL2_EVENT_CODES definitions */\n+\t__le32 event;\n+\t/* link_speed provided in Mbps */\n+\t__le32 link_speed;\n+\t__le32 vport_id;\n+\tu8 link_status;\n+\tu8 pad[1];\n+\t/* CP sends reset notification to PF with corresponding ADI ID */\n+\t__le16 adi_id;\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event);\n+\n+/* VIRTCHNL2_OP_GET_RSS_KEY\n+ * VIRTCHNL2_OP_SET_RSS_KEY\n+ * PF/VF sends this message to get or set the RSS key. Only supported if both\n+ * PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration\n+ * negotiation. Uses the virtchnl2_rss_key structure.\n+ */\n+struct virtchnl2_rss_key {\n+\t__le32 vport_id;\n+\t__le16 key_len;\n+\tu8 pad;\n+\tu8 key[1];         /* RSS hash key, packed bytes */\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key);\n+\n+/* structure to specify a chunk of contiguous queues */\n+struct virtchnl2_queue_chunk {\n+\t/* see VIRTCHNL2_QUEUE_TYPE definitions */\n+\t__le32 type;\n+\t__le32 start_queue_id;\n+\t__le32 num_queues;\n+\tu8 reserved[4];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);\n+\n+/* structure to specify several chunks of contiguous queues */\n+struct virtchnl2_queue_chunks {\n+\t__le16 num_chunks;\n+\tu8 reserved[6];\n+\tstruct virtchnl2_queue_chunk chunks[1];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_chunks);\n+\n+/* VIRTCHNL2_OP_ENABLE_QUEUES\n+ * VIRTCHNL2_OP_DISABLE_QUEUES\n+ * VIRTCHNL2_OP_DEL_QUEUES\n+ *\n+ * PF sends these messages to enable, disable or delete queues specified in\n+ * chunks. PF sends virtchnl2_del_ena_dis_queues struct to specify the queues\n+ * to be enabled/disabled/deleted. Also applicable to single queue receive or\n+ * transmit. 
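For example, a\n+ * chunk with start_queue_id 5 and num_queues 1 addresses just queue 5. 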
CP performs requested action and returns status.\n+ */\n+struct virtchnl2_del_ena_dis_queues {\n+\t__le32 vport_id;\n+\tu8 reserved[4];\n+\tstruct virtchnl2_queue_chunks chunks;\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_del_ena_dis_queues);\n+\n+/* Queue to vector mapping */\n+struct virtchnl2_queue_vector {\n+\t__le32 queue_id;\n+\t__le16 vector_id;\n+\tu8 pad[2];\n+\n+\t/* see VIRTCHNL2_ITR_IDX definitions */\n+\t__le32 itr_idx;\n+\n+\t/* see VIRTCHNL2_QUEUE_TYPE definitions */\n+\t__le32 queue_type;\n+\tu8 reserved[8];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector);\n+\n+/* VIRTCHNL2_OP_MAP_QUEUE_VECTOR\n+ * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR\n+ *\n+ * PF sends this message to map or unmap queues to vectors and interrupt\n+ * throttling rate index registers. External data buffer contains\n+ * virtchnl2_queue_vector_maps structure that contains num_qv_maps of\n+ * virtchnl2_queue_vector structures. CP maps the requested queue vector maps\n+ * after validating the queue and vector ids and returns a status code.\n+ */\n+struct virtchnl2_queue_vector_maps {\n+\t__le32 vport_id;\n+\t__le16 num_qv_maps;\n+\tu8 pad[10];\n+\tstruct virtchnl2_queue_vector qv_maps[1];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_vector_maps);\n+\n+/* VIRTCHNL2_OP_LOOPBACK\n+ *\n+ * PF/VF sends this message to transition to/from the loopback state. Setting\n+ * the 'enable' to 1 enables the loopback state and setting 'enable' to 0\n+ * disables it. CP configures the state to loopback and returns status.\n+ */\n+struct virtchnl2_loopback {\n+\t__le32 vport_id;\n+\tu8 enable;\n+\tu8 pad[3];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback);\n+\n+/* structure to specify each MAC address */\n+struct virtchnl2_mac_addr {\n+\tu8 addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];\n+\t/* see VIRTCHNL2_MAC_TYPE definitions */\n+\tu8 type;\n+\tu8 pad;\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr);\n+\n+/* VIRTCHNL2_OP_ADD_MAC_ADDR\n+ * VIRTCHNL2_OP_DEL_MAC_ADDR\n+ *\n+ * PF/VF driver uses this structure to send a list of MAC addresses to be\n+ * added/deleted to the CP. The CP performs the action and returns the\n+ * status.\n+ */\n+struct virtchnl2_mac_addr_list {\n+\t__le32 vport_id;\n+\t__le16 num_mac_addr;\n+\tu8 pad[2];\n+\tstruct virtchnl2_mac_addr mac_addr_list[1];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mac_addr_list);\n+\n+/* VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE\n+ *\n+ * PF/VF sends the vport id and flags to the CP. The CP performs the action\n+ * and returns the status.\n+ */\n+struct virtchnl2_promisc_info {\n+\t__le32 vport_id;\n+\t/* see VIRTCHNL2_PROMISC_FLAGS definitions */\n+\t__le16 flags;\n+\tu8 pad[2];\n+};\n+\n+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);\n+\n+\n+static inline const char *virtchnl2_op_str(__le32 v_opcode)\n+{\n+\tswitch (v_opcode) {\n+\tcase VIRTCHNL2_OP_VERSION:\n+\t\treturn \"VIRTCHNL2_OP_VERSION\";\n+\tcase VIRTCHNL2_OP_GET_CAPS:\n+\t\treturn \"VIRTCHNL2_OP_GET_CAPS\";\n+\tcase VIRTCHNL2_OP_CREATE_VPORT:\n+\t\treturn \"VIRTCHNL2_OP_CREATE_VPORT\";\n+\tcase VIRTCHNL2_OP_DESTROY_VPORT:\n+\t\treturn \"VIRTCHNL2_OP_DESTROY_VPORT\";\n+\tcase VIRTCHNL2_OP_ENABLE_VPORT:\n+\t\treturn \"VIRTCHNL2_OP_ENABLE_VPORT\";\n+\tcase VIRTCHNL2_OP_DISABLE_VPORT:\n+\t\treturn \"VIRTCHNL2_OP_DISABLE_VPORT\";\n+\tcase VIRTCHNL2_OP_CONFIG_TX_QUEUES:\n+\t\treturn \"VIRTCHNL2_OP_CONFIG_TX_QUEUES\";\n+\tcase VIRTCHNL2_OP_CONFIG_RX_QUEUES:\n+\t\treturn \"VIRTCHNL2_OP_CONFIG_RX_QUEUES\";\n+\tcase VIRTCHNL2_OP_ENABLE_QUEUES:\n+\t\treturn 
\"VIRTCHNL2_OP_ENABLE_QUEUES\";\n+\tcase VIRTCHNL2_OP_DISABLE_QUEUES:\n+\t\treturn \"VIRTCHNL2_OP_DISABLE_QUEUES\";\n+\tcase VIRTCHNL2_OP_ADD_QUEUES:\n+\t\treturn \"VIRTCHNL2_OP_ADD_QUEUES\";\n+\tcase VIRTCHNL2_OP_DEL_QUEUES:\n+\t\treturn \"VIRTCHNL2_OP_DEL_QUEUES\";\n+\tcase VIRTCHNL2_OP_MAP_QUEUE_VECTOR:\n+\t\treturn \"VIRTCHNL2_OP_MAP_QUEUE_VECTOR\";\n+\tcase VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:\n+\t\treturn \"VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR\";\n+\tcase VIRTCHNL2_OP_GET_RSS_KEY:\n+\t\treturn \"VIRTCHNL2_OP_GET_RSS_KEY\";\n+\tcase VIRTCHNL2_OP_SET_RSS_KEY:\n+\t\treturn \"VIRTCHNL2_OP_SET_RSS_KEY\";\n+\tcase VIRTCHNL2_OP_GET_RSS_LUT:\n+\t\treturn \"VIRTCHNL2_OP_GET_RSS_LUT\";\n+\tcase VIRTCHNL2_OP_SET_RSS_LUT:\n+\t\treturn \"VIRTCHNL2_OP_SET_RSS_LUT\";\n+\tcase VIRTCHNL2_OP_GET_RSS_HASH:\n+\t\treturn \"VIRTCHNL2_OP_GET_RSS_HASH\";\n+\tcase VIRTCHNL2_OP_SET_RSS_HASH:\n+\t\treturn \"VIRTCHNL2_OP_SET_RSS_HASH\";\n+\tcase VIRTCHNL2_OP_SET_SRIOV_VFS:\n+\t\treturn \"VIRTCHNL2_OP_SET_SRIOV_VFS\";\n+\tcase VIRTCHNL2_OP_ALLOC_VECTORS:\n+\t\treturn \"VIRTCHNL2_OP_ALLOC_VECTORS\";\n+\tcase VIRTCHNL2_OP_DEALLOC_VECTORS:\n+\t\treturn \"VIRTCHNL2_OP_DEALLOC_VECTORS\";\n+\tcase VIRTCHNL2_OP_GET_PTYPE_INFO:\n+\t\treturn \"VIRTCHNL2_OP_GET_PTYPE_INFO\";\n+\tcase VIRTCHNL2_OP_GET_STATS:\n+\t\treturn \"VIRTCHNL2_OP_GET_STATS\";\n+\tcase VIRTCHNL2_OP_EVENT:\n+\t\treturn \"VIRTCHNL2_OP_EVENT\";\n+\tcase VIRTCHNL2_OP_RESET_VF:\n+\t\treturn \"VIRTCHNL2_OP_RESET_VF\";\n+\tcase VIRTCHNL2_OP_CREATE_ADI:\n+\t\treturn \"VIRTCHNL2_OP_CREATE_ADI\";\n+\tcase VIRTCHNL2_OP_DESTROY_ADI:\n+\t\treturn \"VIRTCHNL2_OP_DESTROY_ADI\";\n+\tdefault:\n+\t\treturn \"Unsupported (update virtchnl2.h)\";\n+\t}\n+}\n+\n+/**\n+ * virtchnl2_vc_validate_vf_msg\n+ * @ver: Virtchnl2 version info\n+ * @v_opcode: Opcode for the message\n+ * @msg: pointer to the msg buffer\n+ * @msglen: msg length\n+ *\n+ * validate msg format against struct for each opcode\n+ */\n+static inline int\n+virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u32 v_opcode,\n+\t\t\t     u8 *msg, __le16 msglen)\n+{\n+\tbool err_msg_format = false;\n+\t__le32 valid_len = 0;\n+\n+\t/* Validate message length. 
*/\n+\tswitch (v_opcode) {\n+\tcase VIRTCHNL2_OP_VERSION:\n+\t\tvalid_len = sizeof(struct virtchnl2_version_info);\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_GET_CAPS:\n+\t\tvalid_len = sizeof(struct virtchnl2_get_capabilities);\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_CREATE_VPORT:\n+\t\tvalid_len = sizeof(struct virtchnl2_create_vport);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl2_create_vport *cvport =\n+\t\t\t\t(struct virtchnl2_create_vport *)msg;\n+\n+\t\t\tif (cvport->chunks.num_chunks == 0) {\n+\t\t\t\t/* zero chunks is allowed as input */\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += (cvport->chunks.num_chunks - 1) *\n+\t\t\t\t      sizeof(struct virtchnl2_queue_reg_chunk);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_CREATE_ADI:\n+\t\tvalid_len = sizeof(struct virtchnl2_create_adi);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl2_create_adi *cadi =\n+\t\t\t\t(struct virtchnl2_create_adi *)msg;\n+\n+\t\t\tif (cadi->chunks.num_chunks == 0) {\n+\t\t\t\t/* zero chunks is allowed as input */\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tif (cadi->vchunks.num_vchunks == 0) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tvalid_len += (cadi->chunks.num_chunks - 1) *\n+\t\t\t\t      sizeof(struct virtchnl2_queue_reg_chunk);\n+\t\t\tvalid_len += (cadi->vchunks.num_vchunks - 1) *\n+\t\t\t\t      sizeof(struct virtchnl2_vector_chunk);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_DESTROY_ADI:\n+\t\tvalid_len = sizeof(struct virtchnl2_destroy_adi);\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_DESTROY_VPORT:\n+\tcase VIRTCHNL2_OP_ENABLE_VPORT:\n+\tcase VIRTCHNL2_OP_DISABLE_VPORT:\n+\t\tvalid_len = sizeof(struct virtchnl2_vport);\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_CONFIG_TX_QUEUES:\n+\t\tvalid_len = sizeof(struct virtchnl2_config_tx_queues);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl2_config_tx_queues *ctq =\n+\t\t\t\t(struct virtchnl2_config_tx_queues *)msg;\n+\t\t\tif (ctq->num_qinfo == 0) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tvalid_len += (ctq->num_qinfo - 1) *\n+\t\t\t\t     sizeof(struct virtchnl2_txq_info);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_CONFIG_RX_QUEUES:\n+\t\tvalid_len = sizeof(struct virtchnl2_config_rx_queues);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl2_config_rx_queues *crq =\n+\t\t\t\t(struct virtchnl2_config_rx_queues *)msg;\n+\t\t\tif (crq->num_qinfo == 0) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tvalid_len += (crq->num_qinfo - 1) *\n+\t\t\t\t     sizeof(struct virtchnl2_rxq_info);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_ADD_QUEUES:\n+\t\tvalid_len = sizeof(struct virtchnl2_add_queues);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl2_add_queues *add_q =\n+\t\t\t\t(struct virtchnl2_add_queues *)msg;\n+\n+\t\t\tif (add_q->chunks.num_chunks == 0) {\n+\t\t\t\t/* zero chunks is allowed as input */\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += (add_q->chunks.num_chunks - 1) *\n+\t\t\t\t      sizeof(struct virtchnl2_queue_reg_chunk);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_ENABLE_QUEUES:\n+\tcase VIRTCHNL2_OP_DISABLE_QUEUES:\n+\tcase VIRTCHNL2_OP_DEL_QUEUES:\n+\t\tvalid_len = sizeof(struct virtchnl2_del_ena_dis_queues);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl2_del_ena_dis_queues *qs =\n+\t\t\t\t(struct virtchnl2_del_ena_dis_queues *)msg;\n+\t\t\tif (qs->chunks.num_chunks == 0 ||\n+\t\t\t    qs->chunks.num_chunks > VIRTCHNL2_OP_DEL_ENABLE_DISABLE_QUEUES_MAX) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tvalid_len += 
(qs->chunks.num_chunks - 1) *\n+\t\t\t\t      sizeof(struct virtchnl2_queue_chunk);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_MAP_QUEUE_VECTOR:\n+\tcase VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:\n+\t\tvalid_len = sizeof(struct virtchnl2_queue_vector_maps);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl2_queue_vector_maps *v_qp =\n+\t\t\t\t(struct virtchnl2_queue_vector_maps *)msg;\n+\t\t\tif (v_qp->num_qv_maps == 0 ||\n+\t\t\t    v_qp->num_qv_maps > VIRTCHNL2_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tvalid_len += (v_qp->num_qv_maps - 1) *\n+\t\t\t\t      sizeof(struct virtchnl2_queue_vector);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_ALLOC_VECTORS:\n+\t\tvalid_len = sizeof(struct virtchnl2_alloc_vectors);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl2_alloc_vectors *v_av =\n+\t\t\t\t(struct virtchnl2_alloc_vectors *)msg;\n+\n+\t\t\tif (v_av->vchunks.num_vchunks == 0) {\n+\t\t\t\t/* zero chunks is allowed as input */\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += (v_av->vchunks.num_vchunks - 1) *\n+\t\t\t\t      sizeof(struct virtchnl2_vector_chunk);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_DEALLOC_VECTORS:\n+\t\tvalid_len = sizeof(struct virtchnl2_vector_chunks);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl2_vector_chunks *v_chunks =\n+\t\t\t\t(struct virtchnl2_vector_chunks *)msg;\n+\t\t\tif (v_chunks->num_vchunks == 0) {\n+\t\t\t\terr_msg_format = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tvalid_len += (v_chunks->num_vchunks - 1) *\n+\t\t\t\t      sizeof(struct virtchnl2_vector_chunk);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_GET_RSS_KEY:\n+\tcase VIRTCHNL2_OP_SET_RSS_KEY:\n+\t\tvalid_len = sizeof(struct virtchnl2_rss_key);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl2_rss_key *vrk =\n+\t\t\t\t(struct virtchnl2_rss_key *)msg;\n+\n+\t\t\tif (vrk->key_len == 0) {\n+\t\t\t\t/* zero length is allowed as input */\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += vrk->key_len - 1;\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_GET_RSS_LUT:\n+\tcase VIRTCHNL2_OP_SET_RSS_LUT:\n+\t\tvalid_len = sizeof(struct virtchnl2_rss_lut);\n+\t\tif (msglen >= valid_len) {\n+\t\t\tstruct virtchnl2_rss_lut *vrl =\n+\t\t\t\t(struct virtchnl2_rss_lut *)msg;\n+\n+\t\t\tif (vrl->lut_entries == 0) {\n+\t\t\t\t/* zero entries is allowed as input */\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tvalid_len += (vrl->lut_entries - 1) * sizeof(vrl->lut);\n+\t\t}\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_GET_RSS_HASH:\n+\tcase VIRTCHNL2_OP_SET_RSS_HASH:\n+\t\tvalid_len = sizeof(struct virtchnl2_rss_hash);\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_SET_SRIOV_VFS:\n+\t\tvalid_len = sizeof(struct virtchnl2_sriov_vfs_info);\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_GET_PTYPE_INFO:\n+\t\tvalid_len = sizeof(struct virtchnl2_get_ptype_info);\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_GET_STATS:\n+\t\tvalid_len = sizeof(struct virtchnl2_vport_stats);\n+\t\tbreak;\n+\tcase VIRTCHNL2_OP_RESET_VF:\n+\t\tbreak;\n+\t/* These are always errors coming from the VF. 
*/\n+\tcase VIRTCHNL2_OP_EVENT:\n+\tcase VIRTCHNL2_OP_UNKNOWN:\n+\tdefault:\n+\t\treturn VIRTCHNL2_STATUS_ERR_PARAM;\n+\t}\n+\t/* few more checks */\n+\tif (err_msg_format || valid_len != msglen)\n+\t\treturn VIRTCHNL2_STATUS_ERR_OPCODE_MISMATCH;\n+\n+\treturn 0;\n+}\n+\n+#endif /* _VIRTCHNL_2_H_ */\ndiff --git a/drivers/common/idpf/base/virtchnl2_lan_desc.h b/drivers/common/idpf/base/virtchnl2_lan_desc.h\nnew file mode 100644\nindex 0000000000..b8cb22e474\n--- /dev/null\n+++ b/drivers/common/idpf/base/virtchnl2_lan_desc.h\n@@ -0,0 +1,606 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+/*\n+ * Copyright (C) 2019 Intel Corporation\n+ *\n+ * For licensing information, see the file 'LICENSE' in the root folder\n+ */\n+#ifndef _VIRTCHNL2_LAN_DESC_H_\n+#define _VIRTCHNL2_LAN_DESC_H_\n+\n+/* VIRTCHNL2_TX_DESC_IDS\n+ * Transmit descriptor ID flags\n+ */\n+#define VIRTCHNL2_TXDID_DATA\t\t\t\tBIT(0)\n+#define VIRTCHNL2_TXDID_CTX\t\t\t\tBIT(1)\n+#define VIRTCHNL2_TXDID_REINJECT_CTX\t\t\tBIT(2)\n+#define VIRTCHNL2_TXDID_FLEX_DATA\t\t\tBIT(3)\n+#define VIRTCHNL2_TXDID_FLEX_CTX\t\t\tBIT(4)\n+#define VIRTCHNL2_TXDID_FLEX_TSO_CTX\t\t\tBIT(5)\n+#define VIRTCHNL2_TXDID_FLEX_TSYN_L2TAG1\t\tBIT(6)\n+#define VIRTCHNL2_TXDID_FLEX_L2TAG1_L2TAG2\t\tBIT(7)\n+#define VIRTCHNL2_TXDID_FLEX_TSO_L2TAG2_PARSTAG_CTX\tBIT(8)\n+#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_SA_TSO_CTX\tBIT(9)\n+#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_SA_CTX\t\tBIT(10)\n+#define VIRTCHNL2_TXDID_FLEX_L2TAG2_CTX\t\t\tBIT(11)\n+#define VIRTCHNL2_TXDID_FLEX_FLOW_SCHED\t\t\tBIT(12)\n+#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_TSO_CTX\t\tBIT(13)\n+#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_CTX\t\tBIT(14)\n+#define VIRTCHNL2_TXDID_DESC_DONE\t\t\tBIT(15)\n+\n+/* VIRTCHNL2_RX_DESC_IDS\n+ * Receive descriptor IDs (range from 0 to 63)\n+ */\n+#define VIRTCHNL2_RXDID_0_16B_BASE\t\t\t0\n+#define VIRTCHNL2_RXDID_1_32B_BASE\t\t\t1\n+/* FLEX_SQ_NIC and FLEX_SPLITQ share desc ids because they can be\n+ * differentiated based on queue model; e.g. 
single queue model can\n+ * only use FLEX_SQ_NIC and split queue model can only use FLEX_SPLITQ\n+ * for DID 2.\n+ */\n+#define VIRTCHNL2_RXDID_2_FLEX_SPLITQ\t\t\t2\n+#define VIRTCHNL2_RXDID_2_FLEX_SQ_NIC\t\t\t2\n+#define VIRTCHNL2_RXDID_3_FLEX_SQ_SW\t\t\t3\n+#define VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB\t\t4\n+#define VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL\t\t5\n+#define VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2\t\t\t6\n+#define VIRTCHNL2_RXDID_7_HW_RSVD\t\t\t7\n+/* 9 through 15 are reserved */\n+#define VIRTCHNL2_RXDID_16_COMMS_GENERIC\t\t16\n+#define VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN\t\t17\n+#define VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4\t\t18\n+#define VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6\t\t19\n+#define VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW\t\t20\n+#define VIRTCHNL2_RXDID_21_COMMS_AUX_TCP\t\t21\n+/* 22 through 63 are reserved */\n+\n+/* VIRTCHNL2_RX_DESC_ID_BITMASKS\n+ * Receive descriptor ID bitmasks\n+ */\n+#define VIRTCHNL2_RXDID_0_16B_BASE_M\t\tBIT(VIRTCHNL2_RXDID_0_16B_BASE)\n+#define VIRTCHNL2_RXDID_1_32B_BASE_M\t\tBIT(VIRTCHNL2_RXDID_1_32B_BASE)\n+#define VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M\t\tBIT(VIRTCHNL2_RXDID_2_FLEX_SPLITQ)\n+#define VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M\t\tBIT(VIRTCHNL2_RXDID_2_FLEX_SQ_NIC)\n+#define VIRTCHNL2_RXDID_3_FLEX_SQ_SW_M\t\tBIT(VIRTCHNL2_RXDID_3_FLEX_SQ_SW)\n+#define VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB_M\tBIT(VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB)\n+#define VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL_M\tBIT(VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL)\n+#define VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2_M\tBIT(VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2)\n+#define VIRTCHNL2_RXDID_7_HW_RSVD_M\t\tBIT(VIRTCHNL2_RXDID_7_HW_RSVD)\n+/* 9 through 15 are reserved */\n+#define VIRTCHNL2_RXDID_16_COMMS_GENERIC_M\tBIT(VIRTCHNL2_RXDID_16_COMMS_GENERIC)\n+#define VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN_M\tBIT(VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN)\n+#define VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4_M\tBIT(VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4)\n+#define VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6_M\tBIT(VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6)\n+#define VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW_M\tBIT(VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW)\n+#define VIRTCHNL2_RXDID_21_COMMS_AUX_TCP_M\tBIT(VIRTCHNL2_RXDID_21_COMMS_AUX_TCP)\n+/* 22 through 63 are reserved */\n+\n+/* Rx */\n+/* For splitq virtchnl2_rx_flex_desc_adv desc members */\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_S\t\t0\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M\t\t\\\n+\tMAKEMASK(0xFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_S)\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S\t\t0\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M\t\t\\\n+\tMAKEMASK(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S)\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_S\t\t10\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_M\t\t\\\n+\tMAKEMASK(0x3UL, VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_S)\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_S\t\t12\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_M\t\t\t\\\n+\tMAKEMASK(0xFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_S)\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S\t\t0\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M\t\\\n+\tMAKEMASK(0x3FFFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S)\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S\t\t14\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M\t\t\t\\\n+\tBIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S)\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S\t\t15\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M\t\t\\\n+\tBIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S)\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_S\t\t0\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M\t\t\\\n+\tMAKEMASK(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_S)\n+#define 
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S\t\t10\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M\t\t\t\\\n+\tBIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S)\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S\t\t11\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_M\t\t\t\\\n+\tBIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S)\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S\t\t12\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M\t\t\t\\\n+\tMAKEMASK(0x7UL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S)\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S\t\t15\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M\t\t\\\n+\tBIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S)\n+\n+/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_0_QW1_BITS\n+ * for splitq virtchnl2_rx_flex_desc_adv\n+ * Note: These are predefined bit offsets\n+ */\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_DD_S\t\t\t0\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_S\t\t1\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_S\t\t2\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S\t\t3\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S\t\t4\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S\t\t5\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S\t\t6\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S\t\t7\n+\n+/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_0_QW0_BITS\n+ * for splitq virtchnl2_rx_flex_desc_adv\n+ * Note: These are predefined bit offsets\n+ */\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LPBK_S\t\t0\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_S\t\t1\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RXE_S\t\t2\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_CRCP_S\t\t3\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S\t\t4\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_S\t\t5\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_S\t6\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_S\t7\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LAST\t\t\t8 /* this entry must be last!!! */\n+\n+/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_1_BITS\n+ * for splitq virtchnl2_rx_flex_desc_adv\n+ * Note: These are predefined bit offsets\n+ */\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_RSVD_S\t\t0 /* 2 bits */\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_S\t\t2\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_S\t\t3\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_S\t4\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_S\t5\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_S\t6\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_S\t7\n+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_LAST\t\t\t8 /* this entry must be last!!! 
*/\n+\n+/* for singleq (flex) virtchnl2_rx_flex_desc fields */\n+/* for virtchnl2_rx_flex_desc.ptype_flex_flags0 member */\n+#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_S\t\t\t0\n+#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_M\t\t\t\\\n+\tMAKEMASK(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_PTYPE_S) /* 10 bits */\n+\n+/* for virtchnl2_rx_flex_desc.pkt_length member */\n+#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_S\t\t\t0\n+#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M\t\t\t\\\n+\tMAKEMASK(0x3FFFUL, VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_S) /* 14 bits */\n+\n+/* VIRTCHNL2_RX_FLEX_DESC_STATUS_ERROR_0_BITS\n+ * for singleq (flex) virtchnl2_rx_flex_desc\n+ * Note: These are predefined bit offsets\n+ */\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S\t\t\t0\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_EOF_S\t\t\t1\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_HBO_S\t\t\t2\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_S\t\t\t3\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S\t\t4\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S\t\t5\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S\t\t6\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S\t\t7\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_LPBK_S\t\t\t8\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_S\t\t9\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_RXE_S\t\t\t10\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_CRCP_S\t\t\t11\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_S\t\t12\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_L2TAG1P_S\t\t13\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S\t\t14\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S\t\t15\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_LAST\t\t\t16 /* this entry must be last!!! */\n+\n+/* VIRTCHNL2_RX_FLEX_DESC_STATUS_ERROR_1_BITS\n+ * for singleq (flex) virtchnl2_rx_flex_desc\n+ * Note: These are predefined bit offsets\n+ */\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_CPM_S\t\t\t0 /* 4 bits */\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_S\t\t\t4\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_CRYPTO_S\t\t\t5\n+/* [10:6] reserved */\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_L2TAG2P_S\t\t11\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S\t\t12\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S\t\t13\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S\t\t14\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S\t\t15\n+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_LAST\t\t\t16 /* this entry must be last!!! 
*/\n+\n+/* for virtchnl2_rx_flex_desc.ts_low member */\n+#define VIRTCHNL2_RX_FLEX_TSTAMP_VALID\t\t\t\tBIT(0)\n+\n+/* For singleq (non flex) virtchnl2_singleq_base_rx_desc legacy desc members */\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_S\t63\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_M\t\\\n+\tBIT_ULL(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_S)\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_S\t52\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_M\t\\\n+\tMAKEMASK(0x7FFULL, VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_S)\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_S\t38\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M\t\\\n+\tMAKEMASK(0x3FFFULL, VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_S)\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_S\t30\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M\t\\\n+\tMAKEMASK(0xFFULL, VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_S)\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_S\t19\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M\t\\\n+\tMAKEMASK(0xFFUL, VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_S)\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_S\t0\n+#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M\t\\\n+\tMAKEMASK(0x7FFFFUL, VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_S)\n+\n+/* VIRTCHNL2_RX_BASE_DESC_STATUS_BITS\n+ * for singleq (base) virtchnl2_rx_base_desc\n+ * Note: These are predefined bit offsets\n+ */\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_DD_S\t\t0\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_S\t\t1\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_L2TAG1P_S\t\t2\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_S\t\t3\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_CRCP_S\t\t4\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD_S\t\t5 /* 3 bits */\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_EXT_UDP_0_S\t8\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_UMBCAST_S\t\t9 /* 2 bits */\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_FLM_S\t\t11\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_FLTSTAT_S\t\t12 /* 2 bits */\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_LPBK_S\t\t14\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_S\t15\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD1_S\t\t16 /* 2 bits */\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_INT_UDP_0_S\t18\n+#define VIRTCHNL2_RX_BASE_DESC_STATUS_LAST\t\t19 /* this entry must be last!!! 
*/\n+\n+/* VIRTCHNL2_RX_BASE_DESC_EXT_STATUS_BITS\n+ * for singleq (base) virtchnl2_rx_base_desc\n+ * Note: These are predefined bit offsets\n+ */\n+#define VIRTCHNL2_RX_BASE_DESC_EXT_STATUS_L2TAG2P_S\t0\n+\n+/* VIRTCHNL2_RX_BASE_DESC_ERROR_BITS\n+ * for singleq (base) virtchnl2_rx_base_desc\n+ * Note: These are predefined bit offsets\n+ */\n+#define VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_S\t\t0\n+#define VIRTCHNL2_RX_BASE_DESC_ERROR_ATRAEFAIL_S\t1\n+#define VIRTCHNL2_RX_BASE_DESC_ERROR_HBO_S\t\t2\n+#define VIRTCHNL2_RX_BASE_DESC_ERROR_L3L4E_S\t\t3 /* 3 bits */\n+#define VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_S\t\t3\n+#define VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_S\t\t4\n+#define VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_S\t\t5\n+#define VIRTCHNL2_RX_BASE_DESC_ERROR_OVERSIZE_S\t\t6\n+#define VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_S\t\t7\n+\n+/* VIRTCHNL2_RX_BASE_DESC_FLTSTAT_VALUES\n+ * for singleq (base) virtchnl2_rx_base_desc\n+ * Note: These are predefined bit offsets\n+ */\n+#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_NO_DATA\t\t0\n+#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_FD_ID\t\t1\n+#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSV\t\t2\n+#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH\t\t3\n+\n+/* Receive Descriptors */\n+/* splitq buf\n+ * |                                       16|                   0|\n+ * ----------------------------------------------------------------\n+ * | RSV                                     | Buffer ID          |\n+ * ----------------------------------------------------------------\n+ * | Rx packet buffer address                                     |\n+ * ----------------------------------------------------------------\n+ * | Rx header buffer address                                     |\n+ * ----------------------------------------------------------------\n+ * | RSV                                                          |\n+ * ----------------------------------------------------------------\n+ * |                                                             0|\n+ */\n+struct virtchnl2_splitq_rx_buf_desc {\n+\tstruct {\n+\t\t__le16  buf_id; /* Buffer Identifier */\n+\t\t__le16  rsvd0;\n+\t\t__le32  rsvd1;\n+\t} qword0;\n+\t__le64  pkt_addr; /* Packet buffer address */\n+\t__le64  hdr_addr; /* Header buffer address */\n+\t__le64  rsvd2;\n+}; /* read used with buffer queues*/\n+\n+/* singleq buf\n+ * |                                                             0|\n+ * ----------------------------------------------------------------\n+ * | Rx packet buffer address                                     |\n+ * ----------------------------------------------------------------\n+ * | Rx header buffer address                                     |\n+ * ----------------------------------------------------------------\n+ * | RSV                                                          |\n+ * ----------------------------------------------------------------\n+ * | RSV                                                          |\n+ * ----------------------------------------------------------------\n+ * |                                                             0|\n+ */\n+struct virtchnl2_singleq_rx_buf_desc {\n+\t__le64  pkt_addr; /* Packet buffer address */\n+\t__le64  hdr_addr; /* Header buffer address */\n+\t__le64  rsvd1;\n+\t__le64  rsvd2;\n+}; /* read used with buffer queues*/\n+\n+union virtchnl2_rx_buf_desc {\n+\tstruct virtchnl2_singleq_rx_buf_desc\t\tread;\n+\tstruct virtchnl2_splitq_rx_buf_desc\t\tsplit_rd;\n+};\n+\n+/* (0x00) singleq wb(compl) */\n+struct virtchnl2_singleq_base_rx_desc {\n+\tstruct 
{\n+\t\tstruct {\n+\t\t\t__le16 mirroring_status;\n+\t\t\t__le16 l2tag1;\n+\t\t} lo_dword;\n+\t\tunion {\n+\t\t\t__le32 rss; /* RSS Hash */\n+\t\t\t__le32 fd_id; /* Flow Director filter id */\n+\t\t} hi_dword;\n+\t} qword0;\n+\tstruct {\n+\t\t/* status/error/PTYPE/length */\n+\t\t__le64 status_error_ptype_len;\n+\t} qword1;\n+\tstruct {\n+\t\t__le16 ext_status; /* extended status */\n+\t\t__le16 rsvd;\n+\t\t__le16 l2tag2_1;\n+\t\t__le16 l2tag2_2;\n+\t} qword2;\n+\tstruct {\n+\t\t__le32 reserved;\n+\t\t__le32 fd_id;\n+\t} qword3;\n+}; /* writeback */\n+\n+/* (0x01) singleq flex compl */\n+struct virtchnl2_rx_flex_desc {\n+\t/* Qword 0 */\n+\tu8 rxdid; /* descriptor builder profile id */\n+\tu8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */\n+\t__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */\n+\t__le16 pkt_len; /* [15:14] are reserved */\n+\t__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */\n+\t\t\t\t\t/* sph=[11:11] */\n+\t\t\t\t\t/* ff1/ext=[15:12] */\n+\n+\t/* Qword 1 */\n+\t__le16 status_error0;\n+\t__le16 l2tag1;\n+\t__le16 flex_meta0;\n+\t__le16 flex_meta1;\n+\n+\t/* Qword 2 */\n+\t__le16 status_error1;\n+\tu8 flex_flags2;\n+\tu8 time_stamp_low;\n+\t__le16 l2tag2_1st;\n+\t__le16 l2tag2_2nd;\n+\n+\t/* Qword 3 */\n+\t__le16 flex_meta2;\n+\t__le16 flex_meta3;\n+\tunion {\n+\t\tstruct {\n+\t\t\t__le16 flex_meta4;\n+\t\t\t__le16 flex_meta5;\n+\t\t} flex;\n+\t\t__le32 ts_high;\n+\t} flex_ts;\n+};\n+\n+/* (0x02) */\n+struct virtchnl2_rx_flex_desc_nic {\n+\t/* Qword 0 */\n+\tu8 rxdid;\n+\tu8 mir_id_umb_cast;\n+\t__le16 ptype_flex_flags0;\n+\t__le16 pkt_len;\n+\t__le16 hdr_len_sph_flex_flags1;\n+\n+\t/* Qword 1 */\n+\t__le16 status_error0;\n+\t__le16 l2tag1;\n+\t__le32 rss_hash;\n+\n+\t/* Qword 2 */\n+\t__le16 status_error1;\n+\tu8 flexi_flags2;\n+\tu8 ts_low;\n+\t__le16 l2tag2_1st;\n+\t__le16 l2tag2_2nd;\n+\n+\t/* Qword 3 */\n+\t__le32 flow_id;\n+\tunion {\n+\t\tstruct {\n+\t\t\t__le16 rsvd;\n+\t\t\t__le16 flow_id_ipv6;\n+\t\t} flex;\n+\t\t__le32 ts_high;\n+\t} flex_ts;\n+};\n+\n+/* Rx Flex Descriptor Switch Profile\n+ * RxDID Profile Id 3\n+ * Flex-field 0: Source Vsi\n+ */\n+struct virtchnl2_rx_flex_desc_sw {\n+\t/* Qword 0 */\n+\tu8 rxdid;\n+\tu8 mir_id_umb_cast;\n+\t__le16 ptype_flex_flags0;\n+\t__le16 pkt_len;\n+\t__le16 hdr_len_sph_flex_flags1;\n+\n+\t/* Qword 1 */\n+\t__le16 status_error0;\n+\t__le16 l2tag1;\n+\t__le16 src_vsi; /* [10:15] are reserved */\n+\t__le16 flex_md1_rsvd;\n+\n+\t/* Qword 2 */\n+\t__le16 status_error1;\n+\tu8 flex_flags2;\n+\tu8 ts_low;\n+\t__le16 l2tag2_1st;\n+\t__le16 l2tag2_2nd;\n+\n+\t/* Qword 3 */\n+\t__le32 rsvd; /* flex words 2-3 are reserved */\n+\t__le32 ts_high;\n+};\n+\n+\n+/* Rx Flex Descriptor NIC Profile\n+ * RxDID Profile Id 6\n+ * Flex-field 0: RSS hash lower 16-bits\n+ * Flex-field 1: RSS hash upper 16-bits\n+ * Flex-field 2: Flow Id lower 16-bits\n+ * Flex-field 3: Source Vsi\n+ * Flex-field 4: reserved, Vlan id taken from L2Tag\n+ */\n+struct virtchnl2_rx_flex_desc_nic_2 {\n+\t/* Qword 0 */\n+\tu8 rxdid;\n+\tu8 mir_id_umb_cast;\n+\t__le16 ptype_flex_flags0;\n+\t__le16 pkt_len;\n+\t__le16 hdr_len_sph_flex_flags1;\n+\n+\t/* Qword 1 */\n+\t__le16 status_error0;\n+\t__le16 l2tag1;\n+\t__le32 rss_hash;\n+\n+\t/* Qword 2 */\n+\t__le16 status_error1;\n+\tu8 flexi_flags2;\n+\tu8 ts_low;\n+\t__le16 l2tag2_1st;\n+\t__le16 l2tag2_2nd;\n+\n+\t/* Qword 3 */\n+\t__le16 flow_id;\n+\t__le16 src_vsi;\n+\tunion {\n+\t\tstruct {\n+\t\t\t__le16 rsvd;\n+\t\t\t__le16 flow_id_ipv6;\n+\t\t} flex;\n+\t\t__le32 ts_high;\n+\t} flex_ts;\n+};\n+\n+/* Rx Flex 
Descriptor Advanced (Split Queue Model)\n+ * RxDID Profile Id 7\n+ */\n+struct virtchnl2_rx_flex_desc_adv {\n+\t/* Qword 0 */\n+\tu8 rxdid_ucast; /* profile_id=[3:0] */\n+\t\t\t/* rsvd=[5:4] */\n+\t\t\t/* ucast=[7:6] */\n+\tu8 status_err0_qw0;\n+\t__le16 ptype_err_fflags0;\t/* ptype=[9:0] */\n+\t\t\t\t\t/* ip_hdr_err=[10:10] */\n+\t\t\t\t\t/* udp_len_err=[11:11] */\n+\t\t\t\t\t/* ff0=[15:12] */\n+\t__le16 pktlen_gen_bufq_id;\t/* plen=[13:0] */\n+\t\t\t\t\t/* gen=[14:14]  only in splitq */\n+\t\t\t\t\t/* bufq_id=[15:15] only in splitq */\n+\t__le16 hdrlen_flags;\t\t/* header=[9:0] */\n+\t\t\t\t\t/* rsc=[10:10] only in splitq */\n+\t\t\t\t\t/* sph=[11:11] only in splitq */\n+\t\t\t\t\t/* ext_udp_0=[12:12] */\n+\t\t\t\t\t/* int_udp_0=[13:13] */\n+\t\t\t\t\t/* trunc_mirr=[14:14] */\n+\t\t\t\t\t/* miss_prepend=[15:15] */\n+\t/* Qword 1 */\n+\tu8 status_err0_qw1;\n+\tu8 status_err1;\n+\tu8 fflags1;\n+\tu8 ts_low;\n+\t__le16 fmd0;\n+\t__le16 fmd1;\n+\t/* Qword 2 */\n+\t__le16 fmd2;\n+\tu8 fflags2;\n+\tu8 hash3;\n+\t__le16 fmd3;\n+\t__le16 fmd4;\n+\t/* Qword 3 */\n+\t__le16 fmd5;\n+\t__le16 fmd6;\n+\t__le16 fmd7_0;\n+\t__le16 fmd7_1;\n+}; /* writeback */\n+\n+/* Rx Flex Descriptor Advanced (Split Queue Model) NIC Profile\n+ * RxDID Profile Id 8\n+ * Flex-field 0: BufferID\n+ * Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW)\n+ * Flex-field 2: Hash[15:0]\n+ * Flex-flags 2: Hash[23:16]\n+ * Flex-field 3: L2TAG2\n+ * Flex-field 5: L2TAG1\n+ * Flex-field 7: Timestamp (upper 32 bits)\n+ */\n+struct virtchnl2_rx_flex_desc_adv_nic_3 {\n+\t/* Qword 0 */\n+\tu8 rxdid_ucast; /* profile_id=[3:0] */\n+\t\t\t/* rsvd=[5:4] */\n+\t\t\t/* ucast=[7:6] */\n+\tu8 status_err0_qw0;\n+\t__le16 ptype_err_fflags0;\t/* ptype=[9:0] */\n+\t\t\t\t\t/* ip_hdr_err=[10:10] */\n+\t\t\t\t\t/* udp_len_err=[11:11] */\n+\t\t\t\t\t/* ff0=[15:12] */\n+\t__le16 pktlen_gen_bufq_id;\t/* plen=[13:0] */\n+\t\t\t\t\t/* gen=[14:14]  only in splitq */\n+\t\t\t\t\t/* bufq_id=[15:15] only in splitq */\n+\t__le16 hdrlen_flags;\t\t/* header=[9:0] */\n+\t\t\t\t\t/* rsc=[10:10] only in splitq */\n+\t\t\t\t\t/* sph=[11:11] only in splitq */\n+\t\t\t\t\t/* ext_udp_0=[12:12] */\n+\t\t\t\t\t/* int_udp_0=[13:13] */\n+\t\t\t\t\t/* trunc_mirr=[14:14] */\n+\t\t\t\t\t/* miss_prepend=[15:15] */\n+\t/* Qword 1 */\n+\tu8 status_err0_qw1;\n+\tu8 status_err1;\n+\tu8 fflags1;\n+\tu8 ts_low;\n+\t__le16 buf_id; /* only in splitq */\n+\tunion {\n+\t\t__le16 raw_cs;\n+\t\t__le16 l2tag1;\n+\t\t__le16 rscseglen;\n+\t} misc;\n+\t/* Qword 2 */\n+\t__le16 hash1;\n+\tunion {\n+\t\tu8 fflags2;\n+\t\tu8 mirrorid;\n+\t\tu8 hash2;\n+\t} ff2_mirrid_hash2;\n+\tu8 hash3;\n+\t__le16 l2tag2;\n+\t__le16 fmd4;\n+\t/* Qword 3 */\n+\t__le16 l2tag1;\n+\t__le16 fmd6;\n+\t__le32 ts_high;\n+}; /* writeback */\n+\n+union virtchnl2_rx_desc {\n+\tstruct virtchnl2_singleq_rx_buf_desc\t\tread;\n+\tstruct virtchnl2_singleq_base_rx_desc\t\tbase_wb;\n+\tstruct virtchnl2_rx_flex_desc\t\t\tflex_wb;\n+\tstruct virtchnl2_rx_flex_desc_nic\t\tflex_nic_wb;\n+\tstruct virtchnl2_rx_flex_desc_sw\t\tflex_sw_wb;\n+\tstruct virtchnl2_rx_flex_desc_nic_2\t\tflex_nic_2_wb;\n+\tstruct virtchnl2_rx_flex_desc_adv\t\tflex_adv_wb;\n+\tstruct virtchnl2_rx_flex_desc_adv_nic_3\t\tflex_adv_nic_3_wb;\n+};\n+\n+#endif /* _VIRTCHNL2_LAN_DESC_H_ */\ndiff --git a/drivers/common/idpf/base/virtchnl_inline_ipsec.h b/drivers/common/idpf/base/virtchnl_inline_ipsec.h\nnew file mode 100644\nindex 0000000000..e19043ac47\n--- /dev/null\n+++ b/drivers/common/idpf/base/virtchnl_inline_ipsec.h\n@@ -0,0 +1,567 @@\n+/* 
SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2022 Intel Corporation\n+ */\n+\n+#ifndef _VIRTCHNL_INLINE_IPSEC_H_\n+#define _VIRTCHNL_INLINE_IPSEC_H_\n+\n+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM\t3\n+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM\t\t16\n+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM\t\t128\n+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER\t2\n+#define VIRTCHNL_IPSEC_MAX_KEY_LEN\t\t128\n+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM\t8\n+#define VIRTCHNL_IPSEC_SA_DESTROY\t\t0\n+#define VIRTCHNL_IPSEC_BROADCAST_VFID\t\t0xFFFFFFFF\n+#define VIRTCHNL_IPSEC_INVALID_REQ_ID\t\t0xFFFF\n+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP\t0xFFFFFFFF\n+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP\t0xFFFFFFFF\n+\n+/* crypto type */\n+#define VIRTCHNL_AUTH\t\t1\n+#define VIRTCHNL_CIPHER\t\t2\n+#define VIRTCHNL_AEAD\t\t3\n+\n+/* caps enabled */\n+#define VIRTCHNL_IPSEC_ESN_ENA\t\t\tBIT(0)\n+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA\t\tBIT(1)\n+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA\t\tBIT(2)\n+#define VIRTCHNL_IPSEC_AUDIT_ENA\t\tBIT(3)\n+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA\t\tBIT(4)\n+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA\tBIT(5)\n+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA\t\tBIT(6)\n+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA\t\tBIT(7)\n+\n+/* algorithm type */\n+/* Hash Algorithm */\n+#define VIRTCHNL_HASH_NO_ALG\t0 /* NULL algorithm */\n+#define VIRTCHNL_AES_CBC_MAC\t1 /* AES-CBC-MAC algorithm */\n+#define VIRTCHNL_AES_CMAC\t2 /* AES CMAC algorithm */\n+#define VIRTCHNL_AES_GMAC\t3 /* AES GMAC algorithm */\n+#define VIRTCHNL_AES_XCBC_MAC\t4 /* AES XCBC algorithm */\n+#define VIRTCHNL_MD5_HMAC\t5 /* HMAC using MD5 algorithm */\n+#define VIRTCHNL_SHA1_HMAC\t6 /* HMAC using 160 bit SHA algorithm */\n+#define VIRTCHNL_SHA224_HMAC\t7 /* HMAC using 224 bit SHA algorithm */\n+#define VIRTCHNL_SHA256_HMAC\t8 /* HMAC using 256 bit SHA algorithm */\n+#define VIRTCHNL_SHA384_HMAC\t9 /* HMAC using 384 bit SHA algorithm */\n+#define VIRTCHNL_SHA512_HMAC\t10 /* HMAC using 512 bit SHA algorithm */\n+#define VIRTCHNL_SHA3_224_HMAC\t11 /* HMAC using 224 bit SHA3 algorithm */\n+#define VIRTCHNL_SHA3_256_HMAC\t12 /* HMAC using 256 bit SHA3 algorithm */\n+#define VIRTCHNL_SHA3_384_HMAC\t13 /* HMAC using 384 bit SHA3 algorithm */\n+#define VIRTCHNL_SHA3_512_HMAC\t14 /* HMAC using 512 bit SHA3 algorithm */\n+/* Cipher Algorithm */\n+#define VIRTCHNL_CIPHER_NO_ALG\t15 /* NULL algorithm */\n+#define VIRTCHNL_3DES_CBC\t16 /* Triple DES algorithm in CBC mode */\n+#define VIRTCHNL_AES_CBC\t17 /* AES algorithm in CBC mode */\n+#define VIRTCHNL_AES_CTR\t18 /* AES algorithm in Counter mode */\n+/* AEAD Algorithm */\n+#define VIRTCHNL_AES_CCM\t19 /* AES algorithm in CCM mode */\n+#define VIRTCHNL_AES_GCM\t20 /* AES algorithm in GCM mode */\n+#define VIRTCHNL_CHACHA20_POLY1305 21 /* ChaCha20-Poly1305 AEAD algorithm */\n+\n+/* protocol type */\n+#define VIRTCHNL_PROTO_ESP\t1\n+#define VIRTCHNL_PROTO_AH\t2\n+#define VIRTCHNL_PROTO_RSVD1\t3\n+\n+/* sa mode */\n+#define VIRTCHNL_SA_MODE_TRANSPORT\t1\n+#define VIRTCHNL_SA_MODE_TUNNEL\t\t2\n+#define VIRTCHNL_SA_MODE_TRAN_TUN\t3\n+#define VIRTCHNL_SA_MODE_UNKNOWN\t4\n+\n+/* sa direction */\n+#define VIRTCHNL_DIR_INGRESS\t\t1\n+#define VIRTCHNL_DIR_EGRESS\t\t2\n+#define VIRTCHNL_DIR_INGRESS_EGRESS\t3\n+\n+/* sa termination */\n+#define VIRTCHNL_TERM_SOFTWARE\t1\n+#define VIRTCHNL_TERM_HARDWARE\t2\n+\n+/* sa ip type */\n+#define VIRTCHNL_IPV4\t1\n+#define VIRTCHNL_IPV6\t2\n+\n+/* for virtchnl_ipsec_resp */\n+enum inline_ipsec_resp {\n+\tINLINE_IPSEC_SUCCESS = 0,\n+\tINLINE_IPSEC_FAIL = 
-1,\n+\tINLINE_IPSEC_ERR_FIFO_FULL = -2,\n+\tINLINE_IPSEC_ERR_NOT_READY = -3,\n+\tINLINE_IPSEC_ERR_VF_DOWN = -4,\n+\tINLINE_IPSEC_ERR_INVALID_PARAMS = -5,\n+\tINLINE_IPSEC_ERR_NO_MEM = -6,\n+};\n+\n+/* Detailed opcodes for DPDK and IPsec use */\n+enum inline_ipsec_ops {\n+\tINLINE_IPSEC_OP_GET_CAP = 0,\n+\tINLINE_IPSEC_OP_GET_STATUS = 1,\n+\tINLINE_IPSEC_OP_SA_CREATE = 2,\n+\tINLINE_IPSEC_OP_SA_UPDATE = 3,\n+\tINLINE_IPSEC_OP_SA_DESTROY = 4,\n+\tINLINE_IPSEC_OP_SP_CREATE = 5,\n+\tINLINE_IPSEC_OP_SP_DESTROY = 6,\n+\tINLINE_IPSEC_OP_SA_READ = 7,\n+\tINLINE_IPSEC_OP_EVENT = 8,\n+\tINLINE_IPSEC_OP_RESP = 9,\n+};\n+\n+#pragma pack(1)\n+/* Not all fields are valid; if a field is invalid, all of its bits are set to 1 */\n+struct virtchnl_algo_cap {\n+\tu32 algo_type;\n+\n+\tu16 block_size;\n+\n+\tu16 min_key_size;\n+\tu16 max_key_size;\n+\tu16 inc_key_size;\n+\n+\tu16 min_iv_size;\n+\tu16 max_iv_size;\n+\tu16 inc_iv_size;\n+\n+\tu16 min_digest_size;\n+\tu16 max_digest_size;\n+\tu16 inc_digest_size;\n+\n+\tu16 min_aad_size;\n+\tu16 max_aad_size;\n+\tu16 inc_aad_size;\n+};\n+#pragma pack()\n+\n+/* VF records the crypto capabilities received over virtchnl */\n+struct virtchnl_sym_crypto_cap {\n+\tu8 crypto_type;\n+\tu8 algo_cap_num;\n+\tstruct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];\n+};\n+\n+/* VIRTCHNL_OP_GET_IPSEC_CAP\n+ * VF passes virtchnl_ipsec_cap to the PF,\n+ * and the PF returns its IPsec capabilities over virtchnl.\n+ */\n+#pragma pack(1)\n+struct virtchnl_ipsec_cap {\n+\t/* max number of SA per VF */\n+\tu16 max_sa_num;\n+\n+\t/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */\n+\tu8 virtchnl_protocol_type;\n+\n+\t/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */\n+\tu8 virtchnl_sa_mode;\n+\n+\t/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */\n+\tu8 virtchnl_direction;\n+\n+\t/* termination mode - value ref VIRTCHNL_TERM_XXX */\n+\tu8 termination_mode;\n+\n+\t/* number of supported crypto capabilities */\n+\tu8 crypto_cap_num;\n+\n+\t/* descriptor ID */\n+\tu16 desc_id;\n+\n+\t/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */\n+\tu32 caps_enabled;\n+\n+\t/* crypto capabilities */\n+\tstruct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];\n+};\n+\n+/* configuration of crypto function */\n+struct virtchnl_ipsec_crypto_cfg_item {\n+\tu8 crypto_type;\n+\n+\tu32 algo_type;\n+\n+\t/* Length of valid IV data. 
*/\n+\tu16 iv_len;\n+\n+\t/* Length of digest */\n+\tu16 digest_len;\n+\n+\t/* SA salt */\n+\tu32 salt;\n+\n+\t/* The length of the symmetric key */\n+\tu16 key_len;\n+\n+\t/* key data buffer */\n+\tu8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];\n+};\n+#pragma pack()\n+\n+struct virtchnl_ipsec_sym_crypto_cfg {\n+\tstruct virtchnl_ipsec_crypto_cfg_item\n+\t\titems[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];\n+};\n+\n+#pragma pack(1)\n+/* VIRTCHNL_OP_IPSEC_SA_CREATE\n+ * VF sends this SA configuration to the PF using virtchnl;\n+ * the PF creates the SA per this configuration, and the PF driver\n+ * returns a unique index (sa_idx) for the created SA.\n+ */\n+struct virtchnl_ipsec_sa_cfg {\n+\t/* IPsec SA Protocol - AH/ESP */\n+\tu8 virtchnl_protocol_type;\n+\n+\t/* termination mode - value ref VIRTCHNL_TERM_XXX */\n+\tu8 virtchnl_termination;\n+\n+\t/* type of outer IP - IPv4/IPv6 */\n+\tu8 virtchnl_ip_type;\n+\n+\t/* type of esn - !0:enable/0:disable */\n+\tu8 esn_enabled;\n+\n+\t/* udp encap - !0:enable/0:disable */\n+\tu8 udp_encap_enabled;\n+\n+\t/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */\n+\tu8 virtchnl_direction;\n+\n+\t/* reserved */\n+\tu8 reserved1;\n+\n+\t/* SA security parameter index */\n+\tu32 spi;\n+\n+\t/* outer src ip address */\n+\tu8 src_addr[16];\n+\n+\t/* outer dst ip address */\n+\tu8 dst_addr[16];\n+\n+\t/* SPD reference. Used to link an SA with its policy.\n+\t * PF drivers may ignore this field.\n+\t */\n+\tu16 spd_ref;\n+\n+\t/* high 32 bits of esn */\n+\tu32 esn_hi;\n+\n+\t/* low 32 bits of esn */\n+\tu32 esn_low;\n+\n+\t/* When enabled, sa_index must be valid */\n+\tu8 sa_index_en;\n+\n+\t/* SA index when sa_index_en is true */\n+\tu32 sa_index;\n+\n+\t/* auditing mode - enable/disable */\n+\tu8 audit_en;\n+\n+\t/* lifetime byte limit - enable/disable\n+\t * When enabled, byte_limit_hard and byte_limit_soft\n+\t * must be valid.\n+\t */\n+\tu8 byte_limit_en;\n+\n+\t/* hard byte limit count */\n+\tu64 byte_limit_hard;\n+\n+\t/* soft byte limit count */\n+\tu64 byte_limit_soft;\n+\n+\t/* drop on authentication failure - enable/disable */\n+\tu8 drop_on_auth_fail_en;\n+\n+\t/* anti-replay window check - enable/disable\n+\t * When enabled, arw_size must be valid.\n+\t */\n+\tu8 arw_check_en;\n+\n+\t/* size of arw window, offset by 1. Setting to 0\n+\t * represents an ARW window size of 1. Setting to 127\n+\t * represents an ARW window size of 128.\n+\t */\n+\tu8 arw_size;\n+\n+\t/* no ip offload mode - enable/disable\n+\t * When enabled, ip type and address must not be valid.\n+\t */\n+\tu8 no_ip_offload_en;\n+\n+\t/* SA Domain. 
Used to logically separate an SADB into groups.\n+\t * PF drivers supporting a single group ignore this field.\n+\t */\n+\tu16 sa_domain;\n+\n+\t/* crypto configuration */\n+\tstruct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;\n+};\n+#pragma pack()\n+\n+/* VIRTCHNL_OP_IPSEC_SA_UPDATE\n+ * VF sends the configuration of an indexed SA to the PF;\n+ * the PF updates the SA according to the configuration.\n+ */\n+struct virtchnl_ipsec_sa_update {\n+\tu32 sa_index; /* SA to update */\n+\tu32 esn_hi; /* high 32 bits of esn */\n+\tu32 esn_low; /* low 32 bits of esn */\n+};\n+\n+#pragma pack(1)\n+/* VIRTCHNL_OP_IPSEC_SA_DESTROY\n+ * VF sends the SA index configuration to the PF;\n+ * the PF destroys SAs according to the configuration.\n+ * The flag bitmap indicates whether all SAs or just the\n+ * selected SAs will be destroyed.\n+ */\n+struct virtchnl_ipsec_sa_destroy {\n+\t/* An all-zero bitmap indicates that all SAs will be destroyed.\n+\t * A non-zero bitmap indicates that the selected SAs in\n+\t * array sa_index will be destroyed.\n+\t */\n+\tu8 flag;\n+\n+\t/* selected SA index */\n+\tu32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];\n+};\n+\n+/* VIRTCHNL_OP_IPSEC_SA_READ\n+ * VF sends this SA configuration to the PF using virtchnl;\n+ * the PF reads the SA and returns the configuration of the created SA.\n+ */\n+struct virtchnl_ipsec_sa_read {\n+\t/* SA valid - invalid/valid */\n+\tu8 valid;\n+\n+\t/* SA active - inactive/active */\n+\tu8 active;\n+\n+\t/* SA SN rollover - not_rollover/rollover */\n+\tu8 sn_rollover;\n+\n+\t/* IPsec SA Protocol - AH/ESP */\n+\tu8 virtchnl_protocol_type;\n+\n+\t/* termination mode - value ref VIRTCHNL_TERM_XXX */\n+\tu8 virtchnl_termination;\n+\n+\t/* auditing mode - enable/disable */\n+\tu8 audit_en;\n+\n+\t/* lifetime byte limit - enable/disable\n+\t * When set to limit, byte_limit_hard and byte_limit_soft\n+\t * must be valid.\n+\t */\n+\tu8 byte_limit_en;\n+\n+\t/* hard byte limit count */\n+\tu64 byte_limit_hard;\n+\n+\t/* soft byte limit count */\n+\tu64 byte_limit_soft;\n+\n+\t/* drop on authentication failure - enable/disable */\n+\tu8 drop_on_auth_fail_en;\n+\n+\t/* anti-replay window check - enable/disable\n+\t * When set to check, arw_size, arw_top, and arw must be valid.\n+\t */\n+\tu8 arw_check_en;\n+\n+\t/* size of arw window, offset by 1. Setting to 0\n+\t * represents an ARW window size of 1. Setting to 127\n+\t * represents an ARW window size of 128.\n+\t */\n+\tu8 arw_size;\n+\n+\t/* reserved */\n+\tu8 reserved1;\n+\n+\t/* top of anti-replay-window */\n+\tu64 arw_top;\n+\n+\t/* anti-replay-window */\n+\tu8 arw[16];\n+\n+\t/* packets processed */\n+\tu64 packets_processed;\n+\n+\t/* bytes processed */\n+\tu64 bytes_processed;\n+\n+\t/* packets dropped */\n+\tu32 packets_dropped;\n+\n+\t/* authentication failures */\n+\tu32 auth_fails;\n+\n+\t/* ARW check failures */\n+\tu32 arw_fails;\n+\n+\t/* type of esn - enable/disable */\n+\tu8 esn;\n+\n+\t/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */\n+\tu8 virtchnl_direction;\n+\n+\t/* SA security parameter index */\n+\tu32 spi;\n+\n+\t/* SA salt */\n+\tu32 salt;\n+\n+\t/* high 32 bits of esn */\n+\tu32 esn_hi;\n+\n+\t/* low 32 bits of esn */\n+\tu32 esn_low;\n+\n+\t/* SA Domain. Used to logically separate an SADB into groups.\n+\t * PF drivers supporting a single group ignore this field.\n+\t */\n+\tu16 sa_domain;\n+\n+\t/* SPD reference. Used to link an SA with its policy.\n+\t * PF drivers may ignore this field.\n+\t */\n+\tu16 spd_ref;\n+\n+\t/* crypto configuration. 
Salt and keys are set to 0 */\n+\tstruct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;\n+};\n+#pragma pack()\n+\n+/* Add allowlist entry in IES */\n+struct virtchnl_ipsec_sp_cfg {\n+\tu32 spi;\n+\tu32 dip[4];\n+\n+\t/* Drop frame if true or redirect to QAT if false. */\n+\tu8 drop;\n+\n+\t/* Congestion domain. For future use. */\n+\tu8 cgd;\n+\n+\t/* 0 for IPv4 table, 1 for IPv6 table. */\n+\tu8 table_id;\n+\n+\t/* Set TC (congestion domain) if true. For future use. */\n+\tu8 set_tc;\n+\n+\t/* 0 for NAT-T unsupported, 1 for NAT-T supported */\n+\tu8 is_udp;\n+\n+\t/* reserved */\n+\tu8 reserved;\n+\n+\t/* NAT-T UDP port number. Only valid when NAT-T is supported */\n+\tu16 udp_port;\n+};\n+\n+#pragma pack(1)\n+/* Delete allowlist entry in IES */\n+struct virtchnl_ipsec_sp_destroy {\n+\t/* 0 for IPv4 table, 1 for IPv6 table. */\n+\tu8 table_id;\n+\tu32 rule_id;\n+};\n+#pragma pack()\n+\n+/* Response from IES to allowlist operations */\n+struct virtchnl_ipsec_sp_cfg_resp {\n+\tu32 rule_id;\n+};\n+\n+struct virtchnl_ipsec_sa_cfg_resp {\n+\tu32 sa_handle;\n+};\n+\n+#define INLINE_IPSEC_EVENT_RESET\t0x1\n+#define INLINE_IPSEC_EVENT_CRYPTO_ON\t0x2\n+#define INLINE_IPSEC_EVENT_CRYPTO_OFF\t0x4\n+\n+struct virtchnl_ipsec_event {\n+\tu32 ipsec_event_data;\n+};\n+\n+#define INLINE_IPSEC_STATUS_AVAILABLE\t0x1\n+#define INLINE_IPSEC_STATUS_UNAVAILABLE\t0x2\n+\n+struct virtchnl_ipsec_status {\n+\tu32 status;\n+};\n+\n+struct virtchnl_ipsec_resp {\n+\tu32 resp;\n+};\n+\n+/* Internal message descriptor for VF <-> IPsec communication */\n+struct inline_ipsec_msg {\n+\tu16 ipsec_opcode;\n+\tu16 req_id;\n+\n+\tunion {\n+\t\t/* IPsec request */\n+\t\tstruct virtchnl_ipsec_sa_cfg sa_cfg[0];\n+\t\tstruct virtchnl_ipsec_sp_cfg sp_cfg[0];\n+\t\tstruct virtchnl_ipsec_sa_update sa_update[0];\n+\t\tstruct virtchnl_ipsec_sa_destroy sa_destroy[0];\n+\t\tstruct virtchnl_ipsec_sp_destroy sp_destroy[0];\n+\n+\t\t/* IPsec response */\n+\t\tstruct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];\n+\t\tstruct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];\n+\t\tstruct virtchnl_ipsec_cap ipsec_cap[0];\n+\t\tstruct virtchnl_ipsec_status ipsec_status[0];\n+\t\t/* response to del_sa, del_sp, update_sa */\n+\t\tstruct virtchnl_ipsec_resp ipsec_resp[0];\n+\n+\t\t/* IPsec event (no req_id is required) */\n+\t\tstruct virtchnl_ipsec_event event[0];\n+\n+\t\t/* Reserved */\n+\t\tstruct virtchnl_ipsec_sa_read sa_read[0];\n+\t} ipsec_data;\n+};\n+\n+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)\n+{\n+\tu16 valid_len = sizeof(struct inline_ipsec_msg);\n+\n+\tswitch (opcode) {\n+\tcase INLINE_IPSEC_OP_GET_CAP:\n+\tcase INLINE_IPSEC_OP_GET_STATUS:\n+\t\tbreak;\n+\tcase INLINE_IPSEC_OP_SA_CREATE:\n+\t\tvalid_len += sizeof(struct virtchnl_ipsec_sa_cfg);\n+\t\tbreak;\n+\tcase INLINE_IPSEC_OP_SP_CREATE:\n+\t\tvalid_len += sizeof(struct virtchnl_ipsec_sp_cfg);\n+\t\tbreak;\n+\tcase INLINE_IPSEC_OP_SA_UPDATE:\n+\t\tvalid_len += sizeof(struct virtchnl_ipsec_sa_update);\n+\t\tbreak;\n+\tcase INLINE_IPSEC_OP_SA_DESTROY:\n+\t\tvalid_len += sizeof(struct virtchnl_ipsec_sa_destroy);\n+\t\tbreak;\n+\tcase INLINE_IPSEC_OP_SP_DESTROY:\n+\t\tvalid_len += sizeof(struct virtchnl_ipsec_sp_destroy);\n+\t\tbreak;\n+\t/* Only for msg length calculation of response to VF in case of\n+\t * inline ipsec failure.\n+\t */\n+\tcase INLINE_IPSEC_OP_RESP:\n+\t\tvalid_len += sizeof(struct virtchnl_ipsec_resp);\n+\t\tbreak;\n+\tdefault:\n+\t\tvalid_len = 0;\n+\t\tbreak;\n+\t}\n+\n+\treturn valid_len;\n+}\n+\n+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */\ndiff --git 
a/drivers/common/idpf/meson.build b/drivers/common/idpf/meson.build\nnew file mode 100644\nindex 0000000000..26881166e9\n--- /dev/null\n+++ b/drivers/common/idpf/meson.build\n@@ -0,0 +1,4 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2022 Intel Corporation\n+\n+subdir('base')\n\\ No newline at end of file\ndiff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map\nnew file mode 100644\nindex 0000000000..bfb246c752\n--- /dev/null\n+++ b/drivers/common/idpf/version.map\n@@ -0,0 +1,12 @@\n+INTERNAL {\n+\tglobal:\n+\n+\tidpf_ctlq_deinit;\n+\tidpf_ctlq_init;\n+\tidpf_ctlq_clean_sq;\n+\tidpf_ctlq_recv;\n+\tidpf_ctlq_send;\n+\tidpf_ctlq_post_rx_buffs;\n+\n+\tlocal: *;\n+};\ndiff --git a/drivers/common/meson.build b/drivers/common/meson.build\nindex ea261dd70a..b63d899d50 100644\n--- a/drivers/common/meson.build\n+++ b/drivers/common/meson.build\n@@ -6,6 +6,7 @@ drivers = [\n         'cpt',\n         'dpaax',\n         'iavf',\n+        'idpf',\n         'mvep',\n         'octeontx',\n ]\n",
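The inline_ipsec_msg layout in the patch above is a fixed four-byte preamble (ipsec_opcode plus req_id) followed by an opcode-specific payload, and virtchnl_inline_ipsec_val_msg_len() returns the minimum valid length for each opcode, with 0 signalling an unknown opcode. Below is a minimal, self-contained C sketch of how a receiver might apply that length check before dispatching a message; the demo_* names and the trimmed structs are illustrative stand-ins, not part of this patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t u16;
typedef uint32_t u32;

/* Trimmed mirror of struct inline_ipsec_msg: fixed header, payload after. */
struct demo_ipsec_msg {
	u16 ipsec_opcode;
	u16 req_id;
};

/* Trimmed stand-in for struct virtchnl_ipsec_sa_update (three u32 fields). */
struct demo_sa_update {
	u32 sa_index;
	u32 esn_hi;
	u32 esn_low;
};

/* Same shape as virtchnl_inline_ipsec_val_msg_len(): header size plus an
 * opcode-specific payload size; 0 rejects unknown opcodes. */
static u16 demo_val_msg_len(u16 opcode)
{
	u16 valid_len = sizeof(struct demo_ipsec_msg);

	switch (opcode) {
	case 0: /* GET_CAP: no payload */
	case 1: /* GET_STATUS: no payload */
		break;
	case 3: /* SA_UPDATE: fixed-size payload */
		valid_len += sizeof(struct demo_sa_update);
		break;
	default:
		valid_len = 0;
		break;
	}
	return valid_len;
}

int main(void)
{
	uint8_t buf[64];
	struct demo_ipsec_msg *msg = (struct demo_ipsec_msg *)buf;
	u16 recv_len = sizeof(struct demo_ipsec_msg) +
		       sizeof(struct demo_sa_update);

	memset(buf, 0, sizeof(buf));
	msg->ipsec_opcode = 3; /* SA_UPDATE */

	u16 need = demo_val_msg_len(msg->ipsec_opcode);
	if (need == 0 || recv_len < need)
		printf("reject: unknown opcode or truncated message\n");
	else
		printf("dispatch opcode %u (%u bytes required)\n",
		       msg->ipsec_opcode, need);
	return 0;
}

Returning 0 for unknown opcodes lets the caller fold the unknown-opcode case and the short-buffer case into a single rejection path.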
    "prefixes": [
        "v13",
        "01/18"
    ]
}
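For reference, the advanced split-queue Rx descriptors defined in virtchnl_lan_desc.h earlier in this patch pack the packet length, generation bit, and buffer-queue id into the single little-endian word pktlen_gen_bufq_id (plen=[13:0], gen=[14:14], bufq_id=[15:15], per the comments above). The following minimal C sketch decodes that word; the DEMO_* mask names and the example value are illustrative rather than defined by the patch, and a little-endian host is assumed so no byte swap is shown.

#include <stdint.h>
#include <stdio.h>

#define DEMO_RX_PLEN_M    0x3FFFu /* bits [13:0]: packet length */
#define DEMO_RX_GEN_S     14      /* bit  [14]:   generation (splitq only) */
#define DEMO_RX_BUFQ_ID_S 15      /* bit  [15]:   buffer queue id (splitq only) */

int main(void)
{
	/* Example writeback value: plen=1514, gen=1, bufq_id=0 */
	uint16_t qw = (uint16_t)(1514u | (1u << DEMO_RX_GEN_S));

	uint16_t plen = qw & DEMO_RX_PLEN_M;
	uint16_t gen = (qw >> DEMO_RX_GEN_S) & 1u;
	uint16_t bufq = (qw >> DEMO_RX_BUFQ_ID_S) & 1u;

	printf("plen=%u gen=%u bufq_id=%u\n", plen, gen, bufq);
	return 0;
}

In a real split-queue poll loop the driver would compare the decoded gen bit against the queue's expected generation value to distinguish freshly written descriptors from stale ones without having to clear the ring between passes.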