get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
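
The raw exchange below shows the endpoint being read with a plain HTTP GET. As a minimal sketch of the same lookup from a script (an assumption for illustration, not official tooling; it relies on Python 3 with the third-party requests package, and the URL and field names are taken from the response body shown below):

#!/usr/bin/env python3
# Fetch a single patch from the Patchwork REST API and print a few of its fields.
# Assumes the third-party "requests" package is installed (pip install requests).
import requests

URL = "http://patchwork.dpdk.org/api/patches/119226/"

resp = requests.get(URL, timeout=30)
resp.raise_for_status()                 # fail loudly on anything other than HTTP 200
patch = resp.json()                     # the JSON document shown below

print(patch["name"])                    # "[v14,12/18] net/idpf: support parsing packet type"
print(patch["state"])                   # "superseded"
print(patch["submitter"]["email"])      # "junfeng.guo@intel.com"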

GET /api/patches/119226/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 119226,
    "url": "http://patchwork.dpdk.org/api/patches/119226/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221027074729.1494529-13-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221027074729.1494529-13-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221027074729.1494529-13-junfeng.guo@intel.com",
    "date": "2022-10-27T07:47:23",
    "name": "[v14,12/18] net/idpf: support parsing packet type",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "043d53193e202caca4301dd2996c201ca54a50ce",
    "submitter": {
        "id": 1785,
        "url": "http://patchwork.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221027074729.1494529-13-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25451,
            "url": "http://patchwork.dpdk.org/api/series/25451/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25451",
            "date": "2022-10-27T07:47:11",
            "name": "add support for idpf PMD in DPDK",
            "version": 14,
            "mbox": "http://patchwork.dpdk.org/series/25451/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/119226/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/119226/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B9129A00C5;\n\tThu, 27 Oct 2022 09:51:03 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 52E8342C19;\n\tThu, 27 Oct 2022 09:50:00 +0200 (CEST)",
            "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n by mails.dpdk.org (Postfix) with ESMTP id 6217D42BB9\n for <dev@dpdk.org>; Thu, 27 Oct 2022 09:49:54 +0200 (CEST)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 27 Oct 2022 00:49:39 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by orsmga004.jf.intel.com with ESMTP; 27 Oct 2022 00:49:37 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666856994; x=1698392994;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=YJnaqFz+SWP+VLOVcYmfHlna5NUhI7+w/bfD3hIV+3g=;\n b=hywe5D0yUK+wpDB07Cl+jfLj8Lri2SAD02q94HBG0YnB5jeNyapEzxo4\n s47AiGfJ/Cx1mH3wFe7yHzUICijzqMR36tAB/SCuhKGubM+Xyu05gIGss\n FooQSOiSv0Ph2BBgTNG1EmpwH5VLa5nN8TltSRmksvsgcMXXKGF2Q665h\n mLUAKiFgiAYQcww+ZjlrrdKLRnDgo/7AaCr9peQpsAk3HfZi8wiCafbEW\n BWungKUXbl0E0XAu8CqHBasdiglRBJwSt9UhfGst0Z9HEZkIgAfZAqCWZ\n KJZ1em4qTREmU3ybxwkQNCEabEHU1Evmtt5r54VEa0zQxptnDMglJR4TU w==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10512\"; a=\"287873200\"",
            "E=Sophos;i=\"5.95,217,1661842800\"; d=\"scan'208\";a=\"287873200\"",
            "E=McAfee;i=\"6500,9779,10512\"; a=\"757607737\"",
            "E=Sophos;i=\"5.95,217,1661842800\"; d=\"scan'208\";a=\"757607737\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,\n jingjing.wu@intel.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,\n Wenjun Wu <wenjun1.wu@intel.com>",
        "Subject": "[PATCH v14 12/18] net/idpf: support parsing packet type",
        "Date": "Thu, 27 Oct 2022 15:47:23 +0800",
        "Message-Id": "<20221027074729.1494529-13-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221027074729.1494529-1-junfeng.guo@intel.com>",
        "References": "<20221027054505.1369248-2-junfeng.guo@intel.com>\n <20221027074729.1494529-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Parse packet type during receiving packets.\n\nSigned-off-by: Wenjun Wu <wenjun1.wu@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n drivers/net/idpf/idpf_ethdev.c |   6 +\n drivers/net/idpf/idpf_ethdev.h |   6 +\n drivers/net/idpf/idpf_rxtx.c   |  11 ++\n drivers/net/idpf/idpf_rxtx.h   |   5 +\n drivers/net/idpf/idpf_vchnl.c  | 240 +++++++++++++++++++++++++++++++++\n 5 files changed, 268 insertions(+)",
    "diff": "diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nindex 6fb56e584d..630bdabcd4 100644\n--- a/drivers/net/idpf/idpf_ethdev.c\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -709,6 +709,12 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)\n \t\tgoto err_api;\n \t}\n \n+\tret = idpf_get_pkt_type(adapter);\n+\tif (ret != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to set ptype table\");\n+\t\tgoto err_api;\n+\t}\n+\n \tadapter->caps = rte_zmalloc(\"idpf_caps\",\n \t\t\t\tsizeof(struct virtchnl2_get_capabilities), 0);\n \tif (adapter->caps == NULL) {\ndiff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h\nindex af0a8e2970..db9af58f72 100644\n--- a/drivers/net/idpf/idpf_ethdev.h\n+++ b/drivers/net/idpf/idpf_ethdev.h\n@@ -39,6 +39,8 @@\n \n #define IDPF_NUM_MACADDR_MAX\t64\n \n+#define IDPF_MAX_PKT_TYPE\t1024\n+\n #define IDPF_VLAN_TAG_SIZE\t4\n #define IDPF_ETH_OVERHEAD \\\n \t(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2)\n@@ -125,6 +127,8 @@ struct idpf_adapter {\n \t/* Max config queue number per VC message */\n \tuint32_t max_rxq_per_msg;\n \tuint32_t max_txq_per_msg;\n+\n+\tuint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;\n };\n \n TAILQ_HEAD(idpf_adapter_list, idpf_adapter);\n@@ -182,6 +186,7 @@ atomic_set_cmd(struct idpf_adapter *adapter, enum virtchnl_ops ops)\n struct idpf_adapter *idpf_find_adapter(struct rte_pci_device *pci_dev);\n void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);\n int idpf_vc_check_api_version(struct idpf_adapter *adapter);\n+int idpf_get_pkt_type(struct idpf_adapter *adapter);\n int idpf_vc_get_caps(struct idpf_adapter *adapter);\n int idpf_vc_create_vport(struct idpf_adapter *adapter);\n int idpf_vc_destroy_vport(struct idpf_vport *vport);\n@@ -193,6 +198,7 @@ int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,\n \t\t      bool rx, bool on);\n int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);\n int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);\n+int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);\n int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,\n \t\t      uint16_t buf_len, uint8_t *buf);\n \ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nindex f55d2143b9..a980714060 100644\n--- a/drivers/net/idpf/idpf_rxtx.c\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -1281,6 +1281,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \tvolatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;\n \tuint16_t pktlen_gen_bufq_id;\n \tstruct idpf_rx_queue *rxq;\n+\tconst uint32_t *ptype_tbl;\n \tstruct rte_mbuf *rxm;\n \tuint16_t rx_id_bufq1;\n \tuint16_t rx_id_bufq2;\n@@ -1300,6 +1301,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \trx_id_bufq1 = rxq->bufq1->rx_next_avail;\n \trx_id_bufq2 = rxq->bufq2->rx_next_avail;\n \trx_desc_ring = rxq->rx_ring;\n+\tptype_tbl = rxq->adapter->ptype_tbl;\n \n \twhile (nb_rx < nb_pkts) {\n \t\trx_desc = &rx_desc_ring[rx_id];\n@@ -1347,6 +1349,10 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\trxm->next = NULL;\n \t\trxm->nb_segs = 1;\n \t\trxm->port = rxq->port_id;\n+\t\trxm->packet_type =\n+\t\t\tptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) &\n+\t\t\t\t   VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>\n+\t\t\t\t  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];\n \n \t\trx_pkts[nb_rx++] = rxm;\n \t}\n@@ -1533,6 +1539,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,\n \tvolatile union virtchnl2_rx_desc *rxdp;\n \tunion virtchnl2_rx_desc rxd;\n \tstruct idpf_rx_queue *rxq;\n+\tconst uint32_t *ptype_tbl;\n \tuint16_t rx_id, nb_hold;\n \tstruct rte_eth_dev *dev;\n \tuint16_t rx_packet_len;\n@@ -1551,6 +1558,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \n \trx_id = rxq->rx_tail;\n \trx_ring = rxq->rx_ring;\n+\tptype_tbl = rxq->adapter->ptype_tbl;\n \n \twhile (nb_rx < nb_pkts) {\n \t\trxdp = &rx_ring[rx_id];\n@@ -1603,6 +1611,9 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\trxm->pkt_len = rx_packet_len;\n \t\trxm->data_len = rx_packet_len;\n \t\trxm->port = rxq->port_id;\n+\t\trxm->packet_type =\n+\t\t\tptype_tbl[(uint8_t)(rte_cpu_to_le_16(rxd.flex_nic_wb.ptype_flex_flags0) &\n+\t\t\t\t\t    VIRTCHNL2_RX_FLEX_DESC_PTYPE_M)];\n \n \t\trx_pkts[nb_rx++] = rxm;\n \t}\ndiff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h\nindex 30dc94b3dd..3853ed55c9 100644\n--- a/drivers/net/idpf/idpf_rxtx.h\n+++ b/drivers/net/idpf/idpf_rxtx.h\n@@ -23,6 +23,10 @@\n \n #define IDPF_TX_MAX_MTU_SEG\t10\n \n+#define IDPF_GET_PTYPE_SIZE(p) \\\n+\t(sizeof(struct virtchnl2_ptype) + \\\n+\t(((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))\n+\n struct idpf_rx_queue {\n \tstruct idpf_adapter *adapter;   /* the adapter this queue belongs to */\n \tstruct rte_mempool *mp;         /* mbuf pool to populate Rx ring */\n@@ -150,4 +154,5 @@ void idpf_stop_queues(struct rte_eth_dev *dev);\n \n void idpf_set_rx_function(struct rte_eth_dev *dev);\n void idpf_set_tx_function(struct rte_eth_dev *dev);\n+\n #endif /* _IDPF_RXTX_H_ */\ndiff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c\nindex 1ba59929a0..ac399d331a 100644\n--- a/drivers/net/idpf/idpf_vchnl.c\n+++ b/drivers/net/idpf/idpf_vchnl.c\n@@ -234,6 +234,11 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)\n \t\terr = idpf_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);\n \t\tclear_cmd(adapter);\n \t\tbreak;\n+\tcase VIRTCHNL2_OP_GET_PTYPE_INFO:\n+\t\t/* for multuple response message,\n+\t\t * do not handle the response here.\n+\t\t */\n+\t\tbreak;\n \tdefault:\n \t\t/* For other virtchnl ops in running time,\n \t\t * wait for the cmd done flag.\n@@ -298,6 +303,215 @@ idpf_vc_check_api_version(struct idpf_adapter *adapter)\n \treturn 0;\n }\n \n+int __rte_cold\n+idpf_get_pkt_type(struct idpf_adapter *adapter)\n+{\n+\tstruct virtchnl2_get_ptype_info *ptype_info;\n+\tuint16_t ptype_recvd = 0, ptype_offset, i, j;\n+\tint ret;\n+\n+\tret = idpf_vc_query_ptype_info(adapter);\n+\tif (ret != 0) {\n+\t\tPMD_DRV_LOG(ERR, \"Fail to query packet type information\");\n+\t\treturn ret;\n+\t}\n+\n+\tptype_info = rte_zmalloc(\"ptype_info\", IDPF_DFLT_MBX_BUF_SIZE, 0);\n+\t\tif (ptype_info == NULL)\n+\t\t\treturn -ENOMEM;\n+\n+\twhile (ptype_recvd < IDPF_MAX_PKT_TYPE) {\n+\t\tret = idpf_read_one_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,\n+\t\t\t\t\tIDPF_DFLT_MBX_BUF_SIZE, (u8 *)ptype_info);\n+\t\tif (ret != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Fail to get packet type information\");\n+\t\t\tgoto free_ptype_info;\n+\t\t}\n+\n+\t\tptype_recvd += ptype_info->num_ptypes;\n+\t\tptype_offset = sizeof(struct virtchnl2_get_ptype_info) -\n+\t\t\t\t\t\tsizeof(struct virtchnl2_ptype);\n+\n+\t\tfor (i = 0; i < rte_cpu_to_le_16(ptype_info->num_ptypes); i++) {\n+\t\t\tbool is_inner = false, is_ip = false;\n+\t\t\tstruct virtchnl2_ptype *ptype;\n+\t\t\tuint32_t proto_hdr = 
0;\n+\n+\t\t\tptype = (struct virtchnl2_ptype *)\n+\t\t\t\t\t((u8 *)ptype_info + ptype_offset);\n+\t\t\tptype_offset += IDPF_GET_PTYPE_SIZE(ptype);\n+\t\t\tif (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {\n+\t\t\t\tret = -EINVAL;\n+\t\t\t\tgoto free_ptype_info;\n+\t\t\t}\n+\n+\t\t\tif (rte_cpu_to_le_16(ptype->ptype_id_10) == 0xFFFF)\n+\t\t\t\tgoto free_ptype_info;\n+\n+\t\t\tfor (j = 0; j < ptype->proto_id_count; j++) {\n+\t\t\t\tswitch (rte_cpu_to_le_16(ptype->proto_id[j])) {\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_GRE:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_VXLAN:\n+\t\t\t\t\tproto_hdr &= ~RTE_PTYPE_L4_MASK;\n+\t\t\t\t\tproto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;\n+\t\t\t\t\tis_inner = true;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_MAC:\n+\t\t\t\t\tif (is_inner) {\n+\t\t\t\t\t\tproto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_INNER_L2_ETHER;\n+\t\t\t\t\t} else {\n+\t\t\t\t\t\tproto_hdr &= ~RTE_PTYPE_L2_MASK;\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_L2_ETHER;\n+\t\t\t\t\t}\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_VLAN:\n+\t\t\t\t\tif (is_inner) {\n+\t\t\t\t\t\tproto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;\n+\t\t\t\t\t}\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_PTP:\n+\t\t\t\t\tproto_hdr &= ~RTE_PTYPE_L2_MASK;\n+\t\t\t\t\tproto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_LLDP:\n+\t\t\t\t\tproto_hdr &= ~RTE_PTYPE_L2_MASK;\n+\t\t\t\t\tproto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_ARP:\n+\t\t\t\t\tproto_hdr &= ~RTE_PTYPE_L2_MASK;\n+\t\t\t\t\tproto_hdr |= RTE_PTYPE_L2_ETHER_ARP;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_PPPOE:\n+\t\t\t\t\tproto_hdr &= ~RTE_PTYPE_L2_MASK;\n+\t\t\t\t\tproto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_IPV4:\n+\t\t\t\t\tif (!is_ip) {\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;\n+\t\t\t\t\t\tis_ip = true;\n+\t\t\t\t\t} else {\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n+\t\t\t\t\t\t\t     RTE_PTYPE_TUNNEL_IP;\n+\t\t\t\t\t\tis_inner = true;\n+\t\t\t\t\t}\n+\t\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_IPV6:\n+\t\t\t\t\tif (!is_ip) {\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;\n+\t\t\t\t\t\tis_ip = true;\n+\t\t\t\t\t} else {\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n+\t\t\t\t\t\t\t     RTE_PTYPE_TUNNEL_IP;\n+\t\t\t\t\t\tis_inner = true;\n+\t\t\t\t\t}\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_IPV4_FRAG:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_IPV6_FRAG:\n+\t\t\t\t\tif (is_inner)\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_INNER_L4_FRAG;\n+\t\t\t\t\telse\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_L4_FRAG;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_UDP:\n+\t\t\t\t\tif (is_inner)\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_INNER_L4_UDP;\n+\t\t\t\t\telse\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_L4_UDP;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_TCP:\n+\t\t\t\t\tif (is_inner)\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_INNER_L4_TCP;\n+\t\t\t\t\telse\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_L4_TCP;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_SCTP:\n+\t\t\t\t\tif (is_inner)\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_INNER_L4_SCTP;\n+\t\t\t\t\telse\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_L4_SCTP;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_ICMP:\n+\t\t\t\t\tif (is_inner)\n+\t\t\t\t\t\tproto_hdr |= 
RTE_PTYPE_INNER_L4_ICMP;\n+\t\t\t\t\telse\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_L4_ICMP;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_ICMPV6:\n+\t\t\t\t\tif (is_inner)\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_INNER_L4_ICMP;\n+\t\t\t\t\telse\n+\t\t\t\t\t\tproto_hdr |= RTE_PTYPE_L4_ICMP;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_L2TPV2:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_L2TPV3:\n+\t\t\t\t\tis_inner = true;\n+\t\t\t\t\tproto_hdr |= RTE_PTYPE_TUNNEL_L2TP;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_NVGRE:\n+\t\t\t\t\tis_inner = true;\n+\t\t\t\t\tproto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_GTPC_TEID:\n+\t\t\t\t\tis_inner = true;\n+\t\t\t\t\tproto_hdr |= RTE_PTYPE_TUNNEL_GTPC;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_GTPU:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_GTPU_UL:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_GTPU_DL:\n+\t\t\t\t\tis_inner = true;\n+\t\t\t\t\tproto_hdr |= RTE_PTYPE_TUNNEL_GTPU;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_PAY:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_IPV6_EH:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_PRE_MAC:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_POST_MAC:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_ETHERTYPE:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_SVLAN:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_CVLAN:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_MPLS:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_MMPLS:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_CTRL:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_ECP:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_EAPOL:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_PPPOD:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_IGMP:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_AH:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_ESP:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_IKE:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_NATT_KEEP:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_GTP:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_GTP_EH:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_GTPCV2:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_ECPRI:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_VRRP:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_OSPF:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_TUN:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_VXLAN_GPE:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_GENEVE:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_NSH:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_QUIC:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_PFCP:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_PFCP_NODE:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_PFCP_SESSION:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_RTP:\n+\t\t\t\tcase VIRTCHNL2_PROTO_HDR_NO_PROTO:\n+\t\t\t\tdefault:\n+\t\t\t\t\tcontinue;\n+\t\t\t\t}\n+\t\t\t\tadapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+free_ptype_info:\n+\trte_free(ptype_info);\n+\tclear_cmd(adapter);\n+\treturn ret;\n+}\n+\n int\n idpf_vc_get_caps(struct idpf_adapter *adapter)\n {\n@@ -980,3 +1194,29 @@ idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)\n \n \treturn err;\n }\n+\n+int\n+idpf_vc_query_ptype_info(struct idpf_adapter *adapter)\n+{\n+\tstruct virtchnl2_get_ptype_info *ptype_info;\n+\tstruct idpf_cmd_info args;\n+\tint len, err;\n+\n+\tlen = sizeof(struct virtchnl2_get_ptype_info);\n+\tptype_info = rte_zmalloc(\"ptype_info\", len, 0);\n+\tif (ptype_info == NULL)\n+\t\treturn -ENOMEM;\n+\n+\tptype_info->start_ptype_id = 0;\n+\tptype_info->num_ptypes = IDPF_MAX_PKT_TYPE;\n+\targs.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;\n+\targs.in_args = (u8 *)ptype_info;\n+\targs.in_args_size = len;\n+\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err != 0)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of 
VIRTCHNL2_OP_GET_PTYPE_INFO\");\n+\n+\trte_free(ptype_info);\n+\treturn err;\n+}\n",
    "prefixes": [
        "v14",
        "12/18"
    ]
}
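
Several fields in the response ("mbox", "comments", "checks", "series") are themselves URLs to related resources. A minimal sketch of following two of them, again assuming the requests package; the local file name and the per-check field names ("state", "context") are assumptions, since the checks payload is not shown in this response:

import requests

patch = requests.get("http://patchwork.dpdk.org/api/patches/119226/", timeout=30).json()

# Download the raw mbox so the patch can be applied locally.
mbox = requests.get(patch["mbox"], timeout=30)
with open("119226.mbox", "wb") as f:       # arbitrary local file name
    f.write(mbox.content)

# List CI checks attached to the patch; the aggregate "check" field above is "warning".
# The checks endpoint is assumed to return a JSON list of objects.
for check in requests.get(patch["checks"], timeout=30).json():
    print(check.get("state"), check.get("context"))

The saved mbox can then be applied to a local tree with "git am 119226.mbox".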