get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (replaces all writable fields).

GET /api/patches/78049/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 78049,
    "url": "http://patchwork.dpdk.org/api/patches/78049/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20200917115332.45663-1-haiyue.wang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200917115332.45663-1-haiyue.wang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200917115332.45663-1-haiyue.wang@intel.com",
    "date": "2020-09-17T11:53:32",
    "name": "[v1] net/ice: enhance the FlexiMD handling",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f7ec23f591bde46c61921d42491b1c8ae6fb1000",
    "submitter": {
        "id": 1044,
        "url": "http://patchwork.dpdk.org/api/people/1044/?format=api",
        "name": "Wang, Haiyue",
        "email": "haiyue.wang@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "http://patchwork.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20200917115332.45663-1-haiyue.wang@intel.com/mbox/",
    "series": [
        {
            "id": 12321,
            "url": "http://patchwork.dpdk.org/api/series/12321/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=12321",
            "date": "2020-09-17T11:53:32",
            "name": "[v1] net/ice: enhance the FlexiMD handling",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/12321/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/78049/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/78049/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 2B4BAA04BB;\n\tThu, 17 Sep 2020 14:04:49 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 84DA71D62D;\n\tThu, 17 Sep 2020 14:04:48 +0200 (CEST)",
            "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n by dpdk.org (Postfix) with ESMTP id 0332C1D62A\n for <dev@dpdk.org>; Thu, 17 Sep 2020 14:04:46 +0200 (CEST)",
            "from orsmga007.jf.intel.com ([10.7.209.58])\n by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 17 Sep 2020 05:04:45 -0700",
            "from npg-dpdk-haiyue-3.sh.intel.com ([10.67.118.160])\n by orsmga007.jf.intel.com with ESMTP; 17 Sep 2020 05:04:42 -0700"
        ],
        "IronPort-SDR": [
            "\n dEbgtryy+FU3g2l2XcLvaHQpDK1k4NyBrv/z2vVNSkV9VDvw6j+EddwiT7SYWNKrWJ+PK9TwJ4\n gsK0DsHckWxw==",
            "\n 7oLrPEwRoZz6KX0FjuUY4uDSCISKDeNUM5aL7uz1rUtbXdKQgOegdEzzSAN+K9xim0G4eARTkz\n 0v/ZcChbaU1g=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9746\"; a=\"139185851\"",
            "E=Sophos;i=\"5.76,436,1592895600\"; d=\"scan'208\";a=\"139185851\"",
            "E=Sophos;i=\"5.76,436,1592895600\"; d=\"scan'208\";a=\"346601674\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Haiyue Wang <haiyue.wang@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "qi.z.zhang@intel.com, junyux.jiang@intel.com, leyi.rong@intel.com,\n qiming.yang@intel.com, guinanx.sun@intel.com, junfeng.guo@intel.com,\n jia.guo@intel.com, Haiyue Wang <haiyue.wang@intel.com>",
        "Date": "Thu, 17 Sep 2020 19:53:32 +0800",
        "Message-Id": "<20200917115332.45663-1-haiyue.wang@intel.com>",
        "X-Mailer": "git-send-email 2.28.0",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v1] net/ice: enhance the FlexiMD handling",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The ice hardware supports different FlexiMDs selection into mbuf, it can\nhave different offset in the Rx descriptor for single FlexiMD, like flow\nid. So it needs to handle the FlexiMD extraction according to the RXDID.\n\nSigned-off-by: Haiyue Wang <haiyue.wang@intel.com>\n---\n drivers/net/ice/ice_rxtx.c | 260 ++++++++++++++++++++++---------------\n drivers/net/ice/ice_rxtx.h |   5 +\n 2 files changed, 160 insertions(+), 105 deletions(-)",
    "diff": "diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c\nindex fef6ad454..b379741ae 100644\n--- a/drivers/net/ice/ice_rxtx.c\n+++ b/drivers/net/ice/ice_rxtx.c\n@@ -25,40 +25,6 @@ uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;\n uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;\n uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;\n \n-static inline uint64_t\n-ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)\n-{\n-\tstatic struct {\n-\t\tuint64_t *ol_flag;\n-\t\tbool chk_valid;\n-\t} ol_flag_map[] = {\n-\t\t[ICE_RXDID_COMMS_AUX_VLAN] = {\n-\t\t\t&rte_net_ice_dynflag_proto_xtr_vlan_mask, true },\n-\t\t[ICE_RXDID_COMMS_AUX_IPV4] = {\n-\t\t\t&rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },\n-\t\t[ICE_RXDID_COMMS_AUX_IPV6] = {\n-\t\t\t&rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },\n-\t\t[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {\n-\t\t\t&rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },\n-\t\t[ICE_RXDID_COMMS_AUX_TCP] = {\n-\t\t\t&rte_net_ice_dynflag_proto_xtr_tcp_mask, true },\n-\t\t[ICE_RXDID_COMMS_AUX_IP_OFFSET] = {\n-\t\t\t&rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },\n-\t};\n-\tuint64_t *ol_flag;\n-\n-\tif (rxdid < RTE_DIM(ol_flag_map)) {\n-\t\tol_flag = ol_flag_map[rxdid].ol_flag;\n-\t\tif (!ol_flag)\n-\t\t\treturn 0ULL;\n-\n-\t\t*chk_valid = ol_flag_map[rxdid].chk_valid;\n-\t\treturn *ol_flag;\n-\t}\n-\n-\treturn 0ULL;\n-}\n-\n static inline uint8_t\n ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)\n {\n@@ -76,6 +42,156 @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)\n \t\t\t\trxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;\n }\n \n+static inline void\n+ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,\n+\t\t\t\t   struct rte_mbuf *mb,\n+\t\t\t\t   volatile union ice_rx_flex_desc *rxdp)\n+{\n+\tvolatile struct ice_32b_rx_flex_desc_comms_ovs *desc =\n+\t\t\t(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;\n+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n+\tuint16_t 
stat_err;\n+#endif\n+\n+\tif (desc->flow_id != 0xFFFFFFFF) {\n+\t\tmb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;\n+\t\tmb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);\n+\t}\n+\n+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n+\tstat_err = rte_le_to_cpu_16(desc->status_error0);\n+\tif (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {\n+\t\tmb->ol_flags |= PKT_RX_RSS_HASH;\n+\t\tmb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);\n+\t}\n+#endif\n+}\n+\n+static inline void\n+ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,\n+\t\t\t\t      struct rte_mbuf *mb,\n+\t\t\t\t      volatile union ice_rx_flex_desc *rxdp)\n+{\n+\tvolatile struct ice_32b_rx_flex_desc_comms *desc =\n+\t\t\t(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;\n+\tuint16_t stat_err;\n+\n+\tstat_err = rte_le_to_cpu_16(desc->status_error0);\n+\tif (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {\n+\t\tmb->ol_flags |= PKT_RX_RSS_HASH;\n+\t\tmb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);\n+\t}\n+\n+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n+\tif (desc->flow_id != 0xFFFFFFFF) {\n+\t\tmb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;\n+\t\tmb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);\n+\t}\n+\n+\tif (rxq->xtr_ol_flag) {\n+\t\tuint32_t metadata = 0;\n+\n+\t\tstat_err = rte_le_to_cpu_16(desc->status_error1);\n+\n+\t\tif (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))\n+\t\t\tmetadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);\n+\n+\t\tif (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))\n+\t\t\tmetadata |=\n+\t\t\t\trte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;\n+\n+\t\tif (metadata) {\n+\t\t\tmb->ol_flags |= rxq->xtr_ol_flag;\n+\n+\t\t\t*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;\n+\t\t}\n+\t}\n+#endif\n+}\n+\n+static inline void\n+ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,\n+\t\t\t\t      struct rte_mbuf *mb,\n+\t\t\t\t      volatile union ice_rx_flex_desc *rxdp)\n+{\n+\tvolatile struct 
ice_32b_rx_flex_desc_comms *desc =\n+\t\t\t(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;\n+\tuint16_t stat_err;\n+\n+\tstat_err = rte_le_to_cpu_16(desc->status_error0);\n+\tif (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {\n+\t\tmb->ol_flags |= PKT_RX_RSS_HASH;\n+\t\tmb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);\n+\t}\n+\n+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n+\tif (desc->flow_id != 0xFFFFFFFF) {\n+\t\tmb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;\n+\t\tmb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);\n+\t}\n+\n+\tif (rxq->xtr_ol_flag) {\n+\t\tuint32_t metadata = 0;\n+\n+\t\tif (desc->flex_ts.flex.aux0 != 0xFFFF)\n+\t\t\tmetadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);\n+\t\telse if (desc->flex_ts.flex.aux1 != 0xFFFF)\n+\t\t\tmetadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);\n+\n+\t\tif (metadata) {\n+\t\t\tmb->ol_flags |= rxq->xtr_ol_flag;\n+\n+\t\t\t*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;\n+\t\t}\n+\t}\n+#endif\n+}\n+\n+static void\n+ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)\n+{\n+\tswitch (rxdid) {\n+\tcase ICE_RXDID_COMMS_AUX_VLAN:\n+\t\trxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;\n+\t\trxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;\n+\t\tbreak;\n+\n+\tcase ICE_RXDID_COMMS_AUX_IPV4:\n+\t\trxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;\n+\t\trxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;\n+\t\tbreak;\n+\n+\tcase ICE_RXDID_COMMS_AUX_IPV6:\n+\t\trxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;\n+\t\trxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;\n+\t\tbreak;\n+\n+\tcase ICE_RXDID_COMMS_AUX_IPV6_FLOW:\n+\t\trxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;\n+\t\trxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;\n+\t\tbreak;\n+\n+\tcase ICE_RXDID_COMMS_AUX_TCP:\n+\t\trxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;\n+\t\trxq->rxd_to_pkt_fields = 
ice_rxd_to_pkt_fields_by_comms_aux_v1;\n+\t\tbreak;\n+\n+\tcase ICE_RXDID_COMMS_AUX_IP_OFFSET:\n+\t\trxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;\n+\t\trxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;\n+\t\tbreak;\n+\n+\tcase ICE_RXDID_COMMS_OVS:\n+\t\t/* fall-through */\n+\tdefault:\n+\t\trxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;\n+\t\tbreak;\n+\t}\n+\n+\tif (!rte_net_ice_dynf_proto_xtr_metadata_avail())\n+\t\trxq->xtr_ol_flag = 0;\n+}\n+\n static enum ice_status\n ice_program_hw_rx_queue(struct ice_rx_queue *rxq)\n {\n@@ -158,6 +274,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)\n \t\treturn -EINVAL;\n \t}\n \n+\tice_select_rxd_to_pkt_fields_handler(rxq, rxdid);\n+\n \t/* Enable Flexible Descriptors in the queue context which\n \t * allows this driver to select a specific receive descriptor format\n \t */\n@@ -1338,74 +1456,6 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)\n \t\t   mb->vlan_tci, mb->vlan_tci_outer);\n }\n \n-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n-#define ICE_RX_PROTO_XTR_VALID \\\n-\t((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \\\n-\t (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))\n-\n-static void\n-ice_rxd_to_proto_xtr(struct rte_mbuf *mb,\n-\t\t     volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)\n-{\n-\tuint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);\n-\tuint32_t metadata = 0;\n-\tuint64_t ol_flag;\n-\tbool chk_valid;\n-\n-\tol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);\n-\tif (unlikely(!ol_flag))\n-\t\treturn;\n-\n-\tif (chk_valid) {\n-\t\tif (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))\n-\t\t\tmetadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);\n-\n-\t\tif (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))\n-\t\t\tmetadata |=\n-\t\t\t\trte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;\n-\t} else {\n-\t\tif (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)\n-\t\t\tmetadata 
= rte_le_to_cpu_16(desc->flex_ts.flex.aux0);\n-\t\telse if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)\n-\t\t\tmetadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);\n-\t}\n-\n-\tif (!metadata)\n-\t\treturn;\n-\n-\tmb->ol_flags |= ol_flag;\n-\n-\t*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;\n-}\n-#endif\n-\n-static inline void\n-ice_rxd_to_pkt_fields(struct rte_mbuf *mb,\n-\t\t      volatile union ice_rx_flex_desc *rxdp)\n-{\n-\tvolatile struct ice_32b_rx_flex_desc_comms_ovs *desc =\n-\t\t\t(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;\n-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n-\tuint16_t stat_err;\n-\n-\tstat_err = rte_le_to_cpu_16(desc->status_error0);\n-\tif (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {\n-\t\tmb->ol_flags |= PKT_RX_RSS_HASH;\n-\t\tmb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);\n-\t}\n-#endif\n-\n-\tif (desc->flow_id != 0xFFFFFFFF) {\n-\t\tmb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;\n-\t\tmb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);\n-\t}\n-\n-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n-\tif (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))\n-\t\tice_rxd_to_proto_xtr(mb, desc);\n-#endif\n-}\n-\n #define ICE_LOOK_AHEAD 8\n #if (ICE_LOOK_AHEAD != 8)\n #error \"PMD ICE: ICE_LOOK_AHEAD must be 8\\n\"\n@@ -1463,7 +1513,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)\n \t\t\tmb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &\n \t\t\t\trte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];\n \t\t\tice_rxd_to_vlan_tci(mb, &rxdp[j]);\n-\t\t\tice_rxd_to_pkt_fields(mb, &rxdp[j]);\n+\t\t\trxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);\n \n \t\t\tmb->ol_flags |= pkt_flags;\n \t\t}\n@@ -1760,7 +1810,7 @@ ice_recv_scattered_pkts(void *rx_queue,\n \t\tfirst_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &\n \t\t\trte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];\n \t\tice_rxd_to_vlan_tci(first_seg, &rxd);\n-\t\tice_rxd_to_pkt_fields(first_seg, &rxd);\n+\t\trxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);\n 
\t\tpkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);\n \t\tfirst_seg->ol_flags |= pkt_flags;\n \t\t/* Prefetch data of first segment, if configured to do so. */\n@@ -2160,7 +2210,7 @@ ice_recv_pkts(void *rx_queue,\n \t\trxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &\n \t\t\trte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];\n \t\tice_rxd_to_vlan_tci(rxm, &rxd);\n-\t\tice_rxd_to_pkt_fields(rxm, &rxd);\n+\t\trxq->rxd_to_pkt_fields(rxq, rxm, &rxd);\n \t\tpkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);\n \t\trxm->ol_flags |= pkt_flags;\n \t\t/* copy old mbuf to rx_pkts */\ndiff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h\nindex 9fa57b3b2..7a2a77d48 100644\n--- a/drivers/net/ice/ice_rxtx.h\n+++ b/drivers/net/ice/ice_rxtx.h\n@@ -42,6 +42,9 @@\n \n typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);\n typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);\n+typedef void (*ice_rxd_to_pkt_fields_t)(__rte_unused struct ice_rx_queue *rxq,\n+\t\t\t\t\tstruct rte_mbuf *mb,\n+\t\t\t\t\tvolatile union ice_rx_flex_desc *rxdp);\n \n struct ice_rx_entry {\n \tstruct rte_mbuf *mbuf;\n@@ -82,6 +85,8 @@ struct ice_rx_queue {\n \tbool q_set; /* indicate if rx queue has been configured */\n \tbool rx_deferred_start; /* don't start this queue in dev start */\n \tuint8_t proto_xtr; /* Protocol extraction from flexible descriptor */\n+\tuint64_t xtr_ol_flag; /* Protocol extraction offload flag */\n+\tice_rxd_to_pkt_fields_t rxd_to_pkt_fields;\n \tice_rx_release_mbufs_t rx_rel_mbufs;\n };\n \n",
    "prefixes": [
        "v1"
    ]
}