get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

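The request and response below were captured through the browsable renderer (the ?format=api suffix). As a minimal sketch of fetching the same record programmatically — assuming the Python requests library is available; a non-browser client should receive plain JSON when the format suffix is dropped — the fields used here (name, state, mbox) all appear in the response shown below:

import requests

# Sketch: fetch patch 117738 from the Patchwork REST API as JSON.
url = "http://patchwork.dpdk.org/api/patches/117738/"
patch = requests.get(url, timeout=30).json()

print(patch["name"])   # "[v9,4/4] net/ice: support buffer split in Rx path"
print(patch["state"])  # "accepted"
print(patch["mbox"])   # URL of the raw mbox, suitable for use with "git am"
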
GET /api/patches/117738/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 117738,
    "url": "http://patchwork.dpdk.org/api/patches/117738/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221009202541.352724-5-yuanx.wang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221009202541.352724-5-yuanx.wang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221009202541.352724-5-yuanx.wang@intel.com",
    "date": "2022-10-09T20:25:41",
    "name": "[v9,4/4] net/ice: support buffer split in Rx path",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "af15eb3d792be5ab4675e4eeccbc4100f5bd9943",
    "submitter": {
        "id": 2087,
        "url": "http://patchwork.dpdk.org/api/people/2087/?format=api",
        "name": "Wang, YuanX",
        "email": "yuanx.wang@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221009202541.352724-5-yuanx.wang@intel.com/mbox/",
    "series": [
        {
            "id": 25061,
            "url": "http://patchwork.dpdk.org/api/series/25061/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25061",
            "date": "2022-10-09T20:25:37",
            "name": "support protocol based buffer split",
            "version": 9,
            "mbox": "http://patchwork.dpdk.org/series/25061/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/117738/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/117738/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C0DE9A0542;\n\tSun,  9 Oct 2022 14:40:11 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B41D040146;\n\tSun,  9 Oct 2022 14:40:11 +0200 (CEST)",
            "from mga06.intel.com (mga06b.intel.com [134.134.136.31])\n by mails.dpdk.org (Postfix) with ESMTP id 5326640042\n for <dev@dpdk.org>; Sun,  9 Oct 2022 14:40:10 +0200 (CEST)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 09 Oct 2022 05:40:09 -0700",
            "from unknown (HELO localhost.localdomain) ([10.239.252.55])\n by orsmga008-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 09 Oct 2022 05:40:03 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1665319210; x=1696855210;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=dekXYG7bGLK2Rs56E4XgdRIo9PruPg6JftoIZPOQpOI=;\n b=YxQDDJXbwAS4N5yDxHYsCHoJUvCP6Edz3/P0Hvzielc3K/RN/aLxNNmE\n A1rvBVhk3VLXE0aE1MswBO803D0JN1a8XpYWcR3Ty6yZ5ySRkhGJUsaA8\n TCNsDxCkZuKZPUg/rLyMwqrp1J8WWKqTZ24zPFZOxDyVB3IiVN9aAhTFk\n 7Y2o5ukBWRtwgTWhAq215A3kCgQt83KW3DY86OS7khVWnIESt6lg5HhQH\n ALkTLI4l3nH1K3mcu7LRxQ0u56AAuxRH+M9RGV0swRJBMzRI9C4Ebwu25\n GEvoQZ3i4jOKwwJ8AlZ+dnlP6H4s+cdGNDMSS9Uq29RfZMagNGDsxk0ku g==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10494\"; a=\"366010305\"",
            "E=Sophos;i=\"5.95,171,1661842800\"; d=\"scan'208\";a=\"366010305\"",
            "E=McAfee;i=\"6500,9779,10494\"; a=\"656628454\"",
            "E=Sophos;i=\"5.95,171,1661842800\"; d=\"scan'208\";a=\"656628454\""
        ],
        "From": "Yuan Wang <yuanx.wang@intel.com>",
        "To": "dev@dpdk.org, Ferruh Yigit <ferruh.yigit@amd.com>,\n Qiming Yang <qiming.yang@intel.com>, Qi Zhang <qi.z.zhang@intel.com>",
        "Cc": "thomas@monjalon.net, andrew.rybchenko@oktetlabs.ru,\n ferruh.yigit@xilinx.com, mdr@ashroe.eu, xiaoyun.li@intel.com,\n aman.deep.singh@intel.com, yuying.zhang@intel.com, jerinjacobk@gmail.com,\n viacheslavo@nvidia.com, stephen@networkplumber.org, xuan.ding@intel.com,\n hpothula@marvell.com, yaqi.tang@intel.com,\n Yuan Wang <yuanx.wang@intel.com>, Wenxuan Wu <wenxuanx.wu@intel.com>",
        "Subject": "[PATCH v9 4/4] net/ice: support buffer split in Rx path",
        "Date": "Mon, 10 Oct 2022 04:25:41 +0800",
        "Message-Id": "<20221009202541.352724-5-yuanx.wang@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20221009202541.352724-1-yuanx.wang@intel.com>",
        "References": "<20220812181552.2908067-1-yuanx.wang@intel.com>\n <20221009202541.352724-1-yuanx.wang@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add support for protocol based buffer split in normal Rx\ndata paths. When the Rx queue is configured with specific protocol type,\npackets received will be directly split into protocol header and\npayload parts. And the two parts will be put into different mempools.\n\nCurrently, protocol based buffer split is not supported in vectorized\npaths.\n\nA new API ice_buffer_split_supported_hdr_ptypes_get() has been\nintroduced, it will return the supported header protocols of ice PMD\nto app for splitting.\n\nSigned-off-by: Yuan Wang <yuanx.wang@intel.com>\nSigned-off-by: Xuan Ding <xuan.ding@intel.com>\nSigned-off-by: Wenxuan Wu <wenxuanx.wu@intel.com>\n---\n doc/guides/nics/features/default.ini   |   1 +\n doc/guides/nics/features/ice.ini       |   1 +\n doc/guides/rel_notes/release_22_11.rst |   4 +\n drivers/net/ice/ice_ethdev.c           |  58 +++++-\n drivers/net/ice/ice_rxtx.c             | 263 ++++++++++++++++++++++---\n drivers/net/ice/ice_rxtx.h             |  16 ++\n drivers/net/ice/ice_rxtx_vec_common.h  |   3 +\n 7 files changed, 314 insertions(+), 32 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini\nindex 05e47d7552..1c736ca1aa 100644\n--- a/doc/guides/nics/features/default.ini\n+++ b/doc/guides/nics/features/default.ini\n@@ -7,6 +7,7 @@\n ; string should not exceed feature_str_len defined in conf.py.\n ;\n [Features]\n+Buffer Split on Rx   =\n Speed capabilities   =\n Link status          =\n Link status event    =\ndiff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini\nindex 2f4a5a9a30..b72e83e42e 100644\n--- a/doc/guides/nics/features/ice.ini\n+++ b/doc/guides/nics/features/ice.ini\n@@ -7,6 +7,7 @@\n ; is selected.\n ;\n [Features]\n+Buffer Split on Rx   = P\n Speed capabilities   = Y\n Link status          = Y\n Link status event    = Y\ndiff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst\nindex b4329d4cb0..537cdeee61 100644\n--- a/doc/guides/rel_notes/release_22_11.rst\n+++ b/doc/guides/rel_notes/release_22_11.rst\n@@ -196,6 +196,10 @@ New Features\n   * Supported protocol-based buffer split using added ``proto_hdr``\n     in structure ``rte_eth_rxseg_split``.\n \n+* **Updated Intel ice driver.**\n+\n+  * Added protocol based buffer split support in scalar path.\n+\n \n Removed Items\n -------------\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex 6e21c38152..8618a3e6b7 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -161,6 +161,7 @@ static int ice_timesync_read_time(struct rte_eth_dev *dev,\n static int ice_timesync_write_time(struct rte_eth_dev *dev,\n \t\t\t\t   const struct timespec *timestamp);\n static int ice_timesync_disable(struct rte_eth_dev *dev);\n+static const uint32_t *ice_buffer_split_supported_hdr_ptypes_get(struct rte_eth_dev *dev);\n \n static const struct rte_pci_id pci_id_ice_map[] = {\n \t{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },\n@@ -275,6 +276,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {\n \t.timesync_write_time          = ice_timesync_write_time,\n \t.timesync_disable             = ice_timesync_disable,\n \t.tm_ops_get                   = ice_tm_ops_get,\n+\t.buffer_split_supported_hdr_ptypes_get = ice_buffer_split_supported_hdr_ptypes_get,\n };\n \n /* store statistics names and its offset in stats structure */\n@@ -3802,7 +3804,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\t\tRTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |\n \t\t\tRTE_ETH_RX_OFFLOAD_VLAN_EXTEND |\n \t\t\tRTE_ETH_RX_OFFLOAD_RSS_HASH |\n-\t\t\tRTE_ETH_RX_OFFLOAD_TIMESTAMP;\n+\t\t\tRTE_ETH_RX_OFFLOAD_TIMESTAMP |\n+\t\t\tRTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;\n \t\tdev_info->tx_offload_capa |=\n \t\t\tRTE_ETH_TX_OFFLOAD_QINQ_INSERT |\n \t\t\tRTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\n@@ -3814,7 +3817,7 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\tdev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;\n \t}\n \n-\tdev_info->rx_queue_offload_capa = 0;\n+\tdev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;\n \tdev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;\n \n \tdev_info->reta_size = pf->hash_lut_size;\n@@ -3883,6 +3886,11 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \tdev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;\n \tdev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;\n \n+\tdev_info->rx_seg_capa.max_nseg = ICE_RX_MAX_NSEG;\n+\tdev_info->rx_seg_capa.multi_pools = 
1;\n+\tdev_info->rx_seg_capa.offset_allowed = 0;\n+\tdev_info->rx_seg_capa.offset_align_log2 = 0;\n+\n \treturn 0;\n }\n \n@@ -5960,6 +5968,52 @@ ice_timesync_disable(struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n+static const uint32_t *\n+ice_buffer_split_supported_hdr_ptypes_get(struct rte_eth_dev *dev __rte_unused)\n+{\n+\t/* Buffer split protocol header capability. */\n+\tstatic const uint32_t ptypes[] = {\n+\t\t/* Non tunneled */\n+\t\tRTE_PTYPE_L2_ETHER,\n+\t\tRTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,\n+\t\tRTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,\n+\t\tRTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,\n+\t\tRTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP,\n+\t\tRTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,\n+\t\tRTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,\n+\t\tRTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,\n+\t\tRTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP,\n+\n+\t\t/* Tunneled */\n+\t\tRTE_PTYPE_TUNNEL_GRENAT,\n+\n+\t\tRTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,\n+\n+\t\tRTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |\n+\t\tRTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,\n+\t\tRTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |\n+\t\tRTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,\n+\n+\t\tRTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |\n+\t\tRTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP,\n+\t\tRTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |\n+\t\tRTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP,\n+\t\tRTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |\n+\t\tRTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP,\n+\n+\t\tRTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |\n+\t\tRTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP,\n+\t\tRTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |\n+\t\tRTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP,\n+\t\tRTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |\n+\t\tRTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP,\n+\n+\t\tRTE_PTYPE_UNKNOWN\n+\t};\n+\n+\treturn ptypes;\n+}\n+\n static int\n ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n \t      struct rte_pci_device *pci_dev)\ndiff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c\nindex d1e1fadf9d..697251c603 100644\n--- a/drivers/net/ice/ice_rxtx.c\n+++ b/drivers/net/ice/ice_rxtx.c\n@@ -259,7 +259,6 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)\n \t/* Set buffer size as the head split is disabled. 
*/\n \tbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -\n \t\t\t      RTE_PKTMBUF_HEADROOM);\n-\trxq->rx_hdr_len = 0;\n \trxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));\n \trxq->max_pkt_len =\n \t\tRTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,\n@@ -288,11 +287,91 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)\n \n \tmemset(&rx_ctx, 0, sizeof(rx_ctx));\n \n+\tif (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {\n+\t\tuint32_t proto_hdr;\n+\t\tproto_hdr = rxq->rxseg[0].proto_hdr;\n+\n+\t\tif (proto_hdr == RTE_PTYPE_UNKNOWN) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Buffer split protocol must be configured\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tswitch (proto_hdr & RTE_PTYPE_L4_MASK) {\n+\t\tcase RTE_PTYPE_L4_TCP:\n+\t\tcase RTE_PTYPE_L4_UDP:\n+\t\t\trx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;\n+\t\t\trx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP;\n+\t\t\tgoto set_hsplit_finish;\n+\t\tcase RTE_PTYPE_L4_SCTP:\n+\t\t\trx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;\n+\t\t\trx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;\n+\t\t\tgoto set_hsplit_finish;\n+\t\t}\n+\n+\t\tswitch (proto_hdr & RTE_PTYPE_L3_MASK) {\n+\t\tcase RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:\n+\t\tcase RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:\n+\t\t\trx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;\n+\t\t\trx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP;\n+\t\t\tgoto set_hsplit_finish;\n+\t\t}\n+\n+\t\tswitch (proto_hdr & RTE_PTYPE_L2_MASK) {\n+\t\tcase RTE_PTYPE_L2_ETHER:\n+\t\t\trx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;\n+\t\t\trx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2;\n+\t\t\trx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_L2;\n+\t\t\tgoto set_hsplit_finish;\n+\t\t}\n+\n+\t\tswitch (proto_hdr & RTE_PTYPE_TUNNEL_MASK) {\n+\t\tcase RTE_PTYPE_TUNNEL_GRENAT:\n+\t\t\trx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;\n+\t\t\trx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS;\n+\t\t\tgoto set_hsplit_finish;\n+\t\t}\n+\n+\t\tswitch (proto_hdr & RTE_PTYPE_INNER_L4_MASK) {\n+\t\tcase RTE_PTYPE_INNER_L4_TCP:\n+\t\tcase RTE_PTYPE_INNER_L4_UDP:\n+\t\t\trx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;\n+\t\t\trx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP;\n+\t\t\tgoto set_hsplit_finish;\n+\t\tcase RTE_PTYPE_INNER_L4_SCTP:\n+\t\t\trx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;\n+\t\t\trx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;\n+\t\t\tgoto set_hsplit_finish;\n+\t\t}\n+\n+\t\tswitch (proto_hdr & RTE_PTYPE_INNER_L3_MASK) {\n+\t\tcase RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN:\n+\t\tcase RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN:\n+\t\t\trx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;\n+\t\t\trx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP;\n+\t\t\tgoto set_hsplit_finish;\n+\t\t}\n+\n+\t\tswitch (proto_hdr & RTE_PTYPE_INNER_L2_MASK) {\n+\t\tcase RTE_PTYPE_INNER_L2_ETHER:\n+\t\t\trx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;\n+\t\t\trx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2;\n+\t\t\tgoto set_hsplit_finish;\n+\t\t}\n+\n+\t\tPMD_DRV_LOG(ERR, \"Buffer split protocol is not supported\");\n+\t\treturn -EINVAL;\n+\n+set_hsplit_finish:\n+\t\trxq->rx_hdr_len = ICE_RX_HDR_BUF_SIZE;\n+\t} else {\n+\t\trxq->rx_hdr_len = 0;\n+\t\trx_ctx.dtype = 0; /* No Protocol Based Buffer Split mode */\n+\t}\n+\n \trx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;\n \trx_ctx.qlen = rxq->nb_rx_desc;\n \trx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;\n \trx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;\n-\trx_ctx.dtype = 0; /* No Header Split mode */\n #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n \trx_ctx.dsize = 1; /* 32B descriptors */\n #endif\n@@ 
-378,6 +457,7 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)\n \n \tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n \t\tvolatile union ice_rx_flex_desc *rxd;\n+\t\trxd = &rxq->rx_ring[i];\n \t\tstruct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);\n \n \t\tif (unlikely(!mbuf)) {\n@@ -385,8 +465,6 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)\n \t\t\treturn -ENOMEM;\n \t\t}\n \n-\t\trte_mbuf_refcnt_set(mbuf, 1);\n-\t\tmbuf->next = NULL;\n \t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n \t\tmbuf->nb_segs = 1;\n \t\tmbuf->port = rxq->port_id;\n@@ -394,9 +472,32 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)\n \t\tdma_addr =\n \t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n \n-\t\trxd = &rxq->rx_ring[i];\n-\t\trxd->read.pkt_addr = dma_addr;\n-\t\trxd->read.hdr_addr = 0;\n+\t\tif (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {\n+\t\t\trte_mbuf_refcnt_set(mbuf, 1);\n+\t\t\tmbuf->next = NULL;\n+\t\t\trxd->read.hdr_addr = 0;\n+\t\t\trxd->read.pkt_addr = dma_addr;\n+\t\t} else {\n+\t\t\tstruct rte_mbuf *mbuf_pay;\n+\t\t\tmbuf_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);\n+\t\t\tif (unlikely(!mbuf_pay)) {\n+\t\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate payload mbuf for RX\");\n+\t\t\t\treturn -ENOMEM;\n+\t\t\t}\n+\n+\t\t\tmbuf_pay->next = NULL;\n+\t\t\tmbuf_pay->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\t\tmbuf_pay->nb_segs = 1;\n+\t\t\tmbuf_pay->port = rxq->port_id;\n+\t\t\tmbuf->next = mbuf_pay;\n+\n+\t\t\trxd->read.hdr_addr = dma_addr;\n+\t\t\t/* The LS bit should be set to zero regardless of\n+\t\t\t * buffer split enablement.\n+\t\t\t */\n+\t\t\trxd->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf_pay));\n+\t\t}\n+\n #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n \t\trxd->read.rsvd1 = 0;\n \t\trxd->read.rsvd2 = 0;\n@@ -420,14 +521,14 @@ _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)\n \n \tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n \t\tif (rxq->sw_ring[i].mbuf) {\n-\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);\n+\t\t\trte_pktmbuf_free(rxq->sw_ring[i].mbuf);\n \t\t\trxq->sw_ring[i].mbuf = NULL;\n \t\t}\n \t}\n \tif (rxq->rx_nb_avail == 0)\n \t\treturn;\n \tfor (i = 0; i < rxq->rx_nb_avail; i++)\n-\t\trte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);\n+\t\trte_pktmbuf_free(rxq->rx_stage[rxq->rx_next_avail + i]);\n \n \trxq->rx_nb_avail = 0;\n }\n@@ -719,7 +820,7 @@ ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)\n \trx_ctx.qlen = rxq->nb_rx_desc;\n \trx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;\n \trx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;\n-\trx_ctx.dtype = 0; /* No Header Split mode */\n+\trx_ctx.dtype = 0; /* No Buffer Split mode */\n \trx_ctx.dsize = 1; /* 32B descriptors */\n \trx_ctx.rxmax = ICE_ETH_MAX_LEN;\n \t/* TPH: Transaction Layer Packet (TLP) processing hints */\n@@ -1053,6 +1154,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,\n \tuint16_t len;\n \tint use_def_burst_func = 1;\n \tuint64_t offloads;\n+\tuint16_t n_seg = rx_conf->rx_nseg;\n+\tuint16_t i;\n \n \tif (nb_desc % ICE_ALIGN_RING_DESC != 0 ||\n \t    nb_desc > ICE_MAX_RING_DESC ||\n@@ -1064,6 +1167,15 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,\n \n \toffloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;\n \n+\tif (mp)\n+\t\tn_seg = 1;\n+\n+\tif (n_seg > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {\n+\t\tPMD_INIT_LOG(ERR, \"port %u queue index %u split offload not configured\",\n+\t\t\t\tdev->data->port_id, queue_idx);\n+\t\treturn -EINVAL;\n+\t}\n+\n \t/* Free memory if needed */\n \tif 
(dev->data->rx_queues[queue_idx]) {\n \t\tice_rx_queue_release(dev->data->rx_queues[queue_idx]);\n@@ -1075,12 +1187,24 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t\t sizeof(struct ice_rx_queue),\n \t\t\t\t RTE_CACHE_LINE_SIZE,\n \t\t\t\t socket_id);\n+\n \tif (!rxq) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for \"\n \t\t\t     \"rx queue data structure\");\n \t\treturn -ENOMEM;\n \t}\n-\trxq->mp = mp;\n+\n+\trxq->rxseg_nb = n_seg;\n+\tif (n_seg > 1) {\n+\t\tfor (i = 0; i < n_seg; i++)\n+\t\t\tmemcpy(&rxq->rxseg[i], &rx_conf->rx_seg[i].split,\n+\t\t\t\tsizeof(struct rte_eth_rxseg_split));\n+\n+\t\trxq->mp = rxq->rxseg[0].mp;\n+\t} else {\n+\t\trxq->mp = mp;\n+\t}\n+\n \trxq->nb_rx_desc = nb_desc;\n \trxq->rx_free_thresh = rx_conf->rx_free_thresh;\n \trxq->queue_id = queue_idx;\n@@ -1551,7 +1675,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)\n \tstruct ice_rx_entry *rxep;\n \tstruct rte_mbuf *mb;\n \tuint16_t stat_err0;\n-\tuint16_t pkt_len;\n+\tuint16_t pkt_len, hdr_len;\n \tint32_t s[ICE_LOOK_AHEAD], nb_dd;\n \tint32_t i, j, nb_rx = 0;\n \tuint64_t pkt_flags = 0;\n@@ -1606,6 +1730,27 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)\n \t\t\t\t   ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;\n \t\t\tmb->data_len = pkt_len;\n \t\t\tmb->pkt_len = pkt_len;\n+\n+\t\t\tif (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {\n+\t\t\t\tpkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &\n+\t\t\t\t\tICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;\n+\t\t\t\tmb->data_len = pkt_len;\n+\t\t\t\tmb->pkt_len = pkt_len;\n+\t\t\t} else {\n+\t\t\t\tmb->nb_segs = (uint16_t)(mb->nb_segs + mb->next->nb_segs);\n+\t\t\t\tmb->next->next = NULL;\n+\t\t\t\thdr_len = rte_le_to_cpu_16(rxdp[j].wb.hdr_len_sph_flex_flags1) &\n+\t\t\t\t\t\tICE_RX_FLEX_DESC_HEADER_LEN_M;\n+\t\t\t\tpkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &\n+\t\t\t\t\tICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;\n+\t\t\t\tmb->data_len = hdr_len;\n+\t\t\t\tmb->pkt_len = hdr_len + pkt_len;\n+\t\t\t\tmb->next->data_len = pkt_len;\n+#ifdef RTE_ETHDEV_DEBUG_RX\n+\t\t\t\trte_pktmbuf_dump(stdout, mb, rte_pktmbuf_pkt_len(mb));\n+#endif\n+\t\t\t}\n+\n \t\t\tmb->ol_flags = 0;\n \t\t\tstat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);\n \t\t\tpkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);\n@@ -1697,7 +1842,9 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)\n \tstruct rte_mbuf *mb;\n \tuint16_t alloc_idx, i;\n \tuint64_t dma_addr;\n-\tint diag;\n+\tint diag, diag_pay;\n+\tuint64_t pay_addr;\n+\tstruct rte_mbuf *mbufs_pay[rxq->rx_free_thresh];\n \n \t/* Allocate buffers in bulk */\n \talloc_idx = (uint16_t)(rxq->rx_free_trigger -\n@@ -1710,6 +1857,15 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)\n \t\treturn -ENOMEM;\n \t}\n \n+\tif (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {\n+\t\tdiag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp,\n+\t\t\t\t(void *)mbufs_pay, rxq->rx_free_thresh);\n+\t\tif (unlikely(diag_pay != 0)) {\n+\t\t\tPMD_RX_LOG(ERR, \"Failed to get payload mbufs in bulk\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n \trxdp = &rxq->rx_ring[alloc_idx];\n \tfor (i = 0; i < rxq->rx_free_thresh; i++) {\n \t\tif (likely(i < (rxq->rx_free_thresh - 1)))\n@@ -1718,13 +1874,21 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)\n \n \t\tmb = rxep[i].mbuf;\n \t\trte_mbuf_refcnt_set(mb, 1);\n-\t\tmb->next = NULL;\n \t\tmb->data_off = RTE_PKTMBUF_HEADROOM;\n \t\tmb->nb_segs = 1;\n \t\tmb->port = rxq->port_id;\n \t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));\n-\t\trxdp[i].read.hdr_addr = 0;\n-\t\trxdp[i].read.pkt_addr = 
dma_addr;\n+\n+\t\tif (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {\n+\t\t\tmb->next = NULL;\n+\t\t\trxdp[i].read.hdr_addr = 0;\n+\t\t\trxdp[i].read.pkt_addr = dma_addr;\n+\t\t} else {\n+\t\t\tmb->next = mbufs_pay[i];\n+\t\t\tpay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs_pay[i]));\n+\t\t\trxdp[i].read.hdr_addr = dma_addr;\n+\t\t\trxdp[i].read.pkt_addr = pay_addr;\n+\t\t}\n \t}\n \n \t/* Update Rx tail register */\n@@ -2333,11 +2497,13 @@ ice_recv_pkts(void *rx_queue,\n \tstruct ice_rx_entry *sw_ring = rxq->sw_ring;\n \tstruct ice_rx_entry *rxe;\n \tstruct rte_mbuf *nmb; /* new allocated mbuf */\n+\tstruct rte_mbuf *nmb_pay; /* new allocated payload mbuf */\n \tstruct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */\n \tuint16_t rx_id = rxq->rx_tail;\n \tuint16_t nb_rx = 0;\n \tuint16_t nb_hold = 0;\n \tuint16_t rx_packet_len;\n+\tuint16_t rx_header_len;\n \tuint16_t rx_stat_err0;\n \tuint64_t dma_addr;\n \tuint64_t pkt_flags;\n@@ -2365,12 +2531,13 @@ ice_recv_pkts(void *rx_queue,\n \t\tif (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))\n \t\t\tbreak;\n \n-\t\t/* allocate mbuf */\n+\t\t/* allocate header mbuf */\n \t\tnmb = rte_mbuf_raw_alloc(rxq->mp);\n \t\tif (unlikely(!nmb)) {\n \t\t\trxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;\n \t\t\tbreak;\n \t\t}\n+\n \t\trxd = *rxdp; /* copy descriptor in ring to temp variable*/\n \n \t\tnb_hold++;\n@@ -2383,24 +2550,60 @@ ice_recv_pkts(void *rx_queue,\n \t\tdma_addr =\n \t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));\n \n-\t\t/**\n-\t\t * fill the read format of descriptor with physic address in\n-\t\t * new allocated mbuf: nmb\n-\t\t */\n-\t\trxdp->read.hdr_addr = 0;\n-\t\trxdp->read.pkt_addr = dma_addr;\n+\t\tif (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {\n+\t\t\t/**\n+\t\t\t * fill the read format of descriptor with physic address in\n+\t\t\t * new allocated mbuf: nmb\n+\t\t\t */\n+\t\t\trxdp->read.hdr_addr = 0;\n+\t\t\trxdp->read.pkt_addr = dma_addr;\n+\t\t} else {\n+\t\t\t/* allocate payload mbuf */\n+\t\t\tnmb_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);\n+\t\t\tif (unlikely(!nmb_pay)) {\n+\t\t\t\trxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;\n+\t\t\t\tbreak;\n+\t\t\t}\n \n-\t\t/* calculate rx_packet_len of the received pkt */\n-\t\trx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &\n-\t\t\t\t ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;\n+\t\t\tnmb->next = nmb_pay;\n+\t\t\tnmb_pay->next = NULL;\n+\n+\t\t\t/**\n+\t\t\t * fill the read format of descriptor with physic address in\n+\t\t\t * new allocated mbuf: nmb\n+\t\t\t */\n+\t\t\trxdp->read.hdr_addr = dma_addr;\n+\t\t\trxdp->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb_pay));\n+\t\t}\n \n \t\t/* fill old mbuf with received descriptor: rxd */\n \t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n \t\trte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));\n-\t\trxm->nb_segs = 1;\n-\t\trxm->next = NULL;\n-\t\trxm->pkt_len = rx_packet_len;\n-\t\trxm->data_len = rx_packet_len;\n+\t\tif (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {\n+\t\t\trxm->nb_segs = 1;\n+\t\t\trxm->next = NULL;\n+\t\t\t/* calculate rx_packet_len of the received pkt */\n+\t\t\trx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &\n+\t\t\t\t\tICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;\n+\t\t\trxm->data_len = rx_packet_len;\n+\t\t\trxm->pkt_len = rx_packet_len;\n+\t\t} else {\n+\t\t\trxm->nb_segs = (uint16_t)(rxm->nb_segs + rxm->next->nb_segs);\n+\t\t\trxm->next->next = NULL;\n+\t\t\t/* calculate rx_packet_len of the 
received pkt */\n+\t\t\trx_header_len = rte_le_to_cpu_16(rxd.wb.hdr_len_sph_flex_flags1) &\n+\t\t\t\t\tICE_RX_FLEX_DESC_HEADER_LEN_M;\n+\t\t\trx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &\n+\t\t\t\t\tICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;\n+\t\t\trxm->data_len = rx_header_len;\n+\t\t\trxm->pkt_len = rx_header_len + rx_packet_len;\n+\t\t\trxm->next->data_len = rx_packet_len;\n+\n+#ifdef RTE_ETHDEV_DEBUG_RX\n+\t\t\trte_pktmbuf_dump(stdout, rxm, rte_pktmbuf_pkt_len(rxm));\n+#endif\n+\t\t}\n+\n \t\trxm->port = rxq->port_id;\n \t\trxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &\n \t\t\trte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];\ndiff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h\nindex e1d4fe8e47..4947d5c25f 100644\n--- a/drivers/net/ice/ice_rxtx.h\n+++ b/drivers/net/ice/ice_rxtx.h\n@@ -16,6 +16,9 @@\n #define ICE_RX_MAX_BURST 32\n #define ICE_TX_MAX_BURST 32\n \n+/* Maximal number of segments to split. */\n+#define ICE_RX_MAX_NSEG 2\n+\n #define ICE_CHK_Q_ENA_COUNT        100\n #define ICE_CHK_Q_ENA_INTERVAL_US  100\n \n@@ -45,6 +48,11 @@\n extern uint64_t ice_timestamp_dynflag;\n extern int ice_timestamp_dynfield_offset;\n \n+/* Max header size can be 2K - 64 bytes */\n+#define ICE_RX_HDR_BUF_SIZE    (2048 - 64)\n+\n+#define ICE_HEADER_SPLIT_ENA   BIT(0)\n+\n typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);\n typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);\n typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,\n@@ -55,6 +63,12 @@ struct ice_rx_entry {\n \tstruct rte_mbuf *mbuf;\n };\n \n+enum ice_rx_dtype {\n+\tICE_RX_DTYPE_NO_SPLIT       = 0,\n+\tICE_RX_DTYPE_HEADER_SPLIT   = 1,\n+\tICE_RX_DTYPE_SPLIT_ALWAYS   = 2,\n+};\n+\n struct ice_rx_queue {\n \tstruct rte_mempool *mp; /* mbuf pool to populate RX ring */\n \tvolatile union ice_rx_flex_desc *rx_ring;/* RX ring virtual address */\n@@ -101,6 +115,8 @@ struct ice_rx_queue {\n \tuint32_t hw_time_high; /* high 32 bits of timestamp */\n \tuint32_t hw_time_low; /* low 32 bits of timestamp */\n \tuint64_t hw_time_update; /* SW time of HW record updating */\n+\tstruct rte_eth_rxseg_split rxseg[ICE_RX_MAX_NSEG];\n+\tuint32_t rxseg_nb;\n };\n \n struct ice_tx_entry {\ndiff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h\nindex 2dd2d83650..eec6ea2134 100644\n--- a/drivers/net/ice/ice_rxtx_vec_common.h\n+++ b/drivers/net/ice/ice_rxtx_vec_common.h\n@@ -291,6 +291,9 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)\n \tif (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)\n \t\treturn -1;\n \n+\tif (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)\n+\t\treturn -1;\n+\n \tif (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)\n \t\treturn ICE_VECTOR_OFFLOAD_PATH;\n \n",
    "prefixes": [
        "v9",
        "4/4"
    ]
}
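
The response above already carries enough to retrieve the whole series this patch belongs to, without guessing at other endpoints. A hedged sketch using only the "series" entry shown above and Python requests (the output file name and timeout are illustrative choices, not part of the API):

import requests

# Sketch: walk from the patch to its series and save the series mbox for "git am".
patch = requests.get("http://patchwork.dpdk.org/api/patches/117738/", timeout=30).json()
series = patch["series"][0]

print(series["name"], "version", series["version"])  # "support protocol based buffer split" version 9

mbox = requests.get(series["mbox"], timeout=30)       # http://patchwork.dpdk.org/series/25061/mbox/
mbox.raise_for_status()
with open("series-25061.mbox", "wb") as f:
    f.write(mbox.content)
# The saved file can then be applied to a DPDK tree with: git am series-25061.mbox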