get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update (replace) a patch.

GET /api/patches/129335/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 129335,
    "url": "http://patchwork.dpdk.org/api/patches/129335/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20230706095004.1848199-4-feifei.wang2@arm.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230706095004.1848199-4-feifei.wang2@arm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230706095004.1848199-4-feifei.wang2@arm.com",
    "date": "2023-07-06T09:50:03",
    "name": "[v7,3/4] net/ixgbe: implement mbufs recycle mode",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "4801a202ca8bad0a64f961fe44027a1f299b9e08",
    "submitter": {
        "id": 1771,
        "url": "http://patchwork.dpdk.org/api/people/1771/?format=api",
        "name": "Feifei Wang",
        "email": "feifei.wang2@arm.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20230706095004.1848199-4-feifei.wang2@arm.com/mbox/",
    "series": [
        {
            "id": 28857,
            "url": "http://patchwork.dpdk.org/api/series/28857/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=28857",
            "date": "2023-07-06T09:50:00",
            "name": "Recycle mbufs from Tx queue to Rx queue",
            "version": 7,
            "mbox": "http://patchwork.dpdk.org/series/28857/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/129335/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/129335/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E221842DE6;\n\tThu,  6 Jul 2023 11:50:32 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E236F42F8A;\n\tThu,  6 Jul 2023 11:50:27 +0200 (CEST)",
            "from foss.arm.com (foss.arm.com [217.140.110.172])\n by mails.dpdk.org (Postfix) with ESMTP id 1B93F42F88\n for <dev@dpdk.org>; Thu,  6 Jul 2023 11:50:26 +0200 (CEST)",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14])\n by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 9BF0C1474;\n Thu,  6 Jul 2023 02:51:07 -0700 (PDT)",
            "from net-x86-dell-8268.shanghai.arm.com\n (net-x86-dell-8268.shanghai.arm.com [10.169.210.116])\n by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id EBB323F663;\n Thu,  6 Jul 2023 02:50:21 -0700 (PDT)"
        ],
        "From": "Feifei Wang <feifei.wang2@arm.com>",
        "To": "Qiming Yang <qiming.yang@intel.com>,\n\tWenjun Wu <wenjun1.wu@intel.com>",
        "Cc": "dev@dpdk.org, ferruh.yigit@amd.com, konstantin.v.ananyev@yandex.ru,\n thomas@monjalon.net, andrew.rybchenko@oktetlabs.ru,\n mb@smartsharesystems.com, nd@arm.com, Feifei Wang <feifei.wang2@arm.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Ruifeng Wang <ruifeng.wang@arm.com>",
        "Subject": "[PATCH v7 3/4] net/ixgbe: implement mbufs recycle mode",
        "Date": "Thu,  6 Jul 2023 17:50:03 +0800",
        "Message-Id": "<20230706095004.1848199-4-feifei.wang2@arm.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230706095004.1848199-1-feifei.wang2@arm.com>",
        "References": "<20230706095004.1848199-1-feifei.wang2@arm.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Define specific function implementation for ixgbe driver.\nCurrently, recycle buffer mode can support 128bit\nvector path. And can be enabled both in fast free and\nno fast free mode.\n\nSuggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\nSigned-off-by: Feifei Wang <feifei.wang2@arm.com>\nReviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>\nReviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\n---\n drivers/net/ixgbe/ixgbe_ethdev.c              |   1 +\n drivers/net/ixgbe/ixgbe_ethdev.h              |   3 +\n .../ixgbe/ixgbe_recycle_mbufs_vec_common.c    | 143 ++++++++++++++++++\n drivers/net/ixgbe/ixgbe_rxtx.c                |  29 ++++\n drivers/net/ixgbe/ixgbe_rxtx.h                |   4 +\n drivers/net/ixgbe/meson.build                 |   1 +\n 6 files changed, 181 insertions(+)\n create mode 100644 drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c",
    "diff": "diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c\nindex 14a7d571e0..ea4c9dd561 100644\n--- a/drivers/net/ixgbe/ixgbe_ethdev.c\n+++ b/drivers/net/ixgbe/ixgbe_ethdev.c\n@@ -543,6 +543,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {\n \t.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,\n \t.rxq_info_get         = ixgbe_rxq_info_get,\n \t.txq_info_get         = ixgbe_txq_info_get,\n+\t.recycle_rxq_info_get = ixgbe_recycle_rxq_info_get,\n \t.timesync_enable      = ixgbe_timesync_enable,\n \t.timesync_disable     = ixgbe_timesync_disable,\n \t.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,\ndiff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h\nindex 1291e9099c..22fc3be3d8 100644\n--- a/drivers/net/ixgbe/ixgbe_ethdev.h\n+++ b/drivers/net/ixgbe/ixgbe_ethdev.h\n@@ -626,6 +626,9 @@ void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n \tstruct rte_eth_txq_info *qinfo);\n \n+void ixgbe_recycle_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\t\tstruct rte_eth_recycle_rxq_info *recycle_rxq_info);\n+\n int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);\n \n void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);\ndiff --git a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c\nnew file mode 100644\nindex 0000000000..9a8cc86954\n--- /dev/null\n+++ b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c\n@@ -0,0 +1,143 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2023 Arm Limited.\n+ */\n+\n+#include <stdint.h>\n+#include <ethdev_driver.h>\n+\n+#include \"ixgbe_ethdev.h\"\n+#include \"ixgbe_rxtx.h\"\n+\n+#pragma GCC diagnostic ignored \"-Wcast-qual\"\n+\n+void\n+ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs)\n+{\n+\tstruct ixgbe_rx_queue *rxq = rx_queue;\n+\tstruct ixgbe_rx_entry 
*rxep;\n+\tvolatile union ixgbe_adv_rx_desc *rxdp;\n+\tuint16_t rx_id;\n+\tuint64_t paddr;\n+\tuint64_t dma_addr;\n+\tuint16_t i;\n+\n+\trxdp = rxq->rx_ring + rxq->rxrearm_start;\n+\trxep = &rxq->sw_ring[rxq->rxrearm_start];\n+\n+\tfor (i = 0; i < nb_mbufs; i++) {\n+\t\t/* Initialize rxdp descs. */\n+\t\tpaddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM;\n+\t\tdma_addr = rte_cpu_to_le_64(paddr);\n+\t\t/* Flush descriptors with pa dma_addr */\n+\t\trxdp[i].read.hdr_addr = 0;\n+\t\trxdp[i].read.pkt_addr = dma_addr;\n+\t}\n+\n+\t/* Update the descriptor initializer index */\n+\trxq->rxrearm_start += nb_mbufs;\n+\tif (rxq->rxrearm_start >= rxq->nb_rx_desc)\n+\t\trxq->rxrearm_start = 0;\n+\n+\trxq->rxrearm_nb -= nb_mbufs;\n+\n+\trx_id = (uint16_t)((rxq->rxrearm_start == 0) ?\n+\t\t\t(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));\n+\n+\t/* Update the tail pointer on the NIC */\n+\tIXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);\n+}\n+\n+uint16_t\n+ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,\n+\t\tstruct rte_eth_recycle_rxq_info *recycle_rxq_info)\n+{\n+\tstruct ixgbe_tx_queue *txq = tx_queue;\n+\tstruct ixgbe_tx_entry *txep;\n+\tstruct rte_mbuf **rxep;\n+\tint i, n;\n+\tuint32_t status;\n+\tuint16_t nb_recycle_mbufs;\n+\tuint16_t avail = 0;\n+\tuint16_t mbuf_ring_size = recycle_rxq_info->mbuf_ring_size;\n+\tuint16_t mask = recycle_rxq_info->mbuf_ring_size - 1;\n+\tuint16_t refill_requirement = recycle_rxq_info->refill_requirement;\n+\tuint16_t refill_head = *recycle_rxq_info->refill_head;\n+\tuint16_t receive_tail = *recycle_rxq_info->receive_tail;\n+\n+\t/* Get available recycling Rx buffers. */\n+\tavail = (mbuf_ring_size - (refill_head - receive_tail)) & mask;\n+\n+\t/* Check Tx free thresh and Rx available space. 
*/\n+\tif (txq->nb_tx_free > txq->tx_free_thresh || avail <= txq->tx_rs_thresh)\n+\t\treturn 0;\n+\n+\t/* check DD bits on threshold descriptor */\n+\tstatus = txq->tx_ring[txq->tx_next_dd].wb.status;\n+\tif (!(status & IXGBE_ADVTXD_STAT_DD))\n+\t\treturn 0;\n+\n+\tn = txq->tx_rs_thresh;\n+\tnb_recycle_mbufs = n;\n+\n+\t/* Mbufs recycle can only support no ring buffer wrapping around.\n+\t * Two case for this:\n+\t *\n+\t * case 1: The refill head of Rx buffer ring needs to be aligned with\n+\t * buffer ring size. In this case, the number of Tx freeing buffers\n+\t * should be equal to refill_requirement.\n+\t *\n+\t * case 2: The refill head of Rx ring buffer does not need to be aligned\n+\t * with buffer ring size. In this case, the update of refill head can not\n+\t * exceed the Rx buffer ring size.\n+\t */\n+\tif (refill_requirement != n ||\n+\t\t(!refill_requirement && (refill_head + n > mbuf_ring_size)))\n+\t\treturn 0;\n+\n+\t/* First buffer to free from S/W ring is at index\n+\t * tx_next_dd - (tx_rs_thresh-1).\n+\t */\n+\ttxep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];\n+\trxep = recycle_rxq_info->mbuf_ring;\n+\trxep += refill_head;\n+\n+\tif (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {\n+\t\t/* Avoid txq contains buffers from unexpected mempool. */\n+\t\tif (unlikely(recycle_rxq_info->mp\n+\t\t\t\t\t!= txep[0].mbuf->pool))\n+\t\t\treturn 0;\n+\n+\t\t/* Directly put mbufs from Tx to Rx. 
*/\n+\t\tfor (i = 0; i < n; i++)\n+\t\t\trxep[i] = txep[i].mbuf;\n+\t} else {\n+\t\tfor (i = 0; i < n; i++) {\n+\t\t\trxep[i] = rte_pktmbuf_prefree_seg(txep[i].mbuf);\n+\n+\t\t\t/* If Tx buffers are not the last reference or from\n+\t\t\t * unexpected mempool, previous copied buffers are\n+\t\t\t * considered as invalid.\n+\t\t\t */\n+\t\t\tif (unlikely((rxep[i] == NULL && refill_requirement) ||\n+\t\t\t\t\trecycle_rxq_info->mp != txep[i].mbuf->pool))\n+\t\t\t\tnb_recycle_mbufs = 0;\n+\t\t}\n+\t\t/* If Tx buffers are not the last reference or\n+\t\t * from unexpected mempool, all recycled buffers\n+\t\t * are put into mempool.\n+\t\t */\n+\t\tif (nb_recycle_mbufs == 0)\n+\t\t\tfor (i = 0; i < n; i++) {\n+\t\t\t\tif (rxep[i] != NULL)\n+\t\t\t\t\trte_mempool_put(rxep[i]->pool, rxep[i]);\n+\t\t\t}\n+\t}\n+\n+\t/* Update counters for Tx. */\n+\ttxq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);\n+\ttxq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);\n+\tif (txq->tx_next_dd >= txq->nb_tx_desc)\n+\t\ttxq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);\n+\n+\treturn nb_recycle_mbufs;\n+}\ndiff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c\nindex 61f17cd90b..8174bd7d93 100644\n--- a/drivers/net/ixgbe/ixgbe_rxtx.c\n+++ b/drivers/net/ixgbe/ixgbe_rxtx.c\n@@ -2552,6 +2552,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)\n \t\t\t\t(rte_eal_process_type() != RTE_PROC_PRIMARY ||\n \t\t\t\t\tixgbe_txq_vec_setup(txq) == 0)) {\n \t\t\tPMD_INIT_LOG(DEBUG, \"Vector tx enabled.\");\n+\t\t\tdev->recycle_tx_mbufs_reuse = ixgbe_recycle_tx_mbufs_reuse_vec;\n \t\t\tdev->tx_pkt_burst = ixgbe_xmit_pkts_vec;\n \t\t} else\n \t\tdev->tx_pkt_burst = ixgbe_xmit_pkts_simple;\n@@ -4889,6 +4890,8 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)\n \t\t\t\t\t    \"callback (port=%d).\",\n \t\t\t\t     dev->data->port_id);\n \n+\t\t\tdev->recycle_rx_descriptors_refill 
=\n+\t\t\t\tixgbe_recycle_rx_descriptors_refill_vec;\n \t\t\tdev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;\n \t\t} else if (adapter->rx_bulk_alloc_allowed) {\n \t\t\tPMD_INIT_LOG(DEBUG, \"Using a Scattered with bulk \"\n@@ -4918,6 +4921,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)\n \t\t\t     RTE_IXGBE_DESCS_PER_LOOP,\n \t\t\t     dev->data->port_id);\n \n+\t\tdev->recycle_rx_descriptors_refill = ixgbe_recycle_rx_descriptors_refill_vec;\n \t\tdev->rx_pkt_burst = ixgbe_recv_pkts_vec;\n \t} else if (adapter->rx_bulk_alloc_allowed) {\n \t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions are \"\n@@ -5689,6 +5693,31 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n \tqinfo->conf.tx_deferred_start = txq->tx_deferred_start;\n }\n \n+void\n+ixgbe_recycle_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_recycle_rxq_info *recycle_rxq_info)\n+{\n+\tstruct ixgbe_rx_queue *rxq;\n+\tstruct ixgbe_adapter *adapter = dev->data->dev_private;\n+\n+\trxq = dev->data->rx_queues[queue_id];\n+\n+\trecycle_rxq_info->mbuf_ring = (void *)rxq->sw_ring;\n+\trecycle_rxq_info->mp = rxq->mb_pool;\n+\trecycle_rxq_info->mbuf_ring_size = rxq->nb_rx_desc;\n+\trecycle_rxq_info->receive_tail = &rxq->rx_tail;\n+\n+\tif (adapter->rx_vec_allowed) {\n+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)\n+\t\trecycle_rxq_info->refill_requirement = RTE_IXGBE_RXQ_REARM_THRESH;\n+\t\trecycle_rxq_info->refill_head = &rxq->rxrearm_start;\n+#endif\n+\t} else {\n+\t\trecycle_rxq_info->refill_requirement = rxq->rx_free_thresh;\n+\t\trecycle_rxq_info->refill_head = &rxq->rx_free_trigger;\n+\t}\n+}\n+\n /*\n  * [VF] Initializes Receive Unit.\n  */\ndiff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h\nindex 668a5b9814..ee89c89929 100644\n--- a/drivers/net/ixgbe/ixgbe_rxtx.h\n+++ b/drivers/net/ixgbe/ixgbe_rxtx.h\n@@ -295,6 +295,10 @@ int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);\n extern const uint32_t 
ptype_table[IXGBE_PACKET_TYPE_MAX];\n extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];\n \n+uint16_t ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,\n+\t\tstruct rte_eth_recycle_rxq_info *recycle_rxq_info);\n+void ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs);\n+\n uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t\t    uint16_t nb_pkts);\n int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);\ndiff --git a/drivers/net/ixgbe/meson.build b/drivers/net/ixgbe/meson.build\nindex a18908ef7c..5ad9aeb44b 100644\n--- a/drivers/net/ixgbe/meson.build\n+++ b/drivers/net/ixgbe/meson.build\n@@ -17,6 +17,7 @@ sources = files(\n         'ixgbe_rxtx.c',\n         'ixgbe_tm.c',\n         'ixgbe_vf_representor.c',\n+\t'ixgbe_recycle_mbufs_vec_common.c',\n         'rte_pmd_ixgbe.c',\n )\n \n",
    "prefixes": [
        "v7",
        "3/4"
    ]
}