get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Update a patch.

GET /api/patches/121314/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 121314,
    "url": "http://patchwork.dpdk.org/api/patches/121314/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221223015558.3143279-3-mingxia.liu@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221223015558.3143279-3-mingxia.liu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221223015558.3143279-3-mingxia.liu@intel.com",
    "date": "2022-12-23T01:55:39",
    "name": "[02/21] net/cpfl: add Tx queue setup",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "8b24ad9601896509a489549a9ef642d31f54d6bb",
    "submitter": {
        "id": 2514,
        "url": "http://patchwork.dpdk.org/api/people/2514/?format=api",
        "name": "Liu, Mingxia",
        "email": "mingxia.liu@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221223015558.3143279-3-mingxia.liu@intel.com/mbox/",
    "series": [
        {
            "id": 26253,
            "url": "http://patchwork.dpdk.org/api/series/26253/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=26253",
            "date": "2022-12-23T01:55:37",
            "name": "add support for cpfl PMD in DPDK",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/26253/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/121314/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/121314/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 00BA1A0093;\n\tFri, 23 Dec 2022 03:51:56 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 3240042D22;\n\tFri, 23 Dec 2022 03:51:48 +0100 (CET)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n by mails.dpdk.org (Postfix) with ESMTP id 5DE9A42BB1\n for <dev@dpdk.org>; Fri, 23 Dec 2022 03:51:46 +0100 (CET)",
            "from orsmga006.jf.intel.com ([10.7.209.51])\n by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 22 Dec 2022 18:51:46 -0800",
            "from dpdk-mingxial-01.sh.intel.com ([10.67.119.112])\n by orsmga006.jf.intel.com with ESMTP; 22 Dec 2022 18:51:44 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1671763906; x=1703299906;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=PgiZNxIkzyWlOSUbQjnznTM7UqwgQhROIED3ZVmduH8=;\n b=TCG70vQ1v6Tu7CTrhxA9vJkD9H0p2thIritgbhRBsqO7CXyXI2gsVF94\n guFp7jzNX4HKWR45TheAAPA+dtoeHlazdJNPiW4lDBbOdOXiui+8bQly8\n eF62iApxwSrdH7bDJ3Xj6rO8tpXnfp84rXWM7lTBjPhYW7LgFtHgiZGQk\n 6WG+81Ic3ntTB6oX3cepiNuggC+2/DAI3qLb3ARwkTXMS7irkt5OhvWHG\n 9DPN2dWw6prnNDPlVmuCJi0GtZNEwydo64OVui6gq1BZ5yL/gCkvl3Zeh\n A9VlduVIEEfu8J17TfBEFHJrJPFr1ANQt5vefbSvFaVh7WYqD96Mdte09 A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10569\"; a=\"321467079\"",
            "E=Sophos;i=\"5.96,267,1665471600\"; d=\"scan'208\";a=\"321467079\"",
            "E=McAfee;i=\"6500,9779,10569\"; a=\"629707141\"",
            "E=Sophos;i=\"5.96,267,1665471600\"; d=\"scan'208\";a=\"629707141\""
        ],
        "X-ExtLoop1": "1",
        "From": "Mingxia Liu <mingxia.liu@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "jingjing.wu@intel.com, beilei.xing@intel.com, qi.z.zhang@intel.com,\n Mingxia Liu <mingxia.liu@intel.com>",
        "Subject": "[PATCH 02/21] net/cpfl: add Tx queue setup",
        "Date": "Fri, 23 Dec 2022 01:55:39 +0000",
        "Message-Id": "<20221223015558.3143279-3-mingxia.liu@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20221223015558.3143279-1-mingxia.liu@intel.com>",
        "References": "<20221223015558.3143279-1-mingxia.liu@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add support for tx_queue_setup ops.\n\nIn the single queue model, the same descriptor queue is used by SW to\npost buffer descriptors to HW and by HW to post completed descriptors\nto SW.\n\nIn the split queue model, \"RX buffer queues\" are used to pass\ndescriptor buffers from SW to HW while Rx queues are used only to\npass the descriptor completions, that is, descriptors that point\nto completed buffers, from HW to SW. This is contrary to the single\nqueue model in which Rx queues are used for both purposes.\n\nSigned-off-by: Mingxia Liu <mingxia.liu@intel.com>\n---\n drivers/net/cpfl/cpfl_ethdev.c |  13 ++\n drivers/net/cpfl/cpfl_rxtx.c   | 244 +++++++++++++++++++++++++++++++++\n drivers/net/cpfl/cpfl_rxtx.h   |  25 ++++\n drivers/net/cpfl/meson.build   |   1 +\n 4 files changed, 283 insertions(+)\n create mode 100644 drivers/net/cpfl/cpfl_rxtx.c\n create mode 100644 drivers/net/cpfl/cpfl_rxtx.h",
    "diff": "diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c\nindex 7c3bc945e0..10d2387b66 100644\n--- a/drivers/net/cpfl/cpfl_ethdev.c\n+++ b/drivers/net/cpfl/cpfl_ethdev.c\n@@ -12,6 +12,7 @@\n #include <rte_alarm.h>\n \n #include \"cpfl_ethdev.h\"\n+#include \"cpfl_rxtx.h\"\n \n #define CPFL_TX_SINGLE_Q\t\"tx_single\"\n #define CPFL_RX_SINGLE_Q\t\"rx_single\"\n@@ -96,6 +97,17 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \tdev_info->max_mtu = dev_info->max_rx_pktlen - CPFL_ETH_OVERHEAD;\n \tdev_info->min_mtu = RTE_ETHER_MIN_MTU;\n \n+\tdev_info->default_txconf = (struct rte_eth_txconf) {\n+\t\t.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,\n+\t\t.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,\n+\t};\n+\n+\tdev_info->tx_desc_lim = (struct rte_eth_desc_lim) {\n+\t\t.nb_max = CPFL_MAX_RING_DESC,\n+\t\t.nb_min = CPFL_MIN_RING_DESC,\n+\t\t.nb_align = CPFL_ALIGN_RING_DESC,\n+\t};\n+\n \treturn 0;\n }\n \n@@ -513,6 +525,7 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a\n static const struct eth_dev_ops cpfl_eth_dev_ops = {\n \t.dev_configure\t\t\t= cpfl_dev_configure,\n \t.dev_close\t\t\t= cpfl_dev_close,\n+\t.tx_queue_setup\t\t\t= cpfl_tx_queue_setup,\n \t.dev_infos_get\t\t\t= cpfl_dev_info_get,\n \t.link_update\t\t\t= cpfl_dev_link_update,\n \t.dev_supported_ptypes_get\t= cpfl_dev_supported_ptypes_get,\ndiff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c\nnew file mode 100644\nindex 0000000000..ea4a2002bf\n--- /dev/null\n+++ b/drivers/net/cpfl/cpfl_rxtx.c\n@@ -0,0 +1,244 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#include <ethdev_driver.h>\n+#include <rte_net.h>\n+#include <rte_vect.h>\n+\n+#include \"cpfl_ethdev.h\"\n+#include \"cpfl_rxtx.h\"\n+\n+static uint64_t\n+cpfl_tx_offload_convert(uint64_t offload)\n+{\n+\tuint64_t ol = 0;\n+\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0)\n+\t\tol 
|= IDPF_TX_OFFLOAD_IPV4_CKSUM;\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0)\n+\t\tol |= IDPF_TX_OFFLOAD_UDP_CKSUM;\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)\n+\t\tol |= IDPF_TX_OFFLOAD_TCP_CKSUM;\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) != 0)\n+\t\tol |= IDPF_TX_OFFLOAD_SCTP_CKSUM;\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)\n+\t\tol |= IDPF_TX_OFFLOAD_MULTI_SEGS;\n+\tif ((offload & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0)\n+\t\tol |= IDPF_TX_OFFLOAD_MBUF_FAST_FREE;\n+\n+\treturn ol;\n+}\n+\n+static const struct rte_memzone *\n+cpfl_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t      uint16_t len, uint16_t queue_type,\n+\t\t      unsigned int socket_id, bool splitq)\n+{\n+\tchar ring_name[RTE_MEMZONE_NAMESIZE];\n+\tconst struct rte_memzone *mz;\n+\tuint32_t ring_size;\n+\n+\tmemset(ring_name, 0, RTE_MEMZONE_NAMESIZE);\n+\tswitch (queue_type) {\n+\tcase VIRTCHNL2_QUEUE_TYPE_TX:\n+\t\tif (splitq)\n+\t\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_sched_desc),\n+\t\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n+\t\telse\n+\t\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_desc),\n+\t\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n+\t\trte_memcpy(ring_name, \"cpfl Tx ring\", sizeof(\"cpfl Tx ring\"));\n+\t\tbreak;\n+\tcase VIRTCHNL2_QUEUE_TYPE_RX:\n+\t\tif (splitq)\n+\t\t\tring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),\n+\t\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n+\t\telse\n+\t\t\tring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_singleq_rx_buf_desc),\n+\t\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n+\t\trte_memcpy(ring_name, \"cpfl Rx ring\", sizeof(\"cpfl Rx ring\"));\n+\t\tbreak;\n+\tcase VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:\n+\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_splitq_tx_compl_desc),\n+\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n+\t\trte_memcpy(ring_name, \"cpfl Tx compl ring\", sizeof(\"cpfl Tx compl ring\"));\n+\t\tbreak;\n+\tcase 
VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:\n+\t\tring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_splitq_rx_buf_desc),\n+\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n+\t\trte_memcpy(ring_name, \"cpfl Rx buf ring\", sizeof(\"cpfl Rx buf ring\"));\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_INIT_LOG(ERR, \"Invalid queue type\");\n+\t\treturn NULL;\n+\t}\n+\n+\tmz = rte_eth_dma_zone_reserve(dev, ring_name, queue_idx,\n+\t\t\t\t      ring_size, CPFL_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for ring\");\n+\t\treturn NULL;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. */\n+\tmemset(mz->addr, 0, ring_size);\n+\n+\treturn mz;\n+}\n+\n+static void\n+cpfl_dma_zone_release(const struct rte_memzone *mz)\n+{\n+\trte_memzone_free(mz);\n+}\n+\n+static int\n+cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,\n+\t\t     uint16_t queue_idx, uint16_t nb_desc,\n+\t\t     unsigned int socket_id)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tconst struct rte_memzone *mz;\n+\tstruct idpf_tx_queue *cq;\n+\tint ret;\n+\n+\tcq = rte_zmalloc_socket(\"cpfl splitq cq\",\n+\t\t\t\tsizeof(struct idpf_tx_queue),\n+\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\tsocket_id);\n+\tif (cq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for Tx compl queue\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err_cq_alloc;\n+\t}\n+\n+\tcq->nb_tx_desc = nb_desc;\n+\tcq->queue_id = vport->chunks_info.tx_compl_start_qid + queue_idx;\n+\tcq->port_id = dev->data->port_id;\n+\tcq->txqs = dev->data->tx_queues;\n+\tcq->tx_start_qid = vport->chunks_info.tx_start_qid;\n+\n+\tmz = cpfl_dma_zone_reserve(dev, queue_idx, nb_desc,\n+\t\t\t\t   VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION,\n+\t\t\t\t   socket_id, true);\n+\tif (mz == NULL) {\n+\t\tret = -ENOMEM;\n+\t\tgoto err_mz_reserve;\n+\t}\n+\tcq->tx_ring_phys_addr = mz->iova;\n+\tcq->compl_ring = mz->addr;\n+\tcq->mz = mz;\n+\treset_split_tx_complq(cq);\n+\n+\ttxq->complq = 
cq;\n+\n+\treturn 0;\n+\n+err_mz_reserve:\n+\trte_free(cq);\n+err_cq_alloc:\n+\treturn ret;\n+}\n+\n+int\n+cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t    uint16_t nb_desc, unsigned int socket_id,\n+\t\t    const struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tuint16_t tx_rs_thresh, tx_free_thresh;\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tconst struct rte_memzone *mz;\n+\tstruct idpf_tx_queue *txq;\n+\tuint64_t offloads;\n+\tuint16_t len;\n+\tbool is_splitq;\n+\tint ret;\n+\n+\toffloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;\n+\n+\ttx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh > 0) ?\n+\t\ttx_conf->tx_rs_thresh : CPFL_DEFAULT_TX_RS_THRESH);\n+\ttx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh > 0) ?\n+\t\ttx_conf->tx_free_thresh : CPFL_DEFAULT_TX_FREE_THRESH);\n+\tif (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)\n+\t\treturn -EINVAL;\n+\n+\t/* Allocate the TX queue data structure. */\n+\ttxq = rte_zmalloc_socket(\"cpfl txq\",\n+\t\t\t\t sizeof(struct idpf_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (txq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err_txq_alloc;\n+\t}\n+\n+\tis_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);\n+\n+\ttxq->nb_tx_desc = nb_desc;\n+\ttxq->rs_thresh = tx_rs_thresh;\n+\ttxq->free_thresh = tx_free_thresh;\n+\ttxq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;\n+\ttxq->port_id = dev->data->port_id;\n+\ttxq->offloads = cpfl_tx_offload_convert(offloads);\n+\ttxq->tx_deferred_start = tx_conf->tx_deferred_start;\n+\n+\tif (is_splitq)\n+\t\tlen = 2 * nb_desc;\n+\telse\n+\t\tlen = nb_desc;\n+\ttxq->sw_nb_desc = len;\n+\n+\t/* Allocate TX hardware ring descriptors. 
*/\n+\tmz = cpfl_dma_zone_reserve(dev, queue_idx, nb_desc, VIRTCHNL2_QUEUE_TYPE_TX,\n+\t\t\t\t   socket_id, is_splitq);\n+\tif (mz == NULL) {\n+\t\tret = -ENOMEM;\n+\t\tgoto err_mz_reserve;\n+\t}\n+\ttxq->tx_ring_phys_addr = mz->iova;\n+\ttxq->mz = mz;\n+\n+\ttxq->sw_ring = rte_zmalloc_socket(\"cpfl tx sw ring\",\n+\t\t\t\t\t  sizeof(struct idpf_tx_entry) * len,\n+\t\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (txq->sw_ring == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err_sw_ring_alloc;\n+\t}\n+\n+\tif (!is_splitq) {\n+\t\ttxq->tx_ring = mz->addr;\n+\t\treset_single_tx_queue(txq);\n+\t} else {\n+\t\ttxq->desc_ring = mz->addr;\n+\t\treset_split_tx_descq(txq);\n+\n+\t\t/* Setup tx completion queue if split model */\n+\t\tret = cpfl_tx_complq_setup(dev, txq, queue_idx,\n+\t\t\t\t\t   2 * nb_desc, socket_id);\n+\t\tif (ret != 0)\n+\t\t\tgoto err_complq_setup;\n+\t}\n+\n+\ttxq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +\n+\t\t\tqueue_idx * vport->chunks_info.tx_qtail_spacing);\n+\ttxq->q_set = true;\n+\tdev->data->tx_queues[queue_idx] = txq;\n+\n+\treturn 0;\n+\n+err_complq_setup:\n+err_sw_ring_alloc:\n+\tcpfl_dma_zone_release(mz);\n+err_mz_reserve:\n+\trte_free(txq);\n+err_txq_alloc:\n+\treturn ret;\n+}\ndiff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h\nnew file mode 100644\nindex 0000000000..ec42478393\n--- /dev/null\n+++ b/drivers/net/cpfl/cpfl_rxtx.h\n@@ -0,0 +1,25 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#ifndef _CPFL_RXTX_H_\n+#define _CPFL_RXTX_H_\n+\n+#include <idpf_common_rxtx.h>\n+#include \"cpfl_ethdev.h\"\n+\n+/* In QLEN must be whole number of 32 descriptors. */\n+#define CPFL_ALIGN_RING_DESC\t32\n+#define CPFL_MIN_RING_DESC\t32\n+#define CPFL_MAX_RING_DESC\t4096\n+#define CPFL_DMA_MEM_ALIGN\t4096\n+/* Base address of the HW descriptor ring should be 128B aligned. 
*/\n+#define CPFL_RING_BASE_ALIGN\t128\n+\n+#define CPFL_DEFAULT_TX_RS_THRESH\t32\n+#define CPFL_DEFAULT_TX_FREE_THRESH\t32\n+\n+int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\t\tconst struct rte_eth_txconf *tx_conf);\n+#endif /* _CPFL_RXTX_H_ */\ndiff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build\nindex 106cc97e60..3ccee15703 100644\n--- a/drivers/net/cpfl/meson.build\n+++ b/drivers/net/cpfl/meson.build\n@@ -11,4 +11,5 @@ deps += ['common_idpf']\n \n sources = files(\n         'cpfl_ethdev.c',\n+        'cpfl_rxtx.c',\n )\n\\ No newline at end of file\n",
    "prefixes": [
        "02/21"
    ]
}