get:
Show a patch.

patch:
Update a patch (partial update).

put:
Update a patch (full update).
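
A minimal sketch of driving this endpoint from Python with the requests library; the read is anonymous, while the update needs a Patchwork API token with sufficient rights on the project (the token value and the fields written here are illustrative, not taken from this page):

import requests

BASE = "http://patchwork.dpdk.org/api"
PATCH_ID = 131658

# GET: anonymous read, returns the JSON document shown below.
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: partial update of writable fields such as "state" or "archived".
# The token below is a placeholder; supply a real Patchwork API token.
headers = {"Authorization": "Token 0123456789abcdef"}
update = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
                        json={"state": "accepted", "archived": True},
                        headers=headers)
update.raise_for_status()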

GET /api/patches/131658/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 131658,
    "url": "http://patchwork.dpdk.org/api/patches/131658/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20230920062236.375308-2-simei.su@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230920062236.375308-2-simei.su@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230920062236.375308-2-simei.su@intel.com",
    "date": "2023-09-20T06:22:26",
    "name": "[v5,01/11] common/idpf: refactor single queue Tx data path",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "70ae2108b28f8ab675c2d9fcf22d4fa1a6a29cf6",
    "submitter": {
        "id": 1298,
        "url": "http://patchwork.dpdk.org/api/people/1298/?format=api",
        "name": "Simei Su",
        "email": "simei.su@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "http://patchwork.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20230920062236.375308-2-simei.su@intel.com/mbox/",
    "series": [
        {
            "id": 29562,
            "url": "http://patchwork.dpdk.org/api/series/29562/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=29562",
            "date": "2023-09-20T06:22:25",
            "name": "update idpf base code",
            "version": 5,
            "mbox": "http://patchwork.dpdk.org/series/29562/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/131658/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/131658/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id DC2A2425E9;\n\tWed, 20 Sep 2023 08:22:22 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id BA51F406FF;\n\tWed, 20 Sep 2023 08:22:19 +0200 (CEST)",
            "from mgamail.intel.com (mgamail.intel.com [192.55.52.43])\n by mails.dpdk.org (Postfix) with ESMTP id 8CB6E4067E\n for <dev@dpdk.org>; Wed, 20 Sep 2023 08:22:16 +0200 (CEST)",
            "from orsmga005.jf.intel.com ([10.7.209.41])\n by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 19 Sep 2023 23:22:15 -0700",
            "from dpdk-simei-icelake.sh.intel.com ([10.67.110.167])\n by orsmga005.jf.intel.com with ESMTP; 19 Sep 2023 23:22:13 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1695190937; x=1726726937;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=m+IMsHG98LbjDQPwKiHHKbmvUyW+mX/3lTAY3rMs61M=;\n b=n9ZVDQ1LlMIQqL9+N33J63U5utQsa5YDo9B5giAZQdlNxILr+WmipcxD\n 5QdjDgB3p6u3gcazRlUDisj1JPEu1X+DNotlqHc+BDEcbCSmJ3aje9osr\n nI5UkEvXSFO/3bdXEPCZUp6RkJtp+j5u/N1PuSBYMzyPezJOk7lulKJea\n lCwtVgqKFRW92V/4UkSBWpcvNgVlSZ6ZCwHzAjvwSRbQPOqSskRif9f+h\n MMvaGaMPmHVubwd7OJx5dD+a+i3avTKr35+pSRPwBOfYCWGWT7CVosWOf\n vdQiQ/mbcUrJfaRV8I5qXa/Oof+R8fPFiMUht40TiXsJgs/ilVU0zb+xU w==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6600,9927,10838\"; a=\"466453221\"",
            "E=Sophos;i=\"6.02,161,1688454000\"; d=\"scan'208\";a=\"466453221\"",
            "E=McAfee;i=\"6600,9927,10838\"; a=\"920154550\"",
            "E=Sophos;i=\"6.02,161,1688454000\"; d=\"scan'208\";a=\"920154550\""
        ],
        "X-ExtLoop1": "1",
        "From": "Simei Su <simei.su@intel.com>",
        "To": "jingjing.wu@intel.com,\n\tbeilei.xing@intel.com,\n\tqi.z.zhang@intel.com",
        "Cc": "dev@dpdk.org, wenjun1.wu@intel.com, mingxia.liu@intel.com,\n wenjing.qiao@intel.com, Simei Su <simei.su@intel.com>",
        "Subject": "[PATCH v5 01/11] common/idpf: refactor single queue Tx data path",
        "Date": "Wed, 20 Sep 2023 14:22:26 +0800",
        "Message-Id": "<20230920062236.375308-2-simei.su@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230920062236.375308-1-simei.su@intel.com>",
        "References": "<20230918021130.192982-1-simei.su@intel.com>\n <20230920062236.375308-1-simei.su@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Currently, single queue Tx data path uses flex Tx data\ndescriptor(DTYPE3) which is removed in the latest idpf spec.\nThis patch replaces flex Tx data descriptor with base Tx data\ndescriptor for single queue Tx data path and refines Tx single\nqueue setup to align with Tx data path.\n\nSigned-off-by: Simei Su <simei.su@intel.com>\nAcked-by: Wenjun Wu <wenjun1.wu@intel.com>\nAcked-by: Beilei Xing <beilei.xing@intel.com>\n---\n drivers/common/idpf/idpf_common_rxtx.c        | 39 +++++++++----------\n drivers/common/idpf/idpf_common_rxtx.h        |  2 +-\n drivers/common/idpf/idpf_common_rxtx_avx512.c | 37 +++++++++---------\n drivers/net/cpfl/cpfl_rxtx.c                  |  2 +-\n drivers/net/idpf/idpf_rxtx.c                  |  2 +-\n 5 files changed, 40 insertions(+), 42 deletions(-)",
    "diff": "diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c\nindex fc87e3e243..e6d2486272 100644\n--- a/drivers/common/idpf/idpf_common_rxtx.c\n+++ b/drivers/common/idpf/idpf_common_rxtx.c\n@@ -276,14 +276,14 @@ idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq)\n \t}\n \n \ttxe = txq->sw_ring;\n-\tsize = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;\n+\tsize = sizeof(struct idpf_base_tx_desc) * txq->nb_tx_desc;\n \tfor (i = 0; i < size; i++)\n \t\t((volatile char *)txq->tx_ring)[i] = 0;\n \n \tprev = (uint16_t)(txq->nb_tx_desc - 1);\n \tfor (i = 0; i < txq->nb_tx_desc; i++) {\n-\t\ttxq->tx_ring[i].qw1.cmd_dtype =\n-\t\t\trte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);\n+\t\ttxq->tx_ring[i].qw1 =\n+\t\t\trte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);\n \t\ttxe[i].mbuf =  NULL;\n \t\ttxe[i].last_id = i;\n \t\ttxe[prev].next_id = i;\n@@ -1307,17 +1307,16 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)\n \tuint16_t nb_tx_to_clean;\n \tuint16_t i;\n \n-\tvolatile struct idpf_flex_tx_desc *txd = txq->tx_ring;\n+\tvolatile struct idpf_base_tx_desc *txd = txq->tx_ring;\n \n \tdesc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);\n \tif (desc_to_clean_to >= nb_tx_desc)\n \t\tdesc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);\n \n \tdesc_to_clean_to = sw_ring[desc_to_clean_to].last_id;\n-\t/* In the writeback Tx desccriptor, the only significant fields are the 4-bit DTYPE */\n-\tif ((txd[desc_to_clean_to].qw1.cmd_dtype &\n-\t     rte_cpu_to_le_16(IDPF_TXD_QW1_DTYPE_M)) !=\n-\t    rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE)) {\n+\tif ((txd[desc_to_clean_to].qw1 &\n+\t     rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=\n+\t    rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE)) {\n \t\tTX_LOG(DEBUG, \"TX descriptor %4u is not done \"\n \t\t       \"(port=%d queue=%d)\", desc_to_clean_to,\n \t\t       txq->port_id, txq->queue_id);\n@@ -1331,10 +1330,7 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)\n \t\tnb_tx_to_clean = (uint16_t)(desc_to_clean_to -\n \t\t\t\t\t    last_desc_cleaned);\n \n-\ttxd[desc_to_clean_to].qw1.cmd_dtype = 0;\n-\ttxd[desc_to_clean_to].qw1.buf_size = 0;\n-\tfor (i = 0; i < RTE_DIM(txd[desc_to_clean_to].qw1.flex.raw); i++)\n-\t\ttxd[desc_to_clean_to].qw1.flex.raw[i] = 0;\n+\ttxd[desc_to_clean_to].qw1 = 0;\n \n \ttxq->last_desc_cleaned = desc_to_clean_to;\n \ttxq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);\n@@ -1347,8 +1343,8 @@ uint16_t\n idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t  uint16_t nb_pkts)\n {\n-\tvolatile struct idpf_flex_tx_desc *txd;\n-\tvolatile struct idpf_flex_tx_desc *txr;\n+\tvolatile struct idpf_base_tx_desc *txd;\n+\tvolatile struct idpf_base_tx_desc *txr;\n \tunion idpf_tx_offload tx_offload = {0};\n \tstruct idpf_tx_entry *txe, *txn;\n \tstruct idpf_tx_entry *sw_ring;\n@@ -1356,6 +1352,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tstruct rte_mbuf *tx_pkt;\n \tstruct rte_mbuf *m_seg;\n \tuint64_t buf_dma_addr;\n+\tuint32_t td_offset;\n \tuint64_t ol_flags;\n \tuint16_t tx_last;\n \tuint16_t nb_used;\n@@ -1382,6 +1379,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \n \tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n \t\ttd_cmd = 0;\n+\t\ttd_offset = 0;\n \n \t\ttx_pkt = *tx_pkts++;\n \t\tRTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);\n@@ -1462,9 +1460,10 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\tslen = m_seg->data_len;\n \t\t\tbuf_dma_addr = 
rte_mbuf_data_iova(m_seg);\n \t\t\ttxd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);\n-\t\t\ttxd->qw1.buf_size = slen;\n-\t\t\ttxd->qw1.cmd_dtype = rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_FLEX_DATA <<\n-\t\t\t\t\t\t\t      IDPF_FLEX_TXD_QW1_DTYPE_S);\n+\t\t\ttxd->qw1 = rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DATA |\n+\t\t\t\t((uint64_t)td_cmd  << IDPF_TXD_QW1_CMD_S) |\n+\t\t\t\t((uint64_t)td_offset << IDPF_TXD_QW1_OFFSET_S) |\n+\t\t\t\t((uint64_t)slen << IDPF_TXD_QW1_TX_BUF_SZ_S));\n \n \t\t\ttxe->last_id = tx_last;\n \t\t\ttx_id = txe->next_id;\n@@ -1473,7 +1472,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t} while (m_seg);\n \n \t\t/* The last packet data descriptor needs End Of Packet (EOP) */\n-\t\ttd_cmd |= IDPF_TX_FLEX_DESC_CMD_EOP;\n+\t\ttd_cmd |= IDPF_TX_DESC_CMD_EOP;\n \t\ttxq->nb_used = (uint16_t)(txq->nb_used + nb_used);\n \t\ttxq->nb_free = (uint16_t)(txq->nb_free - nb_used);\n \n@@ -1482,7 +1481,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t       \"%4u (port=%d queue=%d)\",\n \t\t\t       tx_last, txq->port_id, txq->queue_id);\n \n-\t\t\ttd_cmd |= IDPF_TX_FLEX_DESC_CMD_RS;\n+\t\t\ttd_cmd |= IDPF_TX_DESC_CMD_RS;\n \n \t\t\t/* Update txq RS bit counters */\n \t\t\ttxq->nb_used = 0;\n@@ -1491,7 +1490,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\tif (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK)\n \t\t\ttd_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;\n \n-\t\ttxd->qw1.cmd_dtype |= rte_cpu_to_le_16(td_cmd << IDPF_FLEX_TXD_QW1_CMD_S);\n+\t\ttxd->qw1 |= rte_cpu_to_le_16(td_cmd << IDPF_TXD_QW1_CMD_S);\n \t}\n \n end_of_tx:\ndiff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h\nindex 6cb83fc0a6..b49b1ed737 100644\n--- a/drivers/common/idpf/idpf_common_rxtx.h\n+++ b/drivers/common/idpf/idpf_common_rxtx.h\n@@ -157,7 +157,7 @@ struct idpf_tx_entry {\n /* Structure associated with each TX queue. 
*/\n struct idpf_tx_queue {\n \tconst struct rte_memzone *mz;\t\t/* memzone for Tx ring */\n-\tvolatile struct idpf_flex_tx_desc *tx_ring;\t/* Tx ring virtual address */\n+\tvolatile struct idpf_base_tx_desc *tx_ring;\t/* Tx ring virtual address */\n \tvolatile union {\n \t\tstruct idpf_flex_tx_sched_desc *desc_ring;\n \t\tstruct idpf_splitq_tx_compl_desc *compl_ring;\ndiff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c\nindex 2ac46fb1d2..f65e8d512b 100644\n--- a/drivers/common/idpf/idpf_common_rxtx_avx512.c\n+++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c\n@@ -1005,7 +1005,7 @@ idpf_tx_singleq_free_bufs_avx512(struct idpf_tx_queue *txq)\n \tstruct rte_mbuf *m, *free[txq->rs_thresh];\n \n \t/* check DD bits on threshold descriptor */\n-\tif ((txq->tx_ring[txq->next_dd].qw1.cmd_dtype &\n+\tif ((txq->tx_ring[txq->next_dd].qw1 &\n \t\t\trte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=\n \t\t\trte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE))\n \t\treturn 0;\n@@ -1113,15 +1113,14 @@ tx_backlog_entry_avx512(struct idpf_tx_vec_entry *txep,\n \t\ttxep[i].mbuf = tx_pkts[i];\n }\n \n-#define IDPF_FLEX_TXD_QW1_BUF_SZ_S 48\n static __rte_always_inline void\n-idpf_singleq_vtx1(volatile struct idpf_flex_tx_desc *txdp,\n+idpf_singleq_vtx1(volatile struct idpf_base_tx_desc *txdp,\n \t  struct rte_mbuf *pkt, uint64_t flags)\n {\n \tuint64_t high_qw =\n-\t\t(IDPF_TX_DESC_DTYPE_FLEX_DATA << IDPF_FLEX_TXD_QW1_DTYPE_S |\n-\t\t ((uint64_t)flags  << IDPF_FLEX_TXD_QW1_CMD_S) |\n-\t\t ((uint64_t)pkt->data_len << IDPF_FLEX_TXD_QW1_BUF_SZ_S));\n+\t\t(IDPF_TX_DESC_DTYPE_DATA |\n+\t\t ((uint64_t)flags  << IDPF_TXD_QW1_CMD_S) |\n+\t\t ((uint64_t)pkt->data_len << IDPF_TXD_QW1_TX_BUF_SZ_S));\n \n \t__m128i descriptor = _mm_set_epi64x(high_qw,\n \t\t\t\t\t    pkt->buf_iova + pkt->data_off);\n@@ -1131,11 +1130,11 @@ idpf_singleq_vtx1(volatile struct idpf_flex_tx_desc *txdp,\n #define IDPF_TX_LEN_MASK 0xAA\n #define IDPF_TX_OFF_MASK 0x55\n static __rte_always_inline void\n-idpf_singleq_vtx(volatile struct idpf_flex_tx_desc *txdp,\n+idpf_singleq_vtx(volatile struct idpf_base_tx_desc *txdp,\n \t struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)\n {\n-\tconst uint64_t hi_qw_tmpl = (IDPF_TX_DESC_DTYPE_FLEX_DATA  |\n-\t\t\t((uint64_t)flags  << IDPF_FLEX_TXD_QW1_CMD_S));\n+\tconst uint64_t hi_qw_tmpl = (IDPF_TX_DESC_DTYPE_DATA  |\n+\t\t\t((uint64_t)flags  << IDPF_TXD_QW1_CMD_S));\n \n \t/* if unaligned on 32-bit boundary, do one to align */\n \tif (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {\n@@ -1148,19 +1147,19 @@ idpf_singleq_vtx(volatile struct idpf_flex_tx_desc *txdp,\n \t\tuint64_t hi_qw3 =\n \t\t\thi_qw_tmpl |\n \t\t\t((uint64_t)pkt[3]->data_len <<\n-\t\t\t IDPF_FLEX_TXD_QW1_BUF_SZ_S);\n+\t\t\t IDPF_TXD_QW1_TX_BUF_SZ_S);\n \t\tuint64_t hi_qw2 =\n \t\t\thi_qw_tmpl |\n \t\t\t((uint64_t)pkt[2]->data_len <<\n-\t\t\t IDPF_FLEX_TXD_QW1_BUF_SZ_S);\n+\t\t\t IDPF_TXD_QW1_TX_BUF_SZ_S);\n \t\tuint64_t hi_qw1 =\n \t\t\thi_qw_tmpl |\n \t\t\t((uint64_t)pkt[1]->data_len <<\n-\t\t\t IDPF_FLEX_TXD_QW1_BUF_SZ_S);\n+\t\t\t IDPF_TXD_QW1_TX_BUF_SZ_S);\n \t\tuint64_t hi_qw0 =\n \t\t\thi_qw_tmpl |\n \t\t\t((uint64_t)pkt[0]->data_len <<\n-\t\t\t IDPF_FLEX_TXD_QW1_BUF_SZ_S);\n+\t\t\t IDPF_TXD_QW1_TX_BUF_SZ_S);\n \n \t\t__m512i desc0_3 =\n \t\t\t_mm512_set_epi64\n@@ -1187,11 +1186,11 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk\n \t\t\t\t\t uint16_t nb_pkts)\n {\n \tstruct idpf_tx_queue *txq = tx_queue;\n-\tvolatile struct idpf_flex_tx_desc 
*txdp;\n+\tvolatile struct idpf_base_tx_desc *txdp;\n \tstruct idpf_tx_vec_entry *txep;\n \tuint16_t n, nb_commit, tx_id;\n-\tuint64_t flags = IDPF_TX_FLEX_DESC_CMD_EOP;\n-\tuint64_t rs = IDPF_TX_FLEX_DESC_CMD_RS | flags;\n+\tuint64_t flags = IDPF_TX_DESC_CMD_EOP;\n+\tuint64_t rs = IDPF_TX_DESC_CMD_RS | flags;\n \n \t/* cross rx_thresh boundary is not allowed */\n \tnb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);\n@@ -1238,9 +1237,9 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk\n \n \ttx_id = (uint16_t)(tx_id + nb_commit);\n \tif (tx_id > txq->next_rs) {\n-\t\ttxq->tx_ring[txq->next_rs].qw1.cmd_dtype |=\n-\t\t\trte_cpu_to_le_64(((uint64_t)IDPF_TX_FLEX_DESC_CMD_RS) <<\n-\t\t\t\t\t IDPF_FLEX_TXD_QW1_CMD_S);\n+\t\ttxq->tx_ring[txq->next_rs].qw1 |=\n+\t\t\trte_cpu_to_le_64(((uint64_t)IDPF_TX_DESC_CMD_RS) <<\n+\t\t\t\t\t IDPF_TXD_QW1_CMD_S);\n \t\ttxq->next_rs =\n \t\t\t(uint16_t)(txq->next_rs + txq->rs_thresh);\n \t}\ndiff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c\nindex 2ef6871a85..ab8bec4645 100644\n--- a/drivers/net/cpfl/cpfl_rxtx.c\n+++ b/drivers/net/cpfl/cpfl_rxtx.c\n@@ -135,7 +135,7 @@ cpfl_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_sched_desc),\n \t\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n \t\telse\n-\t\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_desc),\n+\t\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_base_tx_desc),\n \t\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n \t\tmemcpy(ring_name, \"cpfl Tx ring\", sizeof(\"cpfl Tx ring\"));\n \t\tbreak;\ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nindex 3e3d81ca6d..64f2235580 100644\n--- a/drivers/net/idpf/idpf_rxtx.c\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -74,7 +74,7 @@ idpf_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_sched_desc),\n \t\t\t\t\t      IDPF_DMA_MEM_ALIGN);\n \t\telse\n-\t\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_desc),\n+\t\t\tring_size = RTE_ALIGN(len * sizeof(struct idpf_base_tx_desc),\n \t\t\t\t\t      IDPF_DMA_MEM_ALIGN);\n \t\trte_memcpy(ring_name, \"idpf Tx ring\", sizeof(\"idpf Tx ring\"));\n \t\tbreak;\n",
    "prefixes": [
        "v5",
        "01/11"
    ]
}
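
The fields above are plain URLs into the rest of the API, so the response alone is enough to fetch the actual patch and its context. A short sketch, again assuming the requests library (the output file name and the later use of git am are only illustrative):

import requests

patch = requests.get("http://patchwork.dpdk.org/api/patches/131658/").json()

# "mbox" points at the raw mail, suitable for applying with `git am`.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("131658.mbox", "wb") as f:
    f.write(mbox.content)

# "series", "comments" and "checks" reference the surrounding resources.
for series in patch["series"]:
    print(series["version"], series["name"], series["mbox"])
print(patch["check"], patch["comments"], patch["checks"])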