get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request body are changed).

put:
Update a patch (full update).
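
A minimal client-side sketch of the operations above (not part of the server output). It assumes the Python requests package and a hypothetical Patchwork API token with maintainer rights; the writable field names ("state", "archived") are taken from the payload shown below, and the state value used here is only an illustration.

import requests

URL = "http://patchwork.dpdk.org/api/patches/119021/"
TOKEN = "..."  # hypothetical token from your Patchwork user profile

# get: Show a patch (read-only, no authentication needed).
patch = requests.get(URL).json()
print(patch["name"], patch["state"])

# patch: Update a patch (partial update of writable fields).
resp = requests.patch(
    URL,
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()

# put: Update a patch (full update) works the same way via requests.put().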

GET /api/patches/119021/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 119021,
    "url": "http://patchwork.dpdk.org/api/patches/119021/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221024131227.1062446-17-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221024131227.1062446-17-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221024131227.1062446-17-junfeng.guo@intel.com",
    "date": "2022-10-24T13:12:25",
    "name": "[v11,16/18] net/idpf: add support for Tx offloading",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "f1fa648845a4e6b692aaf3fac49109259909a428",
    "submitter": {
        "id": 1785,
        "url": "http://patchwork.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221024131227.1062446-17-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25386,
            "url": "http://patchwork.dpdk.org/api/series/25386/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25386",
            "date": "2022-10-24T13:12:09",
            "name": "add support for idpf PMD in DPDK",
            "version": 11,
            "mbox": "http://patchwork.dpdk.org/series/25386/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/119021/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/119021/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 086FCA034C;\n\tMon, 24 Oct 2022 15:16:05 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A2A2F42BEF;\n\tMon, 24 Oct 2022 15:14:50 +0200 (CEST)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n by mails.dpdk.org (Postfix) with ESMTP id 39F4142BEF\n for <dev@dpdk.org>; Mon, 24 Oct 2022 15:14:43 +0200 (CEST)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 24 Oct 2022 06:14:42 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by orsmga001.jf.intel.com with ESMTP; 24 Oct 2022 06:14:40 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666617283; x=1698153283;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=Dwkh6vObO9Rg2seQBJah7nude05Ie2zWClgvmv3vlZ0=;\n b=GsHKWUfgay14D9fBLVABHxcSjL1CoMiT2JkX1lMQZR47obAX7dY7ClS3\n G0rYXVBeC90DZR86hwUHJFt7yfqxqvs2uaT76DUqYf1o0lixudJ/fN/e6\n lDgPaOiBE1gMRy8M9LUPEAxTCnhqrtIwQCyT3bj96mBJRQygGChQ7NH8J\n fld5oIWd+R9FVRtiwT9DeWPPjlL6Q0QtPelMFyYxGp1nASyko1Cw9EkHS\n UJiWJ7UjIi8IjG6+rOkvQKifwmN8TFVzdDWAjKR04u7q9WcWNahgmRoW3\n Ra/detpYxLcWEractjiuHzqWIK82y7agQOmykciFWNgdq2HuKEJ2uMk3V w==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10510\"; a=\"309100081\"",
            "E=Sophos;i=\"5.95,209,1661842800\"; d=\"scan'208\";a=\"309100081\"",
            "E=McAfee;i=\"6500,9779,10510\"; a=\"664540084\"",
            "E=Sophos;i=\"5.95,209,1661842800\"; d=\"scan'208\";a=\"664540084\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,\n jingjing.wu@intel.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,\n Xiaoyun Li <xiaoyun.li@intel.com>",
        "Subject": "[PATCH v11 16/18] net/idpf: add support for Tx offloading",
        "Date": "Mon, 24 Oct 2022 21:12:25 +0800",
        "Message-Id": "<20221024131227.1062446-17-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221024131227.1062446-1-junfeng.guo@intel.com>",
        "References": "<20221024130134.1046536-2-junfeng.guo@intel.com>\n <20221024131227.1062446-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add Tx offloading support:\n - support TSO\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n doc/guides/nics/features/idpf.ini |   1 +\n drivers/net/idpf/idpf_ethdev.c    |   4 +-\n drivers/net/idpf/idpf_rxtx.c      | 134 +++++++++++++++++++++++++++++-\n drivers/net/idpf/idpf_rxtx.h      |  22 +++++\n 4 files changed, 158 insertions(+), 3 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini\nindex c86d9378ea..47c686762d 100644\n--- a/doc/guides/nics/features/idpf.ini\n+++ b/doc/guides/nics/features/idpf.ini\n@@ -9,6 +9,7 @@\n [Features]\n Queue start/stop     = Y\n MTU update           = Y\n+TSO                  = P\n L3 checksum offload  = P\n L4 checksum offload  = P\n Packet type parsing  = Y\ndiff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nindex d8cc423a23..21315866bf 100644\n--- a/drivers/net/idpf/idpf_ethdev.c\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -102,7 +102,9 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\tRTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM\t|\n \t\tRTE_ETH_RX_OFFLOAD_RSS_HASH;\n \n-\tdev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;\n+\tdev_info->tx_offload_capa =\n+\t\tRTE_ETH_TX_OFFLOAD_TCP_TSO\t\t|\n+\t\tRTE_ETH_TX_OFFLOAD_MULTI_SEGS;\n \n \tdev_info->default_rxconf = (struct rte_eth_rxconf) {\n \t\t.rx_free_thresh = IDPF_DEFAULT_RX_FREE_THRESH,\ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nindex 143c8b69f3..8f82cf1b59 100644\n--- a/drivers/net/idpf/idpf_rxtx.c\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -1549,6 +1549,49 @@ idpf_split_tx_free(struct idpf_tx_queue *cq)\n \tcq->tx_tail = next;\n }\n \n+/* Check if the context descriptor is needed for TX offloading */\n+static inline uint16_t\n+idpf_calc_context_desc(uint64_t flags)\n+{\n+\tif ((flags & RTE_MBUF_F_TX_TCP_SEG) != 0)\n+\t\treturn 1;\n+\n+\treturn 0;\n+}\n+\n+/* set TSO context descriptor\n+ */\n+static inline void\n+idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,\n+\t\t\tunion idpf_tx_offload tx_offload,\n+\t\t\tvolatile union idpf_flex_tx_ctx_desc *ctx_desc)\n+{\n+\tuint16_t cmd_dtype;\n+\tuint32_t tso_len;\n+\tuint8_t hdr_len;\n+\n+\tif (tx_offload.l4_len == 0) {\n+\t\tPMD_TX_LOG(DEBUG, \"L4 length set to 0\");\n+\t\treturn;\n+\t}\n+\n+\thdr_len = tx_offload.l2_len +\n+\t\ttx_offload.l3_len +\n+\t\ttx_offload.l4_len;\n+\tcmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |\n+\t\tIDPF_TX_FLEX_CTX_DESC_CMD_TSO;\n+\ttso_len = mbuf->pkt_len - hdr_len;\n+\n+\tctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);\n+\tctx_desc->tso.qw0.hdr_len = hdr_len;\n+\tctx_desc->tso.qw0.mss_rt =\n+\t\trte_cpu_to_le_16((uint16_t)mbuf->tso_segsz &\n+\t\t\t\t IDPF_TXD_FLEX_CTX_MSS_RT_M);\n+\tctx_desc->tso.qw0.flex_tlen =\n+\t\trte_cpu_to_le_32(tso_len &\n+\t\t\t\t IDPF_TXD_FLEX_CTX_MSS_RT_M);\n+}\n+\n uint16_t\n idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t      uint16_t nb_pkts)\n@@ -1557,11 +1600,14 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tvolatile struct idpf_flex_tx_sched_desc *txr;\n \tvolatile struct idpf_flex_tx_sched_desc *txd;\n \tstruct idpf_tx_entry *sw_ring;\n+\tunion idpf_tx_offload tx_offload = {0};\n \tstruct idpf_tx_entry *txe, *txn;\n \tuint16_t nb_used, tx_id, sw_id;\n \tstruct rte_mbuf *tx_pkt;\n \tuint16_t nb_to_clean;\n \tuint16_t nb_tx = 0;\n+\tuint64_t ol_flags;\n+\tuint16_t nb_ctx;\n \n \tif (unlikely(txq == NULL) || unlikely(!txq->q_started))\n \t\treturn nb_tx;\n@@ -1591,7 +1637,29 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \n \t\tif (txq->nb_free < tx_pkt->nb_segs)\n \t\t\tbreak;\n-\t\tnb_used = tx_pkt->nb_segs;\n+\n+\t\tol_flags = tx_pkt->ol_flags;\n+\t\ttx_offload.l2_len = tx_pkt->l2_len;\n+\t\ttx_offload.l3_len = tx_pkt->l3_len;\n+\t\ttx_offload.l4_len = tx_pkt->l4_len;\n+\t\ttx_offload.tso_segsz = 
tx_pkt->tso_segsz;\n+\t\t/* Calculate the number of context descriptors needed. */\n+\t\tnb_ctx = idpf_calc_context_desc(ol_flags);\n+\t\tnb_used = tx_pkt->nb_segs + nb_ctx;\n+\n+\t\t/* context descriptor */\n+\t\tif (nb_ctx != 0) {\n+\t\t\tvolatile union idpf_flex_tx_ctx_desc *ctx_desc =\n+\t\t\t(volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];\n+\n+\t\t\tif ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)\n+\t\t\t\tidpf_set_splitq_tso_ctx(tx_pkt, tx_offload,\n+\t\t\t\t\t\t\tctx_desc);\n+\n+\t\t\ttx_id++;\n+\t\t\tif (tx_id == txq->nb_tx_desc)\n+\t\t\t\ttx_id = 0;\n+\t\t}\n \n \t\tdo {\n \t\t\ttxd = &txr[tx_id];\n@@ -1842,14 +1910,17 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n {\n \tvolatile struct idpf_flex_tx_desc *txd;\n \tvolatile struct idpf_flex_tx_desc *txr;\n+\tunion idpf_tx_offload tx_offload = {0};\n \tstruct idpf_tx_entry *txe, *txn;\n \tstruct idpf_tx_entry *sw_ring;\n \tstruct idpf_tx_queue *txq;\n \tstruct rte_mbuf *tx_pkt;\n \tstruct rte_mbuf *m_seg;\n \tuint64_t buf_dma_addr;\n+\tuint64_t ol_flags;\n \tuint16_t tx_last;\n \tuint16_t nb_used;\n+\tuint16_t nb_ctx;\n \tuint16_t td_cmd;\n \tuint16_t tx_id;\n \tuint16_t nb_tx;\n@@ -1876,11 +1947,19 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\ttx_pkt = *tx_pkts++;\n \t\tRTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);\n \n+\t\tol_flags = tx_pkt->ol_flags;\n+\t\ttx_offload.l2_len = tx_pkt->l2_len;\n+\t\ttx_offload.l3_len = tx_pkt->l3_len;\n+\t\ttx_offload.l4_len = tx_pkt->l4_len;\n+\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n+\t\t/* Calculate the number of context descriptors needed. */\n+\t\tnb_ctx = idpf_calc_context_desc(ol_flags);\n+\n \t\t/* The number of descriptors that must be allocated for\n \t\t * a packet equals to the number of the segments of that\n \t\t * packet plus 1 context descriptor if needed.\n \t\t */\n-\t\tnb_used = (uint16_t)(tx_pkt->nb_segs);\n+\t\tnb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);\n \t\ttx_last = (uint16_t)(tx_id + nb_used - 1);\n \n \t\t/* Circular ring */\n@@ -1908,6 +1987,29 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t}\n \t\t}\n \n+\t\tif (nb_ctx != 0) {\n+\t\t\t/* Setup TX context descriptor if required */\n+\t\t\tvolatile union idpf_flex_tx_ctx_desc *ctx_txd =\n+\t\t\t\t(volatile union idpf_flex_tx_ctx_desc *)\n+\t\t\t\t\t\t\t&txr[tx_id];\n+\n+\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\tRTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);\n+\t\t\tif (txe->mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\t\ttxe->mbuf = NULL;\n+\t\t\t}\n+\n+\t\t\t/* TSO enabled */\n+\t\t\tif ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)\n+\t\t\t\tidpf_set_splitq_tso_ctx(tx_pkt, tx_offload,\n+\t\t\t\t\t\t\tctx_txd);\n+\n+\t\t\ttxe->last_id = tx_last;\n+\t\t\ttx_id = txe->next_id;\n+\t\t\ttxe = txn;\n+\t\t}\n+\n \t\tm_seg = tx_pkt;\n \t\tdo {\n \t\t\ttxd = &txr[tx_id];\n@@ -1968,16 +2070,44 @@ idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,\n \t       uint16_t nb_pkts)\n {\n \tint i, ret;\n+\tuint64_t ol_flags;\n \tstruct rte_mbuf *m;\n \n \tfor (i = 0; i < nb_pkts; i++) {\n \t\tm = tx_pkts[i];\n+\t\tol_flags = m->ol_flags;\n+\n+\t\t/* Check condition for nb_segs > IDPF_TX_MAX_MTU_SEG. 
*/\n+\t\tif ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) {\n+\t\t\tif (m->nb_segs > IDPF_TX_MAX_MTU_SEG) {\n+\t\t\t\trte_errno = EINVAL;\n+\t\t\t\treturn i;\n+\t\t\t}\n+\t\t} else if ((m->tso_segsz < IDPF_MIN_TSO_MSS) ||\n+\t\t\t   (m->tso_segsz > IDPF_MAX_TSO_MSS) ||\n+\t\t\t   (m->pkt_len > IDPF_MAX_TSO_FRAME_SIZE)) {\n+\t\t\t/* MSS outside the range are considered malicious */\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn i;\n+\t\t}\n+\n+\t\tif ((ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) != 0) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn i;\n+\t\t}\n \n \t\tif (m->pkt_len < IDPF_MIN_FRAME_SIZE) {\n \t\t\trte_errno = EINVAL;\n \t\t\treturn i;\n \t\t}\n \n+#ifdef RTE_LIBRTE_ETHDEV_DEBUG\n+\t\tret = rte_validate_tx_offload(m);\n+\t\tif (ret != 0) {\n+\t\t\trte_errno = -ret;\n+\t\t\treturn i;\n+\t\t}\n+#endif\n \t\tret = rte_net_intel_cksum_prepare(m);\n \t\tif (ret != 0) {\n \t\t\trte_errno = -ret;\ndiff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h\nindex eb0b230d3a..efb2734d85 100644\n--- a/drivers/net/idpf/idpf_rxtx.h\n+++ b/drivers/net/idpf/idpf_rxtx.h\n@@ -21,6 +21,16 @@\n #define IDPF_DEFAULT_TX_RS_THRESH\t32\n #define IDPF_DEFAULT_TX_FREE_THRESH\t32\n \n+#define IDPF_MIN_TSO_MSS\t88\n+#define IDPF_MAX_TSO_MSS\t9728\n+#define IDPF_MAX_TSO_FRAME_SIZE\t262143\n+#define IDPF_TX_MAX_MTU_SEG     10\n+\n+#define IDPF_TX_OFFLOAD_MASK RTE_MBUF_F_TX_TCP_SEG\n+\n+#define IDPF_TX_OFFLOAD_NOTSUP_MASK \\\n+\t\t(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)\n+\n #define IDPF_GET_PTYPE_SIZE(p) \\\n \t(sizeof(struct virtchnl2_ptype) + \\\n \t(((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))\n@@ -115,6 +125,18 @@ struct idpf_tx_queue {\n \tstruct idpf_tx_queue *complq;\n };\n \n+/* Offload features */\n+union idpf_tx_offload {\n+\tuint64_t data;\n+\tstruct {\n+\t\tuint64_t l2_len:7; /* L2 (MAC) Header Length. */\n+\t\tuint64_t l3_len:9; /* L3 (IP) Header Length. */\n+\t\tuint64_t l4_len:8; /* L4 Header Length. */\n+\t\tuint64_t tso_segsz:16; /* TCP TSO segment size */\n+\t\t/* uint64_t unused : 24; */\n+\t};\n+};\n+\n struct idpf_rxq_ops {\n \tvoid (*release_mbufs)(struct idpf_rx_queue *rxq);\n };\n",
    "prefixes": [
        "v11",
        "16/18"
    ]
}
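
The "mbox" URL in the payload above can be fetched to obtain the patch in mbox form for local testing. A short sketch, again assuming the requests package and a local DPDK checkout; the output filename is arbitrary.

import subprocess
import requests

patch = requests.get("http://patchwork.dpdk.org/api/patches/119021/").json()

# Download the patch in mbox form via the "mbox" URL reported in the response.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("v11-16-18-net-idpf-tx-offload.mbox", "wb") as f:
    f.write(mbox.content)

# Apply it to the current git tree (run inside a DPDK checkout).
subprocess.run(["git", "am", "v11-16-18-net-idpf-tx-offload.mbox"], check=True)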