From patchwork Wed Jan 18 06:00:35 2023
X-Patchwork-Submitter: Jiawen Wu <jiawenwu@trustnetic.com>
X-Patchwork-Id: 122240
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>, stable@dpdk.org
Subject: [PATCH 4/8] net/ngbe: fix packet type to parse from offload flags
Date: Wed, 18 Jan 2023 14:00:35 +0800
Message-Id: <20230118060039.3074016-5-jiawenwu@trustnetic.com>
X-Mailer: git-send-email 2.27.0
In-Reply-To: <20230118060039.3074016-1-jiawenwu@trustnetic.com>
References: <20230118060039.3074016-1-jiawenwu@trustnetic.com>

In some external applications, developers may fill in a wrong packet_type
in rte_mbuf for transmission, which results in a Tx ring hang when Tx
checksum offload is on. Parse the packet type from ol_flags instead, and
remove the redundant tunnel-type handling, since the NIC does not support
tunnel offloads.

Fixes: 9f3206140274 ("net/ngbe: support TSO")
Cc: stable@dpdk.org

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
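As background, a minimal sketch (illustrative, not part of the patch) of
the Tx-side mbuf setup this fix assumes: with the change, the PMD derives
the descriptor packet type from ol_flags and the l2/l3/l4 length fields
alone, so a wrong mbuf->packet_type can no longer hang the Tx ring. The
helper name setup_ipv4_tcp_cksum() is hypothetical.

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

/* Illustrative only: request IPv4 + TCP checksum offload on transmit.
 * After this fix, ngbe reads these flags and length fields rather than
 * m->packet_type when building the Tx context descriptor. */
static void
setup_ipv4_tcp_cksum(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 |
		       RTE_MBUF_F_TX_IP_CKSUM |
		       RTE_MBUF_F_TX_TCP_CKSUM;
	/* m->packet_type is deliberately left untouched. */
}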
 drivers/net/ngbe/ngbe_rxtx.c | 87 +++++++++---------------------------
 1 file changed, 20 insertions(+), 67 deletions(-)

diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 9fd24fa444..09312cf40d 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -24,15 +24,11 @@
 /* Bit Mask to indicate what bits required for building Tx context */
 static const u64 NGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM |
-		RTE_MBUF_F_TX_OUTER_IPV6 |
-		RTE_MBUF_F_TX_OUTER_IPV4 |
 		RTE_MBUF_F_TX_IPV6 |
 		RTE_MBUF_F_TX_IPV4 |
 		RTE_MBUF_F_TX_VLAN |
 		RTE_MBUF_F_TX_L4_MASK |
 		RTE_MBUF_F_TX_TCP_SEG |
-		RTE_MBUF_F_TX_TUNNEL_MASK |
-		RTE_MBUF_F_TX_OUTER_IP_CKSUM |
 		NGBE_TX_IEEE1588_TMST);
 
 #define NGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -333,34 +329,15 @@ ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
 	}
 
 	vlan_macip_lens = NGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
-
-	if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
-		tx_offload_mask.outer_tun_len |= ~0;
-		tx_offload_mask.outer_l2_len |= ~0;
-		tx_offload_mask.outer_l3_len |= ~0;
-		tx_offload_mask.l2_len |= ~0;
-		tunnel_seed = NGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
-		tunnel_seed |= NGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
-
-		switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
-		case RTE_MBUF_F_TX_TUNNEL_IPIP:
-			/* for non UDP / GRE tunneling, set to 0b */
-			break;
-		default:
-			PMD_TX_LOG(ERR, "Tunnel type not supported");
-			return;
-		}
-		vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.outer_l2_len);
-	} else {
-		tunnel_seed = 0;
-		vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
-	}
+	vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
 
 	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
 		tx_offload_mask.vlan_tci |= ~0;
 		vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);
 	}
 
+	tunnel_seed = 0;
+
 	txq->ctx_cache[ctx_idx].flags = ol_flags;
 	txq->ctx_cache[ctx_idx].tx_offload.data[0] =
 		tx_offload_mask.data[0] & tx_offload.data[0];
@@ -449,16 +426,10 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 	return cmdtype;
 }
 
-static inline uint8_t
-tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
+static inline uint32_t
+tx_desc_ol_flags_to_ptype(uint64_t oflags)
 {
-	bool tun;
-
-	if (ptype)
-		return ngbe_encode_ptype(ptype);
-
-	/* Only support flags in NGBE_TX_OFFLOAD_MASK */
-	tun = !!(oflags & RTE_MBUF_F_TX_TUNNEL_MASK);
+	uint32_t ptype;
 
 	/* L2 level */
 	ptype = RTE_PTYPE_L2_ETHER;
@@ -466,41 +437,34 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
 	if (oflags & RTE_MBUF_F_TX_VLAN)
 		ptype |= RTE_PTYPE_L2_ETHER_VLAN;
 
 	/* L3 level */
-	if (oflags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM))
-		ptype |= RTE_PTYPE_L3_IPV4;
-	else if (oflags & (RTE_MBUF_F_TX_OUTER_IPV6))
-		ptype |= RTE_PTYPE_L3_IPV6;
-
 	if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM))
-		ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
+		ptype |= RTE_PTYPE_L3_IPV4;
 	else if (oflags & (RTE_MBUF_F_TX_IPV6))
-		ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
+		ptype |= RTE_PTYPE_L3_IPV6;
 
 	/* L4 level */
 	switch (oflags & (RTE_MBUF_F_TX_L4_MASK)) {
 	case RTE_MBUF_F_TX_TCP_CKSUM:
-		ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
+		ptype |= RTE_PTYPE_L4_TCP;
 		break;
 	case RTE_MBUF_F_TX_UDP_CKSUM:
-		ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
+		ptype |= RTE_PTYPE_L4_UDP;
 		break;
 	case RTE_MBUF_F_TX_SCTP_CKSUM:
-		ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
+		ptype |= RTE_PTYPE_L4_SCTP;
 		break;
 	}
 
 	if (oflags & RTE_MBUF_F_TX_TCP_SEG)
-		ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
-
-	/* Tunnel */
-	switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) {
-	case RTE_MBUF_F_TX_TUNNEL_IPIP:
-	case RTE_MBUF_F_TX_TUNNEL_IP:
-		ptype |= RTE_PTYPE_L2_ETHER |
-			 RTE_PTYPE_L3_IPV4 |
-			 RTE_PTYPE_TUNNEL_IP;
-		break;
-	}
+		ptype |= RTE_PTYPE_L4_TCP;
+
+	return ptype;
+}
+
+static inline uint8_t
+tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
+{
+	ptype = tx_desc_ol_flags_to_ptype(oflags);
 
 	return ngbe_encode_ptype(ptype);
 }
@@ -629,9 +593,6 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_offload.l4_len = tx_pkt->l4_len;
 			tx_offload.vlan_tci = tx_pkt->vlan_tci;
 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
-			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
-			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
-			tx_offload.outer_tun_len = 0;
 
 			/* If new context need be built or reuse the exist ctx*/
 			ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
@@ -752,10 +713,6 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 */
 			pkt_len -= (tx_offload.l2_len +
 				tx_offload.l3_len + tx_offload.l4_len);
-			pkt_len -=
-				(tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-				? tx_offload.outer_l2_len +
-				  tx_offload.outer_l3_len : 0;
 		}
 
 		/*
@@ -1939,12 +1896,8 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
-		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
 		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
 		RTE_ETH_TX_OFFLOAD_UDP_TSO     |
-		RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
-		RTE_ETH_TX_OFFLOAD_IP_TNL_TSO  |
-		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (hw->is_pf)
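For reference, the new mapping pulled out of the diff above as a
self-contained unit. The driver-internal ngbe_encode_ptype() step that
packs the result into the 8-bit ptid the hardware expects is omitted, and
the helper name ol_flags_to_ptype() is ours; the body mirrors
tx_desc_ol_flags_to_ptype() from the patch.

#include <stdint.h>
#include <rte_mbuf_core.h>
#include <rte_mbuf_ptype.h>

/* Build an RTE_PTYPE_* value from Tx offload flags alone, ignoring
 * mbuf->packet_type entirely (same logic as the patch). */
static inline uint32_t
ol_flags_to_ptype(uint64_t oflags)
{
	uint32_t ptype = RTE_PTYPE_L2_ETHER;

	if (oflags & RTE_MBUF_F_TX_VLAN)
		ptype |= RTE_PTYPE_L2_ETHER_VLAN;

	if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM))
		ptype |= RTE_PTYPE_L3_IPV4;
	else if (oflags & RTE_MBUF_F_TX_IPV6)
		ptype |= RTE_PTYPE_L3_IPV6;

	switch (oflags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_TCP_CKSUM:
		ptype |= RTE_PTYPE_L4_TCP;
		break;
	case RTE_MBUF_F_TX_UDP_CKSUM:
		ptype |= RTE_PTYPE_L4_UDP;
		break;
	case RTE_MBUF_F_TX_SCTP_CKSUM:
		ptype |= RTE_PTYPE_L4_SCTP;
		break;
	}

	/* TSO implies TCP regardless of the checksum flags above. */
	if (oflags & RTE_MBUF_F_TX_TCP_SEG)
		ptype |= RTE_PTYPE_L4_TCP;

	return ptype;
}

For example, an mbuf carrying RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM
maps to RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, which
the driver then encodes into the hardware ptid.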