[v1] net/ice: revert the TSO fake fix
Checks
Commit Message
The two fixes do not address the real root cause of the MDD event; they
only mitigate the failure rate under different test modes, so revert them.
Fixes: 2a0c9ae4f646 ("net/ice: fix TCP checksum offload")
Fixes: 7365a3cee51f ("net/ice: calculate TCP header size for offload")
Cc: stable@dpdk.org
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
drivers/net/ice/ice_rxtx.c | 32 ++------------------------------
1 file changed, 2 insertions(+), 30 deletions(-)
Comments
> -----Original Message-----
> From: Wang, Haiyue <haiyue.wang@intel.com>
> Sent: Friday, July 31, 2020 2:00 PM
> To: dev@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Cc: Xu, Ting <ting.xu@intel.com>; Sun, Chenmin <chenmin.sun@intel.com>;
> Wang, Yinan <yinan.wang@intel.com>; Wang, Haiyue
> <haiyue.wang@intel.com>; stable@dpdk.org
> Subject: [PATCH v1] net/ice: revert the TSO fake fix
>
> The two fixes do not address the real root cause of the MDD event; they only
> mitigate the failure rate under different test modes, so revert them.
>
> Fixes: 2a0c9ae4f646 ("net/ice: fix TCP checksum offload")
> Fixes: 7365a3cee51f ("net/ice: calculate TCP header size for offload")
> Cc: stable@dpdk.org
>
> Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
Acked-by: Qi zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
@@ -2234,7 +2234,7 @@ ice_txd_enable_checksum(uint64_t ol_flags,
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
- *td_offset |= (tx_offload.l4_len >> 2) <<
+ *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
case PKT_TX_SCTP_CKSUM:
@@ -2371,28 +2371,6 @@ ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
return count;
}
-/* Calculate TCP header length for PKT_TX_TCP_CKSUM if not provided */
-static inline uint16_t
-ice_calc_pkt_tcp_hdr(struct rte_mbuf *tx_pkt, union ice_tx_offload tx_offload)
-{
- uint16_t tcpoff = tx_offload.l2_len + tx_offload.l3_len;
- const struct rte_tcp_hdr *tcp_hdr;
- struct rte_tcp_hdr _tcp_hdr;
-
- if (tcpoff + sizeof(struct rte_tcp_hdr) < tx_pkt->data_len) {
- tcp_hdr = rte_pktmbuf_mtod_offset(tx_pkt, struct rte_tcp_hdr *,
- tcpoff);
-
- return (tcp_hdr->data_off & 0xf0) >> 2;
- }
-
- tcp_hdr = rte_pktmbuf_read(tx_pkt, tcpoff, sizeof(_tcp_hdr), &_tcp_hdr);
- if (tcp_hdr)
- return (tcp_hdr->data_off & 0xf0) >> 2;
- else
- return 0;
-}
-
uint16_t
ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
@@ -2491,15 +2469,9 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
&cd_tunneling_params);
/* Enable checksum offloading */
- if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM &&
- !tx_offload.l4_len)
- tx_offload.l4_len =
- ice_calc_pkt_tcp_hdr(tx_pkt, tx_offload);
-
+ if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
ice_txd_enable_checksum(ol_flags, &td_cmd,
&td_offset, tx_offload);
- }
if (nb_ctx) {
/* Setup TX context descriptor if required */