[v3,2/2] app/testpmd: fix UDP cksum error for UFO enable
Checks
Commit Message
The command "tso set <tso_segsz> <port_id>" is used to enable UFO; please
see commit ce8e6e742807 ("app/testpmd: support UFO in checksum engine").
The above patch sets RTE_MBUF_F_TX_UDP_SEG in the mbuf ol_flags based
only on whether 'tso_segsz' is set, but misses checking whether the UFO
offload (RTE_ETH_TX_OFFLOAD_UDP_TSO) is supported by the device. The RTE_MBUF_F_TX_UDP_SEG
flag causes driver that supports TSO to compute pseudo header checksum.
As a result, if device only supports TSO, but not UFO, UDP packet checksum
will be wrong.
So enabling UFO must also depend on whether the driver has the
RTE_ETH_TX_OFFLOAD_UDP_TSO capability. Similarly, TSO needs to be handled
in the same way.
Note: all tunnel TSO offload flags are included in the check in
process_inner_cksums() so as not to impact tunnel TSO.
In addition, this patch also fixes cmd_tso_set_parsed() so that it
better supports TSO and UFO.
Fixes: ce8e6e742807 ("app/testpmd: support UFO in checksum engine")
Cc: stable@dpdk.org
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
---
app/test-pmd/cmdline.c | 31 +++++++++++++++++++++----------
app/test-pmd/csumonly.c | 11 +++++++++--
2 files changed, 30 insertions(+), 12 deletions(-)
@@ -4933,6 +4933,7 @@ cmd_tso_set_parsed(void *parsed_result,
{
struct cmd_tso_set_result *res = parsed_result;
struct rte_eth_dev_info dev_info;
+ uint64_t offloads;
int ret;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
@@ -4949,21 +4950,31 @@ cmd_tso_set_parsed(void *parsed_result,
if (ret != 0)
return;
- if ((ports[res->port_id].tso_segsz != 0) &&
- (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
- fprintf(stderr, "Error: TSO is not supported by port %d\n",
- res->port_id);
- return;
+ if (ports[res->port_id].tso_segsz != 0) {
+ if ((dev_info.tx_offload_capa & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TSO)) == 0) {
+ fprintf(stderr, "Error: both TSO and UFO are not supported by port %d\n",
+ res->port_id);
+ return;
+ }
+ /* display warnings if configuration is not supported by the NIC */
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0)
+ printf("Warning: port %d doesn't support TSO\n", res->port_id);
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TSO) == 0)
+ printf("Warning: port %d doesn't support UFO\n", res->port_id);
}
if (ports[res->port_id].tso_segsz == 0) {
ports[res->port_id].dev_conf.txmode.offloads &=
- ~RTE_ETH_TX_OFFLOAD_TCP_TSO;
- printf("TSO for non-tunneled packets is disabled\n");
+ ~(RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO);
+ printf("TSO and UFO for non-tunneled packets is disabled\n");
} else {
- ports[res->port_id].dev_conf.txmode.offloads |=
- RTE_ETH_TX_OFFLOAD_TCP_TSO;
- printf("TSO segment size for non-tunneled packets is %d\n",
+ offloads = (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) ?
+ RTE_ETH_TX_OFFLOAD_TCP_TSO : 0;
+ offloads |= (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TSO) ?
+ RTE_ETH_TX_OFFLOAD_UDP_TSO : 0;
+ ports[res->port_id].dev_conf.txmode.offloads |= offloads;
+ printf("segment size for non-tunneled packets is %d\n",
ports[res->port_id].tso_segsz);
}
cmd_config_queue_tx_offloads(&ports[res->port_id]);
@@ -466,6 +466,12 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
uint64_t ol_flags = 0;
uint32_t max_pkt_len, tso_segsz = 0;
uint16_t l4_off;
+ uint64_t all_tunnel_tso = RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO;
/* ensure packet is large enough to require tso */
if (!info->is_tunnel) {
@@ -505,7 +511,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
- if (tso_segsz)
+ if (tso_segsz && (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_TSO))
ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
else if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
@@ -528,7 +534,8 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
#endif
} else if (info->l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
- if (tso_segsz)
+ if (tso_segsz &&
+ (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO | all_tunnel_tso)))
ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;