[dpdk-dev,6/6] app/testpmd: test VxLAN Tx checksum offload
Commit Message
Add test cases in testpmd to test VxLAN Tx checksum offload, covering both IPv4 and IPv6 outer headers,
and including inner L3 and L4 checksum test cases.
Signed-off-by: jijiangl <jijiang.liu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Jing Chen <jing.d.chen@intel.com>
---
app/test-pmd/config.c | 6 +-
app/test-pmd/csumonly.c | 194 ++++++++++++++++++++++++++++++++++++--
lib/librte_pmd_i40e/i40e_rxtx.c | 4 +-
3 files changed, 188 insertions(+), 16 deletions(-)
@@ -1719,9 +1719,9 @@ tx_cksum_set(portid_t port_id, uint8_t cksum_mask)
uint16_t tx_ol_flags;
if (port_id_is_invalid(port_id))
return;
- /* Clear last 4 bits and then set L3/4 checksum mask again */
- tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFFF0);
- ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xf) | tx_ol_flags);
+ /* Clear last 8 bits and then set L3/4 checksum mask again */
+ tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFF00);
+ ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xff) | tx_ol_flags);
}
void
@@ -196,7 +196,6 @@ get_ipv6_udptcp_checksum(struct ipv6_hdr *ipv6_hdr, uint16_t *l4_hdr)
return (uint16_t)cksum;
}
-
/*
* Forwarding of packets. Change the checksum field with HW or SW methods
* The HW/SW method selection depends on the ol_flags on every packet
@@ -209,10 +208,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
struct rte_mbuf *mb;
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
+ struct ether_hdr *inner_eth_hdr;
+ struct ipv4_hdr *inner_ipv4_hdr = NULL;
struct ipv6_hdr *ipv6_hdr;
+ struct ipv6_hdr *inner_ipv6_hdr = NULL;
struct udp_hdr *udp_hdr;
+ struct udp_hdr *inner_udp_hdr;
struct tcp_hdr *tcp_hdr;
+ struct tcp_hdr *inner_tcp_hdr;
struct sctp_hdr *sctp_hdr;
+ struct sctp_hdr *inner_sctp_hdr;
uint16_t nb_rx;
uint16_t nb_tx;
@@ -221,12 +226,18 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
uint16_t pkt_ol_flags;
uint16_t tx_ol_flags;
uint16_t l4_proto;
+ uint16_t inner_l4_proto = 0;
uint16_t eth_type;
uint8_t l2_len;
uint8_t l3_len;
+ uint8_t inner_l2_len;
+ uint8_t inner_l3_len = 0;
uint32_t rx_bad_ip_csum;
uint32_t rx_bad_l4_csum;
+ uint8_t ipv4_tunnel;
+ uint8_t ipv6_tunnel;
+ uint16_t ptype;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
@@ -261,8 +272,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
mb = pkts_burst[i];
l2_len = sizeof(struct ether_hdr);
pkt_ol_flags = mb->ol_flags;
+ ptype = mb->reserved;
ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK));
+ ipv4_tunnel = IS_ETH_IPV4_TUNNEL(ptype);
+ ipv6_tunnel = IS_ETH_IPV6_TUNNEL(ptype);
+
eth_hdr = (struct ether_hdr *) mb->pkt.data;
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
if (eth_type == ETHER_TYPE_VLAN) {
@@ -295,7 +310,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
* + ipv4 or ipv6
* + udp or tcp or sctp or others
*/
- if (pkt_ol_flags & PKT_RX_IPV4_HDR) {
+ if (pkt_ol_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV4_HDR_EXT)) {
/* Do not support ipv4 option field */
l3_len = sizeof(struct ipv4_hdr) ;
@@ -325,15 +340,96 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (tx_ol_flags & 0x2) {
/* HW Offload */
ol_flags |= PKT_TX_UDP_CKSUM;
- /* Pseudo header sum need be set properly */
- udp_hdr->dgram_cksum = get_ipv4_psd_sum(ipv4_hdr);
+ if (ipv4_tunnel)
+ udp_hdr->dgram_cksum = 0;
+ else
+ /* Pseudo header sum need be set properly */
+ udp_hdr->dgram_cksum =
+ get_ipv4_psd_sum(ipv4_hdr);
}
else {
/* SW Implementation, clear checksum field first */
udp_hdr->dgram_cksum = 0;
udp_hdr->dgram_cksum = get_ipv4_udptcp_checksum(ipv4_hdr,
- (uint16_t*)udp_hdr);
+ (uint16_t *)udp_hdr);
}
+
+ if (ipv4_tunnel) {
+
+ uint16_t len;
+
+ /* Check if inner L3/L4 checkum flag is set */
+ if (tx_ol_flags & 0xF0)
+ ol_flags |= PKT_TX_VXLAN_CKSUM;
+
+ inner_l2_len = sizeof(struct ether_hdr);
+ inner_eth_hdr = (struct ether_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + l2_len + l3_len
+ + ETHER_VXLAN_HLEN);
+
+ eth_type = rte_be_to_cpu_16(inner_eth_hdr->ether_type);
+ if (eth_type == ETHER_TYPE_VLAN) {
+ ol_flags |= PKT_TX_IVLAN_PKT;
+ inner_l2_len += sizeof(struct vlan_hdr);
+ eth_type = rte_be_to_cpu_16(*(uint16_t *)
+ ((uintptr_t)ð_hdr->ether_type +
+ sizeof(struct vlan_hdr)));
+ }
+
+ len = l2_len + l3_len + ETHER_VXLAN_HLEN + inner_l2_len;
+ if (eth_type == ETHER_TYPE_IPv4) {
+ inner_l3_len = sizeof(struct ipv4_hdr);
+ inner_ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len);
+ inner_l4_proto = inner_ipv4_hdr->next_proto_id;
+
+ if (tx_ol_flags & 0x10)
+ /* Do not delete, this is required by HW*/
+ inner_ipv4_hdr->hdr_checksum = 0;
+ ol_flags |= PKT_TX_IPV4_CSUM;
+
+ } else if (eth_type == ETHER_TYPE_IPv6) {
+ inner_l3_len = sizeof(struct ipv6_hdr);
+ inner_ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len);
+ inner_l4_proto = inner_ipv6_hdr->proto;
+ }
+ if ((inner_l4_proto == IPPROTO_UDP) && (tx_ol_flags & 0x20)) {
+
+ /* HW Offload */
+ ol_flags |= PKT_TX_UDP_CKSUM;
+ inner_udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+ if (eth_type == ETHER_TYPE_IPv4)
+ inner_udp_hdr->dgram_cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+ else
+ inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+
+ } else if ((inner_l4_proto == IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
+ /* HW Offload */
+ ol_flags |= PKT_TX_TCP_CKSUM;
+ inner_tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+ if (eth_type == ETHER_TYPE_IPv4)
+ inner_tcp_hdr->cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+ else
+ inner_tcp_hdr->cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+ } else if ((inner_l4_proto == IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
+ /* HW Offload */
+ ol_flags |= PKT_TX_SCTP_CKSUM;
+ inner_sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+ inner_sctp_hdr->cksum = 0;
+
+ /* Sanity check, only number of 4 bytes supported */
+ if ((rte_be_to_cpu_16(inner_ipv4_hdr->total_length) % 4) != 0)
+ printf("sctp payload must be a multiple "
+ "of 4 bytes for checksum offload");
+ }
+
+ mb->reserved = inner_l3_len;
+ }
+
}
else if (l4_proto == IPPROTO_TCP){
tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
@@ -367,14 +463,11 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
}
}
/* End of L4 Handling*/
- }
- else if (pkt_ol_flags & PKT_RX_IPV6_HDR) {
-
+ } else if (pkt_ol_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) {
ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len);
l3_len = sizeof(struct ipv6_hdr) ;
l4_proto = ipv6_hdr->proto;
- ol_flags |= PKT_TX_IPV6;
if (l4_proto == IPPROTO_UDP) {
udp_hdr = (struct udp_hdr*) (rte_pktmbuf_mtod(mb,
@@ -382,15 +475,94 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (tx_ol_flags & 0x2) {
/* HW Offload */
ol_flags |= PKT_TX_UDP_CKSUM;
- udp_hdr->dgram_cksum = get_ipv6_psd_sum(ipv6_hdr);
+ if (ipv6_tunnel)
+ udp_hdr->dgram_cksum = 0;
+ else
+ udp_hdr->dgram_cksum =
+ get_ipv6_psd_sum(ipv6_hdr);
}
else {
/* SW Implementation */
/* checksum field need be clear first */
udp_hdr->dgram_cksum = 0;
udp_hdr->dgram_cksum = get_ipv6_udptcp_checksum(ipv6_hdr,
- (uint16_t*)udp_hdr);
+ (uint16_t *)udp_hdr);
}
+
+ if (ipv6_tunnel) {
+
+ uint16_t len;
+
+ /* Check if inner L3/L4 checksum flag is set */
+ if (tx_ol_flags & 0xF0)
+ ol_flags |= PKT_TX_VXLAN_CKSUM;
+
+ inner_l2_len = sizeof(struct ether_hdr);
+ inner_eth_hdr = (struct ether_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + l2_len + l3_len + ETHER_VXLAN_HLEN);
+ eth_type = rte_be_to_cpu_16(inner_eth_hdr->ether_type);
+
+ if (eth_type == ETHER_TYPE_VLAN) {
+ ol_flags |= PKT_TX_IVLAN_PKT;
+ inner_l2_len += sizeof(struct vlan_hdr);
+ eth_type = rte_be_to_cpu_16(*(uint16_t *)
+ ((uintptr_t)ð_hdr->ether_type +
+ sizeof(struct vlan_hdr)));
+ }
+
+ len = l2_len + l3_len + ETHER_VXLAN_HLEN + inner_l2_len;
+
+ if (eth_type == ETHER_TYPE_IPv4) {
+ inner_l3_len = sizeof(struct ipv4_hdr);
+ inner_ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len);
+ inner_l4_proto = inner_ipv4_hdr->next_proto_id;
+
+ /* HW offload */
+ if (tx_ol_flags & 0x10)
+ /* Do not delete, this is required by HW*/
+ inner_ipv4_hdr->hdr_checksum = 0;
+ ol_flags |= PKT_TX_IPV4_CSUM;
+ } else if (eth_type == ETHER_TYPE_IPv6) {
+ inner_l3_len = sizeof(struct ipv6_hdr);
+ inner_ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len);
+ inner_l4_proto = inner_ipv6_hdr->proto;
+ }
+
+ if ((inner_l4_proto == IPPROTO_UDP) && (tx_ol_flags & 0x20)) {
+ inner_udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+ /* HW offload */
+ ol_flags |= PKT_TX_UDP_CKSUM;
+ inner_udp_hdr->dgram_cksum = 0;
+ if (eth_type == ETHER_TYPE_IPv4)
+ inner_udp_hdr->dgram_cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+ else
+ inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+ } else if ((inner_l4_proto == IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
+ /* HW offload */
+ ol_flags |= PKT_TX_TCP_CKSUM;
+ inner_tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+
+ if (eth_type == ETHER_TYPE_IPv4)
+ inner_tcp_hdr->cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
+ else
+ inner_tcp_hdr->cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
+
+ } else if ((inner_l4_proto == IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
+ /* HW offload */
+ ol_flags |= PKT_TX_SCTP_CKSUM;
+ inner_sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
+ unsigned char *) + len + inner_l3_len);
+ inner_sctp_hdr->cksum = 0;
+ }
+
+ /* pass the inner l3 length to driver */
+ mb->reserved = inner_l3_len;
+ }
+
}
else if (l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
@@ -645,7 +645,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
/* reserved is used to store packet type for RX side */
- mb->reserved = (uint8_t)((qword1 &
+ mb->reserved = (uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT);
@@ -1050,7 +1050,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
- first_seg->reserved = (uint8_t)((qword1 &
+ first_seg->reserved = (uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT);
first_seg->ol_flags = pkt_flags;