[v4] app/testpmd: txonly multiflow port change support

Message ID 20230412181619.496342-1-joshwash@google.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series [v4] app/testpmd: txonly multiflow port change support

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/github-robot: build success github build: passed
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-unit-testing success Testing PASS
ci/iol-abi-testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/intel-Functional success Functional PASS

Commit Message

Joshua Washington April 12, 2023, 6:16 p.m. UTC
Google Cloud routes traffic based on IP addresses, without the support
of MAC addresses, so changing the source IP address for
txonly-multi-flow can have negative performance implications for
net/gve when using testpmd. This patch updates txonly multi-flow mode
to modify the UDP source port instead of the source IP address.

The change can be tested with the following command:
dpdk-testpmd -- --forward-mode=txonly --txonly-multi-flow \
    --txip=<SRC>,<DST>

Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Rushil Gupta <rushilg@google.com>
---
 app/test-pmd/txonly.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)
  

Comments

Singh, Aman Deep April 19, 2023, 12:21 p.m. UTC | #1
On 4/12/2023 11:46 PM, Joshua Washington wrote:
> Google Cloud routes traffic based on IP addresses, without the support
> of MAC addresses, so changing the source IP address for
> txonly-multi-flow can have negative performance implications for
> net/gve when using testpmd. This patch updates txonly multi-flow mode
> to modify the UDP source port instead of the source IP address.

Generally, routing is based on the DST IP address; was the SRC IP also
having an impact in your case?

>
> The change can be tested with the following command:
> dpdk-testpmd -- --forward-mode=txonly --txonly-multi-flow \
>      --txip=<SRC>,<DST>

Missing "-" in command: --tx-ip=<SRC>,<DST>

>
> Signed-off-by: Joshua Washington <joshwash@google.com>
> Reviewed-by: Rushil Gupta <rushilg@google.com>
> ---
>   app/test-pmd/txonly.c | 34 ++++++++++++++++++----------------
>   1 file changed, 18 insertions(+), 16 deletions(-)
>
> diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
> index b3d6873104..7fc743b508 100644
> --- a/app/test-pmd/txonly.c
> +++ b/app/test-pmd/txonly.c
> @@ -56,7 +56,7 @@ uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;
>   #define IP_DEFTTL  64   /* from RFC 1340. */
>   
>   static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
> -RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
> +RTE_DEFINE_PER_LCORE(uint16_t, _src_var); /**< Source port variation */
>   static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
>   
>   static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
> @@ -230,28 +230,30 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
>   	copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, 0);
>   	copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
>   			sizeof(struct rte_ether_hdr));
> +	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
> +			sizeof(struct rte_ether_hdr) +
> +			sizeof(struct rte_ipv4_hdr));
>   	if (txonly_multi_flow) {
> -		uint8_t  ip_var = RTE_PER_LCORE(_ip_var);
> -		struct rte_ipv4_hdr *ip_hdr;
> -		uint32_t addr;
> +		uint16_t src_var = RTE_PER_LCORE(_src_var);
> +		struct rte_udp_hdr *udp_hdr;
> +		uint16_t port;
>   
> -		ip_hdr = rte_pktmbuf_mtod_offset(pkt,
> -				struct rte_ipv4_hdr *,
> -				sizeof(struct rte_ether_hdr));
> +		udp_hdr = rte_pktmbuf_mtod_offset(pkt,
> +				struct rte_udp_hdr *,
> +				sizeof(struct rte_ether_hdr) +
> +				sizeof(struct rte_ipv4_hdr));
>   		/*
> -		 * Generate multiple flows by varying IP src addr. This
> -		 * enables packets are well distributed by RSS in
> +		 * Generate multiple flows by varying UDP source port.
> +		 * This enables packets are well distributed by RSS in
>   		 * receiver side if any and txonly mode can be a decent
>   		 * packet generator for developer's quick performance
>   		 * regression test.
>   		 */
> -		addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();

It was a good feature that the last IP octet carried the lcore_id;
it helped identify which core a packet came from.
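
For reference, this is how the removed scheme laid out the source
address (a worked example based on the old code above, not something
from the thread):

/* Old scheme: vary the third octet per packet via ip_var and let the
 * last octet carry the lcore, since tx_ip_dst_addr = 198.18.0.2. */
addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
/* e.g. ip_var = 0 on lcore 3 -> src_addr = 198.18.0.5, so a capture
 * can map the last octet back to the transmitting core. */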

> -		ip_hdr->src_addr = rte_cpu_to_be_32(addr);
> -		RTE_PER_LCORE(_ip_var) = ip_var;
> +
> +		port = src_var++;
> +		udp_hdr->src_port = rte_cpu_to_be_16(port);
> +		RTE_PER_LCORE(_src_var) = src_var;

To be safe, can we use the ephemeral port range [49152 to 65535]
and fit the lcore_id within it?
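
A minimal sketch of that suggestion, reusing the patch's src_var
counter (illustrative only, not part of the patch):

/* Confine the varying UDP source port to the ephemeral range
 * [49152, 65535] (16384 ports) and fold in the lcore id. */
#define EPHEMERAL_PORT_MIN   49152
#define EPHEMERAL_PORT_COUNT 16384

port = EPHEMERAL_PORT_MIN +
	(uint16_t)((src_var++ + rte_lcore_id()) % EPHEMERAL_PORT_COUNT);
udp_hdr->src_port = rte_cpu_to_be_16(port);
RTE_PER_LCORE(_src_var) = src_var;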

>   	}
> -	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
> -			sizeof(struct rte_ether_hdr) +
> -			sizeof(struct rte_ipv4_hdr));
>   
>   	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
>   		update_pkt_header(pkt, pkt_len);
> @@ -393,7 +395,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
>   	nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_pkt);
>   
>   	if (txonly_multi_flow)
> -		RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx;
> +		RTE_PER_LCORE(_src_var) -= nb_pkt - nb_tx;
>   
>   	if (unlikely(nb_tx < nb_pkt)) {
>   		if (verbose_level > 0 && fs->fwd_dropped == 0)
  
Stephen Hemminger April 19, 2023, 2:38 p.m. UTC | #2
On Wed, 19 Apr 2023 17:51:18 +0530
"Singh, Aman Deep" <aman.deep.singh@intel.com> wrote:

> On 4/12/2023 11:46 PM, Joshua Washington wrote:
> > Google Cloud routes traffic based on IP addresses, without the support
> > of MAC addresses, so changing the source IP address for
> > txonly-multi-flow can have negative performance implications for
> > net/gve when using testpmd. This patch updates txonly multi-flow mode
> > to modify the UDP source port instead of the source IP address.
> 
> Generally, routing is based on the DST IP address; was the SRC IP also
> having an impact in your case?

Most hypervisor infrastructures will not allow a guest to use a source
address that has not been allocated to that guest. Therefore, when using
multiple source addresses, either the guest has to be configured with all
the addresses it will use, or the test needs to use only one source address.
  

Patch

diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index b3d6873104..7fc743b508 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -56,7 +56,7 @@ uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;
 #define IP_DEFTTL  64   /* from RFC 1340. */
 
 static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
-RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
+RTE_DEFINE_PER_LCORE(uint16_t, _src_var); /**< Source port variation */
 static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
 
 static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
@@ -230,28 +230,30 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 	copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, 0);
 	copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
 			sizeof(struct rte_ether_hdr));
+	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
+			sizeof(struct rte_ether_hdr) +
+			sizeof(struct rte_ipv4_hdr));
 	if (txonly_multi_flow) {
-		uint8_t  ip_var = RTE_PER_LCORE(_ip_var);
-		struct rte_ipv4_hdr *ip_hdr;
-		uint32_t addr;
+		uint16_t src_var = RTE_PER_LCORE(_src_var);
+		struct rte_udp_hdr *udp_hdr;
+		uint16_t port;
 
-		ip_hdr = rte_pktmbuf_mtod_offset(pkt,
-				struct rte_ipv4_hdr *,
-				sizeof(struct rte_ether_hdr));
+		udp_hdr = rte_pktmbuf_mtod_offset(pkt,
+				struct rte_udp_hdr *,
+				sizeof(struct rte_ether_hdr) +
+				sizeof(struct rte_ipv4_hdr));
 		/*
-		 * Generate multiple flows by varying IP src addr. This
-		 * enables packets are well distributed by RSS in
+		 * Generate multiple flows by varying UDP source port.
+		 * This enables packets are well distributed by RSS in
 		 * receiver side if any and txonly mode can be a decent
 		 * packet generator for developer's quick performance
 		 * regression test.
 		 */
-		addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
-		ip_hdr->src_addr = rte_cpu_to_be_32(addr);
-		RTE_PER_LCORE(_ip_var) = ip_var;
+
+		port = src_var++;
+		udp_hdr->src_port = rte_cpu_to_be_16(port);
+		RTE_PER_LCORE(_src_var) = src_var;
 	}
-	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
-			sizeof(struct rte_ether_hdr) +
-			sizeof(struct rte_ipv4_hdr));
 
 	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
 		update_pkt_header(pkt, pkt_len);
@@ -393,7 +395,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_pkt);
 
 	if (txonly_multi_flow)
-		RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx;
+		RTE_PER_LCORE(_src_var) -= nb_pkt - nb_tx;
 
 	if (unlikely(nb_tx < nb_pkt)) {
 		if (verbose_level > 0 && fs->fwd_dropped == 0)
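
The RSS rationale in the comment above can be illustrated in software
with DPDK's Toeplitz helper: changing only the UDP source port changes
the hash over the IPv4/UDP 4-tuple, which is what spreads the generated
flows across the receiver's RX queues. A rough, self-contained sketch
(the key is the well-known default Toeplitz key; the addresses and UDP
destination port follow the testpmd defaults, and the exact queue
mapping is illustrative):

#include <rte_ip.h>
#include <rte_thash.h>

static const uint8_t rss_key[40] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
};

/* Which RX queue would a packet with this UDP source port land on? */
static uint16_t
rx_queue_for_src_port(uint16_t src_port, uint16_t nb_rx_queues)
{
	struct rte_ipv4_tuple tuple = {
		.src_addr = RTE_IPV4(198, 18, 0, 1),
		.dst_addr = RTE_IPV4(198, 18, 0, 2),
	};
	uint32_t hash;

	tuple.sport = src_port;
	tuple.dport = 9;

	hash = rte_softrss((uint32_t *)&tuple, RTE_THASH_V4_L4_LEN, rss_key);
	return hash % nb_rx_queues;
}

Consecutive source ports hash to different values, so with multi-flow
enabled the receiver sees the bursts spread across its queues rather
than pinned to one.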