[v3,9/9] app/testpmd: factorize fwd engine Tx

Message ID: 20230220183502.3348368-10-david.marchand@redhat.com
State: Accepted, archived
Delegated to: Ferruh Yigit
Series: Testpmd code cleanup

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/github-robot: build success github build: passed
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-abi-testing success Testing PASS

Commit Message

David Marchand Feb. 20, 2023, 6:35 p.m. UTC
Reduce code duplication by introducing a helper that takes care of
transmitting, retrying if enabled, and incrementing the Tx counter.
inc_tx_burst_stats() is then unneeded and removed.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
Changes since v1:
- changed Tx helper so it matches rte_eth_tx_burst() semantics,
- updated ieee1588,
- removed inc_tx_burst_stats helper,

---
 app/test-pmd/5tswap.c      | 22 +-----------
 app/test-pmd/csumonly.c    | 23 +------------
 app/test-pmd/flowgen.c     | 20 +----------
 app/test-pmd/icmpecho.c    | 28 ++--------------
 app/test-pmd/ieee1588fwd.c |  5 +--
 app/test-pmd/iofwd.c       | 22 +-----------
 app/test-pmd/macfwd.c      | 21 +-----------
 app/test-pmd/macswap.c     | 27 ++-------------
 app/test-pmd/noisy_vnf.c   | 68 ++++++--------------------------------
 app/test-pmd/testpmd.h     | 27 +++++++++++++--
 app/test-pmd/txonly.c      | 19 +----------
 11 files changed, 47 insertions(+), 235 deletions(-)
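
For context, here is a minimal sketch (not part of the patch) of what a
forwarding engine body looks like once it relies on the new helper. It
mirrors the updated iofwd.c; the function name pkt_burst_example_forward is
hypothetical, and it assumes the usual testpmd definitions (struct
fwd_stream, MAX_PKT_BURST, nb_pkt_per_burst, common_fwd_stream_receive):

static bool
pkt_burst_example_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t nb_rx;

	/* Receive a burst of packets on the stream Rx queue. */
	nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return false;

	/*
	 * The helper transmits the burst, retries if fs->retry_enabled is
	 * set, updates fs->tx_packets and the Tx burst stats, frees unsent
	 * mbufs and accounts them in fs->fwd_dropped.  Like
	 * rte_eth_tx_burst(), it returns the number of packets sent.
	 */
	common_fwd_stream_transmit(fs, pkts_burst, nb_rx);

	return true;
}

Engines that need the number of packets actually sent (flowgen, txonly,
noisy_vnf, ieee1588) use the return value instead of ignoring it, which is
why the helper follows the rte_eth_tx_burst() return convention.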
  

Comments

Ferruh Yigit Feb. 28, 2023, 6:35 p.m. UTC | #1
On 2/20/2023 6:35 PM, David Marchand wrote:
> Reduce code duplication by introducing a helper that takes care of
> transmitting, retrying if enabled, and incrementing the Tx counter.
> inc_tx_burst_stats() is then unneeded and removed.
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>

Reviewed-by: Ferruh Yigit <ferruh.yigit@amd.com>
  

Patch

diff --git a/app/test-pmd/5tswap.c b/app/test-pmd/5tswap.c
index 27da867d7f..ff8c2dcde5 100644
--- a/app/test-pmd/5tswap.c
+++ b/app/test-pmd/5tswap.c
@@ -91,9 +91,6 @@  pkt_burst_5tuple_swap(struct fwd_stream *fs)
 	uint64_t ol_flags;
 	uint16_t proto;
 	uint16_t nb_rx;
-	uint16_t nb_tx;
-	uint32_t retry;
-
 	int i;
 	union {
 		struct rte_ether_hdr *eth;
@@ -155,24 +152,7 @@  pkt_burst_5tuple_swap(struct fwd_stream *fs)
 		}
 		mbuf_field_set(mb, ol_flags);
 	}
-	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
-	/*
-	 * Retry if necessary
-	 */
-	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
-		retry = 0;
-		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
-			rte_delay_us(burst_tx_delay_time);
-			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					&pkts_burst[nb_tx], nb_rx - nb_tx);
-		}
-	}
-	fs->tx_packets += nb_tx;
-	inc_tx_burst_stats(fs, nb_tx);
-	if (unlikely(nb_tx < nb_rx)) {
-		fs->fwd_dropped += (nb_rx - nb_tx);
-		rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_rx - nb_tx);
-	}
+	common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
 
 	return true;
 }
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index d758ae0ac6..fc85c22a77 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -847,12 +847,10 @@  pkt_burst_checksum_forward(struct fwd_stream *fs)
 	uint8_t gro_enable;
 #endif
 	uint16_t nb_rx;
-	uint16_t nb_tx;
 	uint16_t nb_prep;
 	uint16_t i;
 	uint64_t rx_ol_flags, tx_ol_flags;
 	uint64_t tx_offloads;
-	uint32_t retry;
 	uint32_t rx_bad_ip_csum;
 	uint32_t rx_bad_l4_csum;
 	uint32_t rx_bad_outer_l4_csum;
@@ -1169,32 +1167,13 @@  pkt_burst_checksum_forward(struct fwd_stream *fs)
 		rte_pktmbuf_free_bulk(&tx_pkts_burst[nb_prep], nb_rx - nb_prep);
 	}
 
-	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst,
-			nb_prep);
+	common_fwd_stream_transmit(fs, tx_pkts_burst, nb_prep);
 
-	/*
-	 * Retry if necessary
-	 */
-	if (unlikely(nb_tx < nb_prep) && fs->retry_enabled) {
-		retry = 0;
-		while (nb_tx < nb_prep && retry++ < burst_tx_retry_num) {
-			rte_delay_us(burst_tx_delay_time);
-			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					&tx_pkts_burst[nb_tx], nb_prep - nb_tx);
-		}
-	}
-	fs->tx_packets += nb_tx;
 	fs->rx_bad_ip_csum += rx_bad_ip_csum;
 	fs->rx_bad_l4_csum += rx_bad_l4_csum;
 	fs->rx_bad_outer_l4_csum += rx_bad_outer_l4_csum;
 	fs->rx_bad_outer_ip_csum += rx_bad_outer_ip_csum;
 
-	inc_tx_burst_stats(fs, nb_tx);
-	if (unlikely(nb_tx < nb_prep)) {
-		fs->fwd_dropped += (nb_prep - nb_tx);
-		rte_pktmbuf_free_bulk(&tx_pkts_burst[nb_tx], nb_prep - nb_tx);
-	}
-
 	return true;
 }
 
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 3705cc60c5..53b5f24f11 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -75,7 +75,6 @@  pkt_burst_flow_gen(struct fwd_stream *fs)
 	uint16_t nb_dropped;
 	uint16_t nb_pkt;
 	uint16_t nb_clones = nb_pkt_flowgen_clones;
-	uint32_t retry;
 	uint64_t tx_offloads;
 	int next_flow = RTE_PER_LCORE(_next_flow);
 
@@ -158,30 +157,13 @@  pkt_burst_flow_gen(struct fwd_stream *fs)
 			next_flow = 0;
 	}
 
-	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
-	/*
-	 * Retry if necessary
-	 */
-	if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
-		retry = 0;
-		while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
-			rte_delay_us(burst_tx_delay_time);
-			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					&pkts_burst[nb_tx], nb_pkt - nb_tx);
-		}
-	}
-	fs->tx_packets += nb_tx;
-
-	inc_tx_burst_stats(fs, nb_tx);
+	nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_pkt);
 	nb_dropped = nb_pkt - nb_tx;
 	if (unlikely(nb_dropped > 0)) {
 		/* Back out the flow counter. */
 		next_flow -= nb_dropped;
 		while (next_flow < 0)
 			next_flow += nb_flows_flowgen;
-
-		fs->fwd_dropped += nb_dropped;
-		rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_pkt - nb_tx);
 	}
 
 	RTE_PER_LCORE(_next_flow) = next_flow;
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 48f8fe0bf1..68524484e3 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -280,10 +280,8 @@  reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
 	struct rte_ipv4_hdr *ip_h;
 	struct rte_icmp_hdr *icmp_h;
 	struct rte_ether_addr eth_addr;
-	uint32_t retry;
 	uint32_t ip_addr;
 	uint16_t nb_rx;
-	uint16_t nb_tx;
 	uint16_t nb_replies;
 	uint16_t eth_type;
 	uint16_t vlan_id;
@@ -476,30 +474,8 @@  reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
 	}
 
 	/* Send back ICMP echo replies, if any. */
-	if (nb_replies > 0) {
-		nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
-					 nb_replies);
-		/*
-		 * Retry if necessary
-		 */
-		if (unlikely(nb_tx < nb_replies) && fs->retry_enabled) {
-			retry = 0;
-			while (nb_tx < nb_replies &&
-					retry++ < burst_tx_retry_num) {
-				rte_delay_us(burst_tx_delay_time);
-				nb_tx += rte_eth_tx_burst(fs->tx_port,
-						fs->tx_queue,
-						&pkts_burst[nb_tx],
-						nb_replies - nb_tx);
-			}
-		}
-		fs->tx_packets += nb_tx;
-		inc_tx_burst_stats(fs, nb_tx);
-		if (unlikely(nb_tx < nb_replies)) {
-			fs->fwd_dropped += (nb_replies - nb_tx);
-			rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_replies - nb_tx);
-		}
-	}
+	if (nb_replies > 0)
+		common_fwd_stream_transmit(fs, pkts_burst, nb_replies);
 
 	return true;
 }
diff --git a/app/test-pmd/ieee1588fwd.c b/app/test-pmd/ieee1588fwd.c
index 1d51ebfe9d..386d9f10e6 100644
--- a/app/test-pmd/ieee1588fwd.c
+++ b/app/test-pmd/ieee1588fwd.c
@@ -182,13 +182,10 @@  ieee1588_packet_fwd(struct fwd_stream *fs)
 
 	/* Forward PTP packet with hardware TX timestamp */
 	mb->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST;
-	if (rte_eth_tx_burst(fs->tx_port, fs->tx_queue, &mb, 1) == 0) {
+	if (common_fwd_stream_transmit(fs, &mb, 1) == 0) {
 		printf("Port %u sent PTP packet dropped\n", fs->tx_port);
-		fs->fwd_dropped += 1;
-		rte_pktmbuf_free(mb);
 		return false;
 	}
-	fs->tx_packets += 1;
 
 	/*
 	 * Check the TX timestamp.
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 69b583cb5b..ba06fae4a6 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -46,8 +46,6 @@  pkt_burst_io_forward(struct fwd_stream *fs)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	uint16_t nb_rx;
-	uint16_t nb_tx;
-	uint32_t retry;
 
 	/*
 	 * Receive a burst of packets and forward them.
@@ -56,25 +54,7 @@  pkt_burst_io_forward(struct fwd_stream *fs)
 	if (unlikely(nb_rx == 0))
 		return false;
 
-	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-			pkts_burst, nb_rx);
-	/*
-	 * Retry if necessary
-	 */
-	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
-		retry = 0;
-		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
-			rte_delay_us(burst_tx_delay_time);
-			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					&pkts_burst[nb_tx], nb_rx - nb_tx);
-		}
-	}
-	fs->tx_packets += nb_tx;
-	inc_tx_burst_stats(fs, nb_tx);
-	if (unlikely(nb_tx < nb_rx)) {
-		fs->fwd_dropped += (nb_rx - nb_tx);
-		rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_rx - nb_tx);
-	}
+	common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
 
 	return true;
 }
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index a72f5ccb75..7316d73315 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -48,9 +48,7 @@  pkt_burst_mac_forward(struct fwd_stream *fs)
 	struct rte_port  *txp;
 	struct rte_mbuf  *mb;
 	struct rte_ether_hdr *eth_hdr;
-	uint32_t retry;
 	uint16_t nb_rx;
-	uint16_t nb_tx;
 	uint16_t i;
 	uint64_t ol_flags = 0;
 	uint64_t tx_offloads;
@@ -87,25 +85,8 @@  pkt_burst_mac_forward(struct fwd_stream *fs)
 		mb->vlan_tci = txp->tx_vlan_id;
 		mb->vlan_tci_outer = txp->tx_vlan_id_outer;
 	}
-	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
-	/*
-	 * Retry if necessary
-	 */
-	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
-		retry = 0;
-		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
-			rte_delay_us(burst_tx_delay_time);
-			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					&pkts_burst[nb_tx], nb_rx - nb_tx);
-		}
-	}
 
-	fs->tx_packets += nb_tx;
-	inc_tx_burst_stats(fs, nb_tx);
-	if (unlikely(nb_tx < nb_rx)) {
-		fs->fwd_dropped += (nb_rx - nb_tx);
-		rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_rx - nb_tx);
-	}
+	common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
 
 	return true;
 }
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index ab37123404..57f77003fe 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -51,10 +51,7 @@  static bool
 pkt_burst_mac_swap(struct fwd_stream *fs)
 {
 	struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
-	struct rte_port  *txp;
 	uint16_t nb_rx;
-	uint16_t nb_tx;
-	uint32_t retry;
 
 	/*
 	 * Receive a burst of packets and forward them.
@@ -63,28 +60,8 @@  pkt_burst_mac_swap(struct fwd_stream *fs)
 	if (unlikely(nb_rx == 0))
 		return false;
 
-	txp = &ports[fs->tx_port];
-
-	do_macswap(pkts_burst, nb_rx, txp);
-
-	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
-	/*
-	 * Retry if necessary
-	 */
-	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
-		retry = 0;
-		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
-			rte_delay_us(burst_tx_delay_time);
-			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					&pkts_burst[nb_tx], nb_rx - nb_tx);
-		}
-	}
-	fs->tx_packets += nb_tx;
-	inc_tx_burst_stats(fs, nb_tx);
-	if (unlikely(nb_tx < nb_rx)) {
-		fs->fwd_dropped += (nb_rx - nb_tx);
-		rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_rx - nb_tx);
-	}
+	do_macswap(pkts_burst, nb_rx, &ports[fs->tx_port]);
+	common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
 
 	return true;
 }
diff --git a/app/test-pmd/noisy_vnf.c b/app/test-pmd/noisy_vnf.c
index e543adc865..2bf90a983c 100644
--- a/app/test-pmd/noisy_vnf.c
+++ b/app/test-pmd/noisy_vnf.c
@@ -93,30 +93,6 @@  sim_memory_lookups(struct noisy_config *ncf, uint16_t nb_pkts)
 	}
 }
 
-static uint16_t
-do_retry(uint16_t nb_rx, uint16_t nb_tx, struct rte_mbuf **pkts,
-	 struct fwd_stream *fs)
-{
-	uint32_t retry = 0;
-
-	while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
-		rte_delay_us(burst_tx_delay_time);
-		nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-				&pkts[nb_tx], nb_rx - nb_tx);
-	}
-
-	return nb_tx;
-}
-
-static uint32_t
-drop_pkts(struct rte_mbuf **pkts, uint16_t nb_rx, uint16_t nb_tx)
-{
-	if (nb_tx < nb_rx)
-		rte_pktmbuf_free_bulk(&pkts[nb_tx], nb_rx - nb_tx);
-
-	return nb_rx - nb_tx;
-}
-
 /*
  * Forwarding of packets in noisy VNF mode.  Forward packets but perform
  * memory operations first as specified on cmdline.
@@ -156,37 +132,22 @@  pkt_burst_noisy_vnf(struct fwd_stream *fs)
 
 	if (!ncf->do_buffering) {
 		sim_memory_lookups(ncf, nb_rx);
-		nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-				pkts_burst, nb_rx);
-		if (unlikely(nb_tx < nb_rx) && fs->retry_enabled)
-			nb_tx += do_retry(nb_rx, nb_tx, pkts_burst, fs);
-		inc_tx_burst_stats(fs, nb_tx);
-		fs->tx_packets += nb_tx;
-		fs->fwd_dropped += drop_pkts(pkts_burst, nb_rx, nb_tx);
+		nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
 		goto end;
 	}
 
 	fifo_free = rte_ring_free_count(ncf->f);
 	if (fifo_free >= nb_rx) {
-		nb_enqd = rte_ring_enqueue_burst(ncf->f,
-				(void **) pkts_burst, nb_rx, NULL);
-		if (nb_enqd < nb_rx)
-			fs->fwd_dropped += drop_pkts(pkts_burst,
-						     nb_rx, nb_enqd);
-	} else {
-		nb_deqd = rte_ring_dequeue_burst(ncf->f,
-				(void **) tmp_pkts, nb_rx, NULL);
-		nb_enqd = rte_ring_enqueue_burst(ncf->f,
-				(void **) pkts_burst, nb_deqd, NULL);
-		if (nb_deqd > 0) {
-			nb_tx = rte_eth_tx_burst(fs->tx_port,
-					fs->tx_queue, tmp_pkts,
-					nb_deqd);
-			if (unlikely(nb_tx < nb_rx) && fs->retry_enabled)
-				nb_tx += do_retry(nb_rx, nb_tx, tmp_pkts, fs);
-			inc_tx_burst_stats(fs, nb_tx);
-			fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, nb_tx);
+		nb_enqd = rte_ring_enqueue_burst(ncf->f, (void **) pkts_burst, nb_rx, NULL);
+		if (nb_enqd < nb_rx) {
+			fs->fwd_dropped += nb_rx - nb_enqd;
+			rte_pktmbuf_free_bulk(&pkts_burst[nb_enqd], nb_rx - nb_enqd);
 		}
+	} else {
+		nb_deqd = rte_ring_dequeue_burst(ncf->f, (void **) tmp_pkts, nb_rx, NULL);
+		nb_enqd = rte_ring_enqueue_burst(ncf->f, (void **) pkts_burst, nb_deqd, NULL);
+		if (nb_deqd > 0)
+			nb_tx = common_fwd_stream_transmit(fs, tmp_pkts, nb_deqd);
 	}
 
 	sim_memory_lookups(ncf, nb_enqd);
@@ -202,16 +163,9 @@  pkt_burst_noisy_vnf(struct fwd_stream *fs)
 				noisy_tx_sw_buf_flush_time > 0 && !nb_tx;
 	}
 	while (needs_flush && !rte_ring_empty(ncf->f)) {
-		unsigned int sent;
 		nb_deqd = rte_ring_dequeue_burst(ncf->f, (void **)tmp_pkts,
 				MAX_PKT_BURST, NULL);
-		sent = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					 tmp_pkts, nb_deqd);
-		if (unlikely(sent < nb_deqd) && fs->retry_enabled)
-			sent += do_retry(nb_deqd, sent, tmp_pkts, fs);
-		inc_tx_burst_stats(fs, sent);
-		fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
-		nb_tx += sent;
+		nb_tx += common_fwd_stream_transmit(fs, tmp_pkts, nb_deqd);
 		ncf->prev_time = rte_get_timer_cycles();
 	}
 end:
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 6c82cbab45..b9215720b6 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -852,11 +852,34 @@  common_fwd_stream_receive(struct fwd_stream *fs, struct rte_mbuf **burst,
 	return nb_rx;
 }
 
-static inline void
-inc_tx_burst_stats(struct fwd_stream *fs, uint16_t nb_tx)
+static inline uint16_t
+common_fwd_stream_transmit(struct fwd_stream *fs, struct rte_mbuf **burst,
+	unsigned int nb_pkts)
 {
+	uint16_t nb_tx;
+	uint32_t retry;
+
+	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, burst, nb_pkts);
+	/*
+	 * Retry if necessary
+	 */
+	if (unlikely(nb_tx < nb_pkts) && fs->retry_enabled) {
+		retry = 0;
+		while (nb_tx < nb_pkts && retry++ < burst_tx_retry_num) {
+			rte_delay_us(burst_tx_delay_time);
+			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+				&burst[nb_tx], nb_pkts - nb_tx);
+		}
+	}
+	fs->tx_packets += nb_tx;
 	if (record_burst_stats)
 		fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+	if (unlikely(nb_tx < nb_pkts)) {
+		fs->fwd_dropped += (nb_pkts - nb_tx);
+		rte_pktmbuf_free_bulk(&burst[nb_tx], nb_pkts - nb_tx);
+	}
+
+	return nb_tx;
 }
 
 /* Prototypes */
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index b80ab6f5df..b3d6873104 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -334,7 +334,6 @@  pkt_burst_transmit(struct fwd_stream *fs)
 	uint16_t nb_tx;
 	uint16_t nb_pkt;
 	uint16_t vlan_tci, vlan_tci_outer;
-	uint32_t retry;
 	uint64_t ol_flags = 0;
 	uint64_t tx_offloads;
 
@@ -391,25 +390,11 @@  pkt_burst_transmit(struct fwd_stream *fs)
 	if (nb_pkt == 0)
 		return false;
 
-	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
-
-	/*
-	 * Retry if necessary
-	 */
-	if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
-		retry = 0;
-		while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
-			rte_delay_us(burst_tx_delay_time);
-			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					&pkts_burst[nb_tx], nb_pkt - nb_tx);
-		}
-	}
-	fs->tx_packets += nb_tx;
+	nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_pkt);
 
 	if (txonly_multi_flow)
 		RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx;
 
-	inc_tx_burst_stats(fs, nb_tx);
 	if (unlikely(nb_tx < nb_pkt)) {
 		if (verbose_level > 0 && fs->fwd_dropped == 0)
 			printf("port %d tx_queue %d - drop "
@@ -417,8 +402,6 @@  pkt_burst_transmit(struct fwd_stream *fs)
 			       fs->tx_port, fs->tx_queue,
 			       (unsigned) nb_pkt, (unsigned) nb_tx,
 			       (unsigned) (nb_pkt - nb_tx));
-		fs->fwd_dropped += (nb_pkt - nb_tx);
-		rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_pkt - nb_tx);
 	}
 
 	return true;