app/testpmd: fix scheduling send burst interval

Message ID 20211123163258.14972-1-viacheslavo@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series app/testpmd: fix scheduling send burst interval

Checks

Context                          Check    Description
ci/checkpatch                    success  coding style OK
ci/Intel-compilation             success  Compilation OK
ci/intel-Testing                 success  Testing PASS
ci/github-robot: build           success  github build: passed
ci/iol-mellanox-Performance      success  Performance Testing PASS
ci/iol-broadcom-Performance      success  Performance Testing PASS
ci/iol-broadcom-Functional       success  Functional Testing PASS
ci/iol-intel-Functional          success  Functional Testing PASS
ci/iol-intel-Performance         success  Performance Testing PASS
ci/iol-x86_64-unit-testing       success  Testing PASS
ci/iol-x86_64-compile-testing    success  Testing PASS
ci/iol-aarch64-unit-testing      success  Testing PASS
ci/iol-aarch64-compile-testing   success  Testing PASS

Commit Message

Slava Ovsiienko Nov. 23, 2021, 4:32 p.m. UTC
  The first "set txtimes" command parameter specifies the time
interval between scheduled send bursts for single queue. This
interval should be the same for all the forwarding ports.
It requires to maintain the timing related variables on per
queue basis instead of per core, as currently implemented.
This resulted in wrong burst intervals if two or more cores
were generating the scheduled traffic for two or more ports
in txonly mode.
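
For reference, a typical setting looks like this (the values are
illustrative; per the testpmd user guide they are given in device
clock units):

  testpmd> set txtimes 100000,10000

Here the first parameter is the interval between bursts on a queue
and the second is the interval between packets within a burst.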

This patch moves the timing variable into the fwd_stream
structure. Only txonly forwarding mode with send scheduling
enabled is affected.

Fixes: 4940344dab1d ("app/testpmd: add Tx scheduling command")
Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 app/test-pmd/testpmd.h |  1 +
 app/test-pmd/txonly.c  | 20 ++++++--------------
 2 files changed, 7 insertions(+), 14 deletions(-)
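
The change in behavior, as a minimal C sketch (identifiers are
borrowed from the patch where possible; the stand-in types and the
helper itself are hypothetical, not actual testpmd code):

  #include <stdint.h>

  #define MAX_PORTS 64                   /* stand-in for RTE_MAX_ETHPORTS */

  struct stream_sketch {                 /* stand-in for struct fwd_stream */
          uint16_t tx_port;
          uint64_t ts_skew;              /* 0 means "not initialized yet" */
  };

  static uint64_t timestamp_initial[MAX_PORTS]; /* per-port Tx start time */
  static uint64_t tx_pkt_times_inter;    /* "set txtimes" first parameter */

  /*
   * Scheduled timestamp for the next burst of one stream. Before the
   * fix the skew lived in a per-lcore variable, so all streams polled
   * by the same lcore shared it and their bursts drifted; now each
   * stream advances its own copy by exactly tx_pkt_times_inter.
   */
  static uint64_t
  next_burst_timestamp(struct stream_sketch *fs, uint64_t phase)
  {
          if (fs->ts_skew == 0)          /* lazy init replaces the old */
                  fs->ts_skew =          /* init-request counter scheme */
                          timestamp_initial[fs->tx_port] +
                          tx_pkt_times_inter + phase;
          else
                  fs->ts_skew += tx_pkt_times_inter;
          return fs->ts_skew;
  }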

Comments

Ferruh Yigit Jan. 14, 2022, 5:53 p.m. UTC | #1
On 11/23/2021 4:32 PM, Viacheslav Ovsiienko wrote:
> The first "set txtimes" command parameter specifies the time
> interval between scheduled send bursts for single queue. This
> interval should be the same for all the forwarding ports.
> It requires to maintain the timing related variables on per
> queue basis instead of per core, as currently implemented.
> This resulted in wrong burst intervals if two or more cores
> were generating the scheduled traffic for two or more ports
> in txonly mode.
> 
> This patch moves the timing variable to the fstream structure.
> Only txonly forwarding mode with enabled send scheduling is
> affected.
> 
> Fixes: 4940344dab1d ("app/testpmd: add Tx scheduling command")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>

Applied to dpdk-next-net/main, thanks.

Patch

diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 2149ecd93a..9967825044 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -147,6 +147,7 @@ struct fwd_stream {
 	/**< received packets has bad outer l4 checksum */
 	uint64_t rx_bad_outer_ip_csum;
 	/**< received packets having bad outer ip checksum */
+	uint64_t ts_skew; /**< TX scheduling timestamp */
 #ifdef RTE_LIB_GRO
 	unsigned int gro_times;	/**< GRO operation times */
 #endif
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index b8497e733d..c53c4e7eae 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -59,14 +59,10 @@ uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;
 static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
 RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
 static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
-RTE_DEFINE_PER_LCORE(uint64_t, timestamp_qskew);
-					/**< Timestamp offset per queue */
-RTE_DEFINE_PER_LCORE(uint32_t, timestamp_idone); /**< Timestamp init done. */
 
 static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
 static int32_t timestamp_off; /**< Timestamp dynamic field offset */
 static bool timestamp_enable; /**< Timestamp enable */
-static uint32_t timestamp_init_req; /**< Timestamp initialization request. */
 static uint64_t timestamp_initial[RTE_MAX_ETHPORTS];
 
 static void
@@ -194,7 +190,7 @@ static inline bool
 pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 		struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
 		const uint16_t vlan_tci_outer, const uint64_t ol_flags,
-		const uint16_t idx, const struct fwd_stream *fs)
+		const uint16_t idx, struct fwd_stream *fs)
 {
 	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
 	struct rte_mbuf *pkt_seg;
@@ -262,11 +258,10 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 		update_pkt_header(pkt, pkt_len);
 
 	if (unlikely(timestamp_enable)) {
-		uint64_t skew = RTE_PER_LCORE(timestamp_qskew);
+		uint64_t skew = fs->ts_skew;
 		struct tx_timestamp timestamp_mark;
 
-		if (unlikely(timestamp_init_req !=
-				RTE_PER_LCORE(timestamp_idone))) {
+		if (unlikely(!skew)) {
 			struct rte_eth_dev_info dev_info;
 			unsigned int txqs_n;
 			uint64_t phase;
@@ -289,8 +284,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 			 */
 			skew = timestamp_initial[fs->tx_port] +
 			       tx_pkt_times_inter + phase;
-			RTE_PER_LCORE(timestamp_qskew) = skew;
-			RTE_PER_LCORE(timestamp_idone) = timestamp_init_req;
+			fs->ts_skew = skew;
 		}
 		timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
 		timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
@@ -300,14 +294,14 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
 			pkt->ol_flags |= timestamp_mask;
 			*RTE_MBUF_DYNFIELD
 				(pkt, timestamp_off, uint64_t *) = skew;
-			RTE_PER_LCORE(timestamp_qskew) = skew;
+			fs->ts_skew = skew;
 			timestamp_mark.ts = rte_cpu_to_be_64(skew);
 		} else if (tx_pkt_times_intra) {
 			skew +=	tx_pkt_times_intra;
 			pkt->ol_flags |= timestamp_mask;
 			*RTE_MBUF_DYNFIELD
 				(pkt, timestamp_off, uint64_t *) = skew;
-			RTE_PER_LCORE(timestamp_qskew) = skew;
+			fs->ts_skew = skew;
 			timestamp_mark.ts = rte_cpu_to_be_64(skew);
 		} else {
 			timestamp_mark.ts = RTE_BE64(0);
@@ -461,7 +455,6 @@ tx_only_begin(portid_t pi)
 	timestamp_enable = false;
 	timestamp_mask = 0;
 	timestamp_off = -1;
-	RTE_PER_LCORE(timestamp_qskew) = 0;
 	dynf = rte_mbuf_dynflag_lookup
 				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
 	if (dynf >= 0)
@@ -504,7 +497,6 @@ tx_only_begin(portid_t pi)
 				return -EINVAL;
 			}
 		}
-		timestamp_init_req++;
 	}
 
 	/* Make sure all settings are visible on forwarding cores.*/
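
To exercise the fixed path, run txonly forwarding over two ports with
two forwarding cores and send scheduling enabled. A sketch of such a
session (the core list, PCI addresses, and the mlx5 tx_pp devargs are
placeholders for an actual setup; scheduling support must be enabled
in the PMD):

  dpdk-testpmd -l 0-2 -a 0000:03:00.0,tx_pp=500 -a 0000:03:00.1,tx_pp=500 \
          -- -i --forward-mode=txonly
  testpmd> set txtimes 100000,0
  testpmd> start

With the fix applied, each forwarding stream keeps its own ts_skew,
so both queues observe the same inter-burst interval regardless of
how streams are mapped to cores.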