[v9,3/6] app/testpmd: new parameter to enable shared Rx queue

Message ID 20211019081738.2165150-4-xuemingl@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series: ethdev: introduce shared Rx queue

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Xueming Li Oct. 19, 2021, 8:17 a.m. UTC
  Adds "--rxq-share=X" parameter to enable shared RxQ, share if device
supports, otherwise fallback to standard RxQ.

The share group number grows by one for every X ports. X defaults to MAX,
meaning all ports join share group 1. Queue IDs map 1:1 to shared Rx
queue IDs.
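
For illustration, here is a minimal stand-alone sketch (not part of the
patch; port/queue counts chosen arbitrarily) of the mapping that
rxtx_port_config() applies below: with X=2 and four ports, ports 0-1 land
in share group 1 and ports 2-3 in share group 2, while each queue keeps
its own ID as the shared queue ID.

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint32_t rxq_share = 2; /* X: ports per share group */
		uint16_t nb_ports = 4;  /* example port count */
		uint16_t nb_rxq = 2;    /* Rx queues per port */
		uint16_t pid, qid;

		for (pid = 0; pid < nb_ports; pid++)
			for (qid = 0; qid < nb_rxq; qid++)
				/* Non-zero share group enables RxQ share;
				 * queue ID maps 1:1 to shared queue ID. */
				printf("port %u rxq %u -> share_group=%u share_qid=%u\n",
				       pid, qid, pid / rxq_share + 1, qid);
		return 0;
	}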

The "shared-rxq" forwarding engine should be used; it is Rx-only and
updates stream statistics correctly.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
 app/test-pmd/config.c                 |  7 ++++++-
 app/test-pmd/parameters.c             | 13 +++++++++++++
 app/test-pmd/testpmd.c                | 20 +++++++++++++++++---
 app/test-pmd/testpmd.h                |  2 ++
 doc/guides/testpmd_app_ug/run_app.rst |  7 +++++++
 5 files changed, 45 insertions(+), 4 deletions(-)
  

Patch

diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index c0616dcd2fd..f8fb8961cae 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2713,7 +2713,12 @@  rxtx_config_display(void)
 			printf("      RX threshold registers: pthresh=%d hthresh=%d "
 				" wthresh=%d\n",
 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
-			printf("      RX Offloads=0x%"PRIx64"\n", offloads_tmp);
+			printf("      RX Offloads=0x%"PRIx64, offloads_tmp);
+			if (rx_conf->share_group > 0)
+				printf(" share_group=%u share_qid=%u",
+				       rx_conf->share_group,
+				       rx_conf->share_qid);
+			printf("\n");
 		}
 
 		/* per tx queue config only for first queue to be less verbose */
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 3f94a82e321..30dae326310 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -167,6 +167,7 @@  usage(char* progname)
 	printf("  --tx-ip=src,dst: IP addresses in Tx-only mode\n");
 	printf("  --tx-udp=src[,dst]: UDP ports in Tx-only mode\n");
 	printf("  --eth-link-speed: force link speed.\n");
+	printf("  --rxq-share: number of ports per shared rxq groups, defaults to MAX(1 group)\n");
 	printf("  --disable-link-check: disable check on link status when "
 	       "starting/stopping ports.\n");
 	printf("  --disable-device-start: do not automatically start port\n");
@@ -607,6 +608,7 @@  launch_args_parse(int argc, char** argv)
 		{ "rxpkts",			1, 0, 0 },
 		{ "txpkts",			1, 0, 0 },
 		{ "txonly-multi-flow",		0, 0, 0 },
+		{ "rxq-share",			2, 0, 0 },
 		{ "eth-link-speed",		1, 0, 0 },
 		{ "disable-link-check",		0, 0, 0 },
 		{ "disable-device-start",	0, 0, 0 },
@@ -1271,6 +1273,17 @@  launch_args_parse(int argc, char** argv)
 			}
 			if (!strcmp(lgopts[opt_idx].name, "txonly-multi-flow"))
 				txonly_multi_flow = 1;
+			if (!strcmp(lgopts[opt_idx].name, "rxq-share")) {
+				if (optarg == NULL) {
+					rxq_share = UINT32_MAX;
+				} else {
+					n = atoi(optarg);
+					if (n >= 0)
+						rxq_share = (uint32_t)n;
+					else
+						rte_exit(EXIT_FAILURE, "rxq-share must be >= 0\n");
+				}
+			}
 			if (!strcmp(lgopts[opt_idx].name, "no-flush-rx"))
 				no_flush_rx = 1;
 			if (!strcmp(lgopts[opt_idx].name, "eth-link-speed")) {
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 97ae52e17ec..123142ed110 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -498,6 +498,11 @@  uint8_t record_core_cycles;
  */
 uint8_t record_burst_stats;
 
+/*
+ * Number of ports per shared Rx queue group, 0 disable.
+ */
+uint32_t rxq_share;
+
 unsigned int num_sockets = 0;
 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
 
@@ -3393,14 +3398,23 @@  dev_event_callback(const char *device_name, enum rte_dev_event_type type,
 }
 
 static void
-rxtx_port_config(struct rte_port *port)
+rxtx_port_config(portid_t pid)
 {
 	uint16_t qid;
 	uint64_t offloads;
+	struct rte_port *port = &ports[pid];
 
 	for (qid = 0; qid < nb_rxq; qid++) {
 		offloads = port->rx_conf[qid].offloads;
 		port->rx_conf[qid] = port->dev_info.default_rxconf;
+
+		if (rxq_share > 0 &&
+		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
+			/* Non-zero share group to enable RxQ share. */
+			port->rx_conf[qid].share_group = pid / rxq_share + 1;
+			port->rx_conf[qid].share_qid = qid; /* Equal mapping. */
+		}
+
 		if (offloads != 0)
 			port->rx_conf[qid].offloads = offloads;
 
@@ -3558,7 +3572,7 @@  init_port_config(void)
 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
 		}
 
-		rxtx_port_config(port);
+		rxtx_port_config(pid);
 
 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
 		if (ret != 0)
@@ -3772,7 +3786,7 @@  init_port_dcb_config(portid_t pid,
 
 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
 
-	rxtx_port_config(rte_port);
+	rxtx_port_config(pid);
 	/* VLAN filter */
 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 5863b2f43f3..3dfaaad94c0 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -477,6 +477,8 @@  extern enum tx_pkt_split tx_pkt_split;
 
 extern uint8_t txonly_multi_flow;
 
+extern uint32_t rxq_share;
+
 extern uint16_t nb_pkt_per_burst;
 extern uint16_t nb_pkt_flowgen_clones;
 extern int nb_flows_flowgen;
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 640eadeff73..ff5908dcd50 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -389,6 +389,13 @@  The command line options are:
 
     Generate multiple flows in txonly mode.
 
+*   ``--rxq-share=[X]``
+
+    Create queues in shared Rx queue mode if device supports.
+    Group number grows per X ports. X defaults to MAX, implies all ports
+    join share group 1. Forwarding engine "shared-rxq" should be used
+    which Rx only and update stream statistics correctly.
+
 *   ``--eth-link-speed``
 
     Set a forced link speed to the ethernet port::