[04/11] examples/l3fwd: add ethdev setup based on eventdev
diff mbox series

Message ID 20190926100558.24348-5-pbhagavatula@marvell.com
State Superseded
Delegated to: Thomas Monjalon
Headers show
Series
  • example/l3fwd: introduce event device support
Related show

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Pavan Nikhilesh Bhagavatula Sept. 26, 2019, 10:05 a.m. UTC
From: Sunil Kumar Kori <skori@marvell.com>

Add Ethernet port Rx/Tx queue setup for the event device; these queues
are later used for setting up event eth Rx/Tx adapters.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 examples/l3fwd/l3fwd.h          |  10 +++
 examples/l3fwd/l3fwd_eventdev.c | 129 +++++++++++++++++++++++++++++++-
 examples/l3fwd/l3fwd_eventdev.h |   5 +-
 examples/l3fwd/main.c           |  15 ++--
 4 files changed, 147 insertions(+), 12 deletions(-)

Patch
diff mbox series

diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index 838aeed1d..ef978ae64 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -18,9 +18,16 @@ 
 #define NO_HASH_MULTI_LOOKUP 1
 #endif
 
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 1024
+
 #define MAX_PKT_BURST     32
 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
+#define MEMPOOL_CACHE_SIZE 256
 #define MAX_RX_QUEUE_PER_LCORE 16
 
 /*
@@ -172,6 +179,9 @@  is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
 }
 #endif /* DO_RFC_1812_CHECKS */
 
+int
+init_mem(uint16_t portid, unsigned int nb_mbuf);
+
 /* Function pointers for LPM or EM functionality. */
 void
 setup_lpm(const int socketid);
diff --git a/examples/l3fwd/l3fwd_eventdev.c b/examples/l3fwd/l3fwd_eventdev.c
index 7e2c4c66b..f07cd4b31 100644
--- a/examples/l3fwd/l3fwd_eventdev.c
+++ b/examples/l3fwd/l3fwd_eventdev.c
@@ -8,6 +8,14 @@ 
 #include "l3fwd.h"
 #include "l3fwd_eventdev.h"
 
+static void
+print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
+{
+	char buf[RTE_ETHER_ADDR_FMT_SIZE];
+	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
+	printf("%s%s", name, buf);
+}
+
 static void
 parse_mode(const char *optarg)
 {
@@ -66,6 +74,122 @@  l3fwd_parse_eventdev_args(char **argv, int argc)
 	return 0;
 }
 
+static void
+l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
+{
+	struct l3fwd_eventdev_resources *evdev_rsrc = l3fwd_get_eventdev_rsrc();
+	uint16_t nb_ports = rte_eth_dev_count_avail();
+	uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+	uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+	unsigned int nb_lcores = rte_lcore_count();
+	struct rte_eth_conf local_port_conf;
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_txconf txconf;
+	struct rte_eth_rxconf rxconf;
+	unsigned int nb_mbuf;
+	uint16_t port_id;
+	int32_t ret;
+
+	/* initialize all ports */
+	RTE_ETH_FOREACH_DEV(port_id) {
+		local_port_conf = *port_conf;
+		/* skip ports that are not enabled */
+		if ((evdev_rsrc->port_mask & (1 << port_id)) == 0) {
+			printf("\nSkipping disabled port %d\n", port_id);
+			continue;
+		}
+
+		/* init port */
+		printf("Initializing port %d ... ", port_id);
+		fflush(stdout);
+		printf("Creating queues: nb_rxq=1 nb_txq=1...\n");
+
+		rte_eth_dev_info_get(port_id, &dev_info);
+		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+			local_port_conf.txmode.offloads |=
+						DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+						dev_info.flow_type_rss_offloads;
+		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+				port_conf->rx_adv_conf.rss_conf.rss_hf) {
+			printf("Port %u modified RSS hash function "
+			       "based on hardware support,"
+			       "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+			       port_id,
+			       port_conf->rx_adv_conf.rss_conf.rss_hf,
+			       local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+		}
+
+		ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				 "Cannot configure device: err=%d, port=%d\n",
+				 ret, port_id);
+
+		ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
+						       &nb_txd);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				 "Cannot adjust number of descriptors: err=%d, "
+				 "port=%d\n", ret, port_id);
+
+		rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
+		print_ethaddr(" Address:", &ports_eth_addr[port_id]);
+		printf(", ");
+		print_ethaddr("Destination:",
+			(const struct rte_ether_addr *)&dest_eth_addr[port_id]);
+		printf(", ");
+
+		/* prepare source MAC for each port. */
+		rte_ether_addr_copy(&ports_eth_addr[port_id],
+			(struct rte_ether_addr *)(val_eth + port_id) + 1);
+
+		/* init memory */
+		if (!evdev_rsrc->per_port_pool) {
+			/* port_id = 0; this is *not* signifying the first port,
+			 * rather, it signifies that port_id is ignored.
+			 */
+			nb_mbuf = RTE_MAX(nb_ports * nb_rxd +
+					  nb_ports * nb_txd +
+					  nb_ports * nb_lcores *
+							MAX_PKT_BURST +
+					  nb_lcores * MEMPOOL_CACHE_SIZE,
+					  8192u);
+			ret = init_mem(0, nb_mbuf);
+		} else {
+			nb_mbuf = RTE_MAX(nb_rxd + nb_rxd +
+					  nb_lcores * MAX_PKT_BURST +
+					  nb_lcores * MEMPOOL_CACHE_SIZE,
+					  8192u);
+			ret = init_mem(port_id, nb_mbuf);
+		}
+		/* init one Rx queue per port */
+		rxconf = dev_info.default_rxconf;
+		rxconf.offloads = local_port_conf.rxmode.offloads;
+		if (!evdev_rsrc->per_port_pool)
+			ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, 0,
+					&rxconf, evdev_rsrc->pkt_pool[0][0]);
+		else
+			ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, 0,
+					&rxconf,
+					evdev_rsrc->pkt_pool[port_id][0]);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				 "rte_eth_rx_queue_setup: err=%d, "
+				 "port=%d\n", ret, port_id);
+
+		/* init one Tx queue per port */
+		txconf = dev_info.default_txconf;
+		txconf.offloads = local_port_conf.txmode.offloads;
+		ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, 0, &txconf);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				 "rte_eth_tx_queue_setup: err=%d, "
+				 "port=%d\n", ret, port_id);
+	}
+}
+
 static void
 l3fwd_eventdev_capability_setup(void)
 {
@@ -155,7 +279,7 @@  l3fwd_eventdev_setup(uint16_t ethdev_count)
 }
 
 void
-l3fwd_eventdev_resource_setup(void)
+l3fwd_eventdev_resource_setup(struct rte_eth_conf *port_conf)
 {
 	struct l3fwd_eventdev_resources *evdev_rsrc = l3fwd_get_eventdev_rsrc();
 	uint16_t ethdev_count = rte_eth_dev_count_avail();
@@ -172,6 +296,9 @@  l3fwd_eventdev_resource_setup(void)
 	/* Setup eventdev capability callbacks */
 	l3fwd_eventdev_capability_setup();
 
+	/* Ethernet device configuration */
+	l3fwd_eth_dev_port_setup(port_conf);
+
 	/* Event device configuration */
 	l3fwd_eventdev_setup(ethdev_count);
 }
diff --git a/examples/l3fwd/l3fwd_eventdev.h b/examples/l3fwd/l3fwd_eventdev.h
index ce4e35443..f63f3d4ef 100644
--- a/examples/l3fwd/l3fwd_eventdev.h
+++ b/examples/l3fwd/l3fwd_eventdev.h
@@ -40,6 +40,9 @@  struct l3fwd_eventdev_setup_ops {
 struct l3fwd_eventdev_resources {
 	uint8_t disable_implicit_release;
 	struct l3fwd_eventdev_setup_ops ops;
+	struct rte_mempool * (*pkt_pool)[NB_SOCKETS];
+	uint32_t port_mask;
+	uint8_t per_port_pool;
 	uint8_t event_d_id;
 	uint8_t sync_mode;
 	uint8_t tx_mode_q;
@@ -72,7 +75,7 @@  l3fwd_get_eventdev_rsrc(void)
 	return NULL;
 }
 
-void l3fwd_eventdev_resource_setup(void);
+void l3fwd_eventdev_resource_setup(struct rte_eth_conf *port_conf);
 void l3fwd_eventdev_set_generic_ops(struct l3fwd_eventdev_setup_ops *ops);
 void l3fwd_eventdev_set_internal_port_ops(struct l3fwd_eventdev_setup_ops *ops);
 
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index bd88bd4ce..0ecb0ef68 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -45,12 +45,6 @@ 
 #include <cmdline_parse_etheraddr.h>
 
 #include "l3fwd.h"
-
-/*
- * Configurable number of RX/TX ring descriptors
- */
-#define RTE_TEST_RX_DESC_DEFAULT 1024
-#define RTE_TEST_TX_DESC_DEFAULT 1024
 #include "l3fwd_eventdev.h"
 
 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
@@ -448,7 +442,6 @@  parse_eth_dest(const char *optarg)
 }
 
 #define MAX_JUMBO_PKT_LEN  9600
-#define MEMPOOL_CACHE_SIZE 256
 
 static const char short_options[] =
 	"p:"  /* portmask */
@@ -678,7 +671,7 @@  print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
 	printf("%s%s", name, buf);
 }
 
-static int
+int
 init_mem(uint16_t portid, unsigned int nb_mbuf)
 {
 	struct lcore_conf *qconf;
@@ -857,14 +850,16 @@  main(int argc, char **argv)
 	}
 
 	evdev_rsrc = l3fwd_get_eventdev_rsrc();
-	RTE_SET_USED(evdev_rsrc);
 	/* parse application arguments (after the EAL ones) */
 	ret = parse_args(argc, argv);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
 
+	evdev_rsrc->per_port_pool = per_port_pool;
+	evdev_rsrc->pkt_pool = pktmbuf_pool;
+	evdev_rsrc->port_mask = enabled_port_mask;
 	/* Configure eventdev parameters if user has requested */
-	l3fwd_eventdev_resource_setup();
+	l3fwd_eventdev_resource_setup(&port_conf);
 
 	if (check_lcore_params() < 0)
 		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");