[v2,4/6] examples/l3fwd: clean up worker state before exit

Message ID 20220513160719.10558-4-pbhagavatula@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Jerin Jacob
Headers
Series [v2,1/6] app/eventdev: simplify signal handling and teardown |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Pavan Nikhilesh Bhagavatula May 13, 2022, 4:07 p.m. UTC
  From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Event ports are configured to implicitly release the scheduling contexts
they currently hold on the next call to rte_event_dequeue_burst().
A worker core might still hold a scheduling context when it exits, because
that next call to rte_event_dequeue_burst() is never made.
This might lead to a deadlock, depending on the worker exit timing,
especially when there are very few flows.

Add a clean-up function that releases any scheduling contexts still held
by the worker, using RTE_EVENT_OP_RELEASE.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 examples/l3fwd/l3fwd_em.c    | 32 ++++++++++++++++++++++----------
 examples/l3fwd/l3fwd_event.c | 34 ++++++++++++++++++++++++++++++++++
 examples/l3fwd/l3fwd_event.h |  5 +++++
 examples/l3fwd/l3fwd_fib.c   | 10 ++++++++--
 examples/l3fwd/l3fwd_lpm.c   | 32 ++++++++++++++++++++++----------
 5 files changed, 91 insertions(+), 22 deletions(-)
  

Patch

diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 24d0910fe0..6f8d94f120 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -653,6 +653,7 @@  em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
 		evt_rsrc->evq.nb_queues - 1];
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
+	uint8_t deq = 0, enq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
 	struct rte_event ev;
@@ -665,7 +666,9 @@  em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 
 	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
 	while (!force_quit) {
-		if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
+		deq = rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1,
+					      0);
+		if (!deq)
 			continue;
 
 		struct rte_mbuf *mbuf = ev.mbuf;
@@ -684,19 +687,22 @@  em_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 		if (flags & L3FWD_EVENT_TX_ENQ) {
 			ev.queue_id = tx_q_id;
 			ev.op = RTE_EVENT_OP_FORWARD;
-			while (rte_event_enqueue_burst(event_d_id, event_p_id,
-						&ev, 1) && !force_quit)
-				;
+			do {
+				enq = rte_event_enqueue_burst(
+					event_d_id, event_p_id, &ev, 1);
+			} while (!enq && !force_quit);
 		}
 
 		if (flags & L3FWD_EVENT_TX_DIRECT) {
 			rte_event_eth_tx_adapter_txq_set(mbuf, 0);
-			while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
-						event_p_id, &ev, 1, 0) &&
-					!force_quit)
-				;
+			do {
+				enq = rte_event_eth_tx_adapter_enqueue(
+					event_d_id, event_p_id, &ev, 1, 0);
+			} while (!enq && !force_quit);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, &ev, enq, deq, 0);
 }
 
 static __rte_always_inline void
@@ -709,9 +715,9 @@  em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
+	int i, nb_enq = 0, nb_deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
-	int i, nb_enq, nb_deq;
 
 	if (event_p_id < 0)
 		return;
@@ -769,6 +775,9 @@  em_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
 						nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 0);
 }
 
 static __rte_always_inline void
@@ -832,9 +841,9 @@  em_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
+	int i, nb_enq = 0, nb_deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
-	int i, nb_enq, nb_deq;
 
 	if (event_p_id < 0)
 		return;
@@ -887,6 +896,9 @@  em_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 					nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 1);
 }
 
 int __rte_noinline
diff --git a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c
index 7a401290f8..a14a21b414 100644
--- a/examples/l3fwd/l3fwd_event.c
+++ b/examples/l3fwd/l3fwd_event.c
@@ -287,3 +287,37 @@  l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
 		fib_event_loop[evt_rsrc->vector_enabled][evt_rsrc->tx_mode_q]
 			      [evt_rsrc->has_burst];
 }
+
+static void
+l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
+{
+	uint16_t i;
+
+	for (i = 0; i < num; i++) {
+		rte_pktmbuf_free_bulk(events[i].vec->mbufs,
+				      events[i].vec->nb_elem);
+		rte_mempool_put(rte_mempool_from_obj(events[i].vec),
+				events[i].vec);
+	}
+}
+
+void
+l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
+			   struct rte_event events[], uint16_t nb_enq,
+			   uint16_t nb_deq, uint8_t is_vector)
+{
+	int i;
+
+	if (nb_deq) {
+		if (is_vector)
+			l3fwd_event_vector_array_free(events + nb_enq,
+						      nb_deq - nb_enq);
+		else
+			for (i = nb_enq; i < nb_deq; i++)
+				rte_pktmbuf_free(events[i].mbuf);
+
+		for (i = 0; i < nb_deq; i++)
+			events[i].op = RTE_EVENT_OP_RELEASE;
+		rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
+	}
+}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index f139632016..b93841a16f 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -103,10 +103,15 @@  event_vector_txq_set(struct rte_event_vector *vec, uint16_t txq)
 	}
 }
 
+
+
 struct l3fwd_event_resources *l3fwd_get_eventdev_rsrc(void);
 void l3fwd_event_resource_setup(struct rte_eth_conf *port_conf);
 int l3fwd_get_free_event_port(struct l3fwd_event_resources *eventdev_rsrc);
 void l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops);
 void l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops);
+void l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
+				struct rte_event events[], uint16_t nb_enq,
+				uint16_t nb_deq, uint8_t is_vector);
 
 #endif /* __L3FWD_EVENTDEV_H__ */
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 6e0054b4cb..26d0767ae2 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -252,9 +252,9 @@  fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
+	int i, nb_enq = 0, nb_deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
-	int nb_enq, nb_deq, i;
 
 	uint32_t ipv4_arr[MAX_PKT_BURST];
 	uint8_t ipv6_arr[MAX_PKT_BURST][RTE_FIB6_IPV6_ADDR_SIZE];
@@ -370,6 +370,9 @@  fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
 						nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 0);
 }
 
 int __rte_noinline
@@ -491,7 +494,7 @@  fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
-	int nb_enq, nb_deq, i;
+	int nb_enq = 0, nb_deq = 0, i;
 
 	if (event_p_id < 0)
 		return;
@@ -538,6 +541,9 @@  fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 					nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 1);
 }
 
 int __rte_noinline
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index bec22c44cd..501fc5db5e 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -273,6 +273,7 @@  lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
 		evt_rsrc->evq.nb_queues - 1];
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
+	uint8_t enq = 0, deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
 	struct rte_event ev;
@@ -285,7 +286,9 @@  lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 
 	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
 	while (!force_quit) {
-		if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
+		deq = rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1,
+					      0);
+		if (!deq)
 			continue;
 
 		if (lpm_process_event_pkt(lconf, ev.mbuf) == BAD_PORT) {
@@ -296,19 +299,22 @@  lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
 		if (flags & L3FWD_EVENT_TX_ENQ) {
 			ev.queue_id = tx_q_id;
 			ev.op = RTE_EVENT_OP_FORWARD;
-			while (rte_event_enqueue_burst(event_d_id, event_p_id,
-						&ev, 1) && !force_quit)
-				;
+			do {
+				enq = rte_event_enqueue_burst(
+					event_d_id, event_p_id, &ev, 1);
+			} while (!enq && !force_quit);
 		}
 
 		if (flags & L3FWD_EVENT_TX_DIRECT) {
 			rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
-			while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
-						event_p_id, &ev, 1, 0) &&
-					!force_quit)
-				;
+			do {
+				enq = rte_event_eth_tx_adapter_enqueue(
+					event_d_id, event_p_id, &ev, 1, 0);
+			} while (!enq && !force_quit);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, &ev, enq, deq, 0);
 }
 
 static __rte_always_inline void
@@ -321,9 +327,9 @@  lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
+	int i, nb_enq = 0, nb_deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
-	int i, nb_enq, nb_deq;
 
 	if (event_p_id < 0)
 		return;
@@ -375,6 +381,9 @@  lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
 						nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 0);
 }
 
 static __rte_always_inline void
@@ -459,9 +468,9 @@  lpm_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
+	int i, nb_enq = 0, nb_deq = 0;
 	struct lcore_conf *lconf;
 	unsigned int lcore_id;
-	int i, nb_enq, nb_deq;
 
 	if (event_p_id < 0)
 		return;
@@ -510,6 +519,9 @@  lpm_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 					nb_deq - nb_enq, 0);
 		}
 	}
+
+	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
+				   nb_deq, 1);
 }
 
 int __rte_noinline