[6/6] examples/ipsec-secgw: cleanup worker state before exit

Message ID 20220426211412.6138-6-pbhagavatula@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series [1/6] app/eventdev: simplify signal handling and teardown

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/github-robot: build success github build: passed
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/iol-abi-testing success Testing PASS

Commit Message

Pavan Nikhilesh Bhagavatula April 26, 2022, 9:14 p.m. UTC
  Event ports are configured to implicitly release the scheduler contexts
currently held when the next call to rte_event_dequeue_burst() is made.
A worker core might still hold a scheduling context at exit, since the
final call to rte_event_dequeue_burst() that would release it is never
made.
This might lead to a deadlock, depending on the worker exit timing,
especially when there are very few flows.

Add a cleanup function that releases any scheduling contexts still held
by the worker, using RTE_EVENT_OP_RELEASE.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 examples/ipsec-secgw/ipsec_worker.c | 40 ++++++++++++++++++++---------
 1 file changed, 28 insertions(+), 12 deletions(-)
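
For reference, the release pattern the patch applies boils down to the
following minimal sketch (the helper name and its structure are
illustrative, not the exact ipsec-secgw code, which inlines this at the
end of each worker loop):

	#include <rte_eventdev.h>

	/*
	 * Minimal sketch of the worker-exit cleanup this patch adds: if
	 * the last dequeued event is non-zero, the port may still hold
	 * its scheduling context, so re-enqueue the event as
	 * RTE_EVENT_OP_RELEASE to drop the context.
	 */
	static void
	worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event *ev)
	{
		/*
		 * Mirrors the patch's ev.u64 guard: a zeroed event means
		 * nothing was dequeued, so no scheduling context is held.
		 */
		if (ev->u64 == 0)
			return;

		ev->op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(dev_id, port_id, ev, 1);
	}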
  

Comments

Jerin Jacob May 13, 2022, 1:41 p.m. UTC | #1
On Wed, Apr 27, 2022 at 2:45 AM Pavan Nikhilesh
<pbhagavatula@marvell.com> wrote:
>
> Event ports are configured to implicitly release the scheduler contexts
> currently held when the next call to rte_event_dequeue_burst() is made.
> A worker core might still hold a scheduling context at exit, since the
> final call to rte_event_dequeue_burst() that would release it is never
> made.
> This might lead to a deadlock, depending on the worker exit timing,
> especially when there are very few flows.
>
> Add a cleanup function that releases any scheduling contexts still held
> by the worker, using RTE_EVENT_OP_RELEASE.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>

Please fix the following checkpatch warnings in this series:

### app/eventdev: simplify signal handling and teardown

WARNING:SPACING: space prohibited between function name and open parenthesis '('
#163: FILE: app/test-eventdev/test_perf_common.c:1112:
+               RTE_ETH_FOREACH_DEV (i) {

WARNING:SPACING: space prohibited between function name and open parenthesis '('
#234: FILE: app/test-eventdev/test_pipeline_common.c:515:
+               RTE_ETH_FOREACH_DEV (i) {
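
The fix is simply to drop the space between the macro name and the open
parenthesis, i.e.:

	uint16_t i;

	/* checkpatch-clean form: no space before '(' */
	RTE_ETH_FOREACH_DEV(i) {
		/* per-port teardown goes here */
	}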

> ---
>  examples/ipsec-secgw/ipsec_worker.c | 40 ++++++++++++++++++++---------
>  1 file changed, 28 insertions(+), 12 deletions(-)
>
> diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
> index 8639426c5c..3df5acf384 100644
> --- a/examples/ipsec-secgw/ipsec_worker.c
> +++ b/examples/ipsec-secgw/ipsec_worker.c
> @@ -749,7 +749,7 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
>                 uint8_t nb_links)
>  {
>         struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
> -       unsigned int nb_rx = 0;
> +       unsigned int nb_rx = 0, nb_tx;
>         struct rte_mbuf *pkt;
>         struct rte_event ev;
>         uint32_t lcore_id;
> @@ -847,11 +847,19 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
>                  * directly enqueued to the adapter and it would be
>                  * internally submitted to the eth device.
>                  */
> -               rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> -                               links[0].event_port_id,
> -                               &ev,    /* events */
> -                               1,      /* nb_events */
> -                               0       /* flags */);
> +               nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> +                                                        links[0].event_port_id,
> +                                                        &ev, /* events */
> +                                                        1,   /* nb_events */
> +                                                        0 /* flags */);
> +               if (!nb_tx)
> +                       rte_pktmbuf_free(ev.mbuf);
> +       }
> +
> +       if (ev.u64) {
> +               ev.op = RTE_EVENT_OP_RELEASE;
> +               rte_event_enqueue_burst(links[0].eventdev_id,
> +                                       links[0].event_port_id, &ev, 1);
>         }
>  }
>
> @@ -864,7 +872,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
>                 uint8_t nb_links)
>  {
>         struct lcore_conf_ev_tx_int_port_wrkr lconf;
> -       unsigned int nb_rx = 0;
> +       unsigned int nb_rx = 0, nb_tx;
>         struct rte_event ev;
>         uint32_t lcore_id;
>         int32_t socket_id;
> @@ -952,11 +960,19 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
>                  * directly enqueued to the adapter and it would be
>                  * internally submitted to the eth device.
>                  */
> -               rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> -                               links[0].event_port_id,
> -                               &ev,    /* events */
> -                               1,      /* nb_events */
> -                               0       /* flags */);
> +               nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> +                                                        links[0].event_port_id,
> +                                                        &ev, /* events */
> +                                                        1,   /* nb_events */
> +                                                        0 /* flags */);
> +               if (!nb_tx)
> +                       rte_pktmbuf_free(ev.mbuf);
> +       }
> +
> +       if (ev.u64) {
> +               ev.op = RTE_EVENT_OP_RELEASE;
> +               rte_event_enqueue_burst(links[0].eventdev_id,
> +                                       links[0].event_port_id, &ev, 1);
>         }
>  }
>
> --
> 2.25.1
>
  

Patch

diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8639426c5c..3df5acf384 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -749,7 +749,7 @@  ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
 		uint8_t nb_links)
 {
 	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
-	unsigned int nb_rx = 0;
+	unsigned int nb_rx = 0, nb_tx;
 	struct rte_mbuf *pkt;
 	struct rte_event ev;
 	uint32_t lcore_id;
@@ -847,11 +847,19 @@  ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
 		 * directly enqueued to the adapter and it would be
 		 * internally submitted to the eth device.
 		 */
-		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
-				links[0].event_port_id,
-				&ev,	/* events */
-				1,	/* nb_events */
-				0	/* flags */);
+		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+							 links[0].event_port_id,
+							 &ev, /* events */
+							 1,   /* nb_events */
+							 0 /* flags */);
+		if (!nb_tx)
+			rte_pktmbuf_free(ev.mbuf);
+	}
+
+	if (ev.u64) {
+		ev.op = RTE_EVENT_OP_RELEASE;
+		rte_event_enqueue_burst(links[0].eventdev_id,
+					links[0].event_port_id, &ev, 1);
 	}
 }
 
@@ -864,7 +872,7 @@  ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 		uint8_t nb_links)
 {
 	struct lcore_conf_ev_tx_int_port_wrkr lconf;
-	unsigned int nb_rx = 0;
+	unsigned int nb_rx = 0, nb_tx;
 	struct rte_event ev;
 	uint32_t lcore_id;
 	int32_t socket_id;
@@ -952,11 +960,19 @@  ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 		 * directly enqueued to the adapter and it would be
 		 * internally submitted to the eth device.
 		 */
-		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
-				links[0].event_port_id,
-				&ev,	/* events */
-				1,	/* nb_events */
-				0	/* flags */);
+		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+							 links[0].event_port_id,
+							 &ev, /* events */
+							 1,   /* nb_events */
+							 0 /* flags */);
+		if (!nb_tx)
+			rte_pktmbuf_free(ev.mbuf);
+	}
+
+	if (ev.u64) {
+		ev.op = RTE_EVENT_OP_RELEASE;
+		rte_event_enqueue_burst(links[0].eventdev_id,
+					links[0].event_port_id, &ev, 1);
 	}
 }
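
One detail worth noting: the trailing RELEASE only matters when the port
has implicit release enabled (the default, unless the port was configured
with RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL). A sketch of how a caller could
verify that through the standard eventdev port attribute API follows;
this check is not part of the patch itself:

	#include <rte_eventdev.h>

	/*
	 * Sketch, not part of this patch: query whether implicit release
	 * is disabled on the port; if it is, no trailing RELEASE is
	 * needed on worker exit.
	 */
	static int
	port_needs_release(uint8_t dev_id, uint8_t port_id)
	{
		uint32_t impl_rel_disabled = 0;

		if (rte_event_port_attr_get(dev_id, port_id,
				RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE,
				&impl_rel_disabled) != 0)
			return 1; /* attribute unavailable: release to be safe */

		return !impl_rel_disabled;
	}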