[2/6] examples/ipsec-secgw: add queue for event crypto adapter

Message ID: 20220804103626.102688-3-vfialko@marvell.com
State: Superseded, archived
Delegated to: Akhil Goyal
Series: examples/ipsec-secgw: add lookaside event mode

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Volodymyr Fialko Aug. 4, 2022, 10:36 a.m. UTC
Add a separate event queue for event crypto adapter processing, to resolve
queue contention between new and already processed events.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 examples/ipsec-secgw/event_helper.c | 95 +++++++++++++++++++++--------
 examples/ipsec-secgw/event_helper.h |  2 +
 2 files changed, 71 insertions(+), 26 deletions(-)
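
Before the patch itself, a short standalone sketch of the queue setup this
change relies on. The helper below is hypothetical (its name and ids are not
from the patch); it mirrors what eh_initialize_eventdev() does in the hunk
further down: the crypto completion queue keeps the configured schedule type
but is created with RTE_EVENT_DEV_PRIORITY_HIGHEST, which takes effect only
on event devices advertising the RTE_EVENT_DEV_CAP_QUEUE_QOS capability.

#include <rte_eventdev.h>

/*
 * Sketch only (hypothetical helper, not part of the patch): configure one
 * event queue as the crypto adapter completion queue with the highest
 * priority, as eh_initialize_eventdev() does below.
 */
static int
setup_crypto_event_queue(uint8_t evdev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf conf;
	int ret;

	/* Start from the device's default queue configuration */
	ret = rte_event_queue_default_conf_get(evdev_id, queue_id, &conf);
	if (ret < 0)
		return ret;

	/* Crypto completions get scheduled ahead of normal-priority Rx queues */
	conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;

	return rte_event_queue_setup(evdev_id, queue_id, &conf);
}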
  

Patch

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 6b00a21b6a..9c20a05da8 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -17,6 +17,8 @@ 
 #define DEFAULT_VECTOR_SIZE  16
 #define DEFAULT_VECTOR_TMO   102400
 
+#define INVALID_EV_QUEUE_ID -1
+
 static volatile bool eth_core_running;
 
 static int
@@ -151,11 +153,10 @@ eh_dev_has_burst_mode(uint8_t dev_id)
 }
 
 static int
-eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+eh_set_nb_eventdev(struct eventmode_conf *em_conf)
 {
-	int lcore_count, nb_eventdev, nb_eth_dev, ret;
 	struct eventdev_params *eventdev_config;
-	struct rte_event_dev_info dev_info;
+	int nb_eventdev;
 
 	/* Get the number of event devices */
 	nb_eventdev = rte_event_dev_count();
@@ -170,6 +171,23 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		return -EINVAL;
 	}
 
+	/* Set event dev id */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+	eventdev_config->eventdev_id = 0;
+
+	/* Update the number of event devices */
+	em_conf->nb_eventdev = 1;
+
+	return 0;
+}
+
+static int
+eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+{
+	int lcore_count, nb_eth_dev, ret;
+	struct eventdev_params *eventdev_config;
+	struct rte_event_dev_info dev_info;
+
 	/* Get the number of eth devs */
 	nb_eth_dev = rte_eth_dev_count_avail();
 	if (nb_eth_dev == 0) {
@@ -197,15 +215,30 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 	eventdev_config = &(em_conf->eventdev_config[0]);
 
 	/* Save number of queues & ports available */
-	eventdev_config->eventdev_id = 0;
-	eventdev_config->nb_eventqueue = dev_info.max_event_queues;
+	eventdev_config->nb_eventqueue = nb_eth_dev;
 	eventdev_config->nb_eventport = dev_info.max_event_ports;
 	eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
 
-	/* Check if there are more queues than required */
-	if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
-		/* One queue is reserved for Tx */
-		eventdev_config->nb_eventqueue = nb_eth_dev + 1;
+	/* One queue is reserved for Tx */
+	eventdev_config->tx_queue_id = INVALID_EV_QUEUE_ID;
+	if (eventdev_config->all_internal_ports) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->tx_queue_id =
+			eventdev_config->nb_eventqueue++;
+	}
+
+	/* One queue is reserved for event crypto adapter */
+	eventdev_config->ev_cpt_queue_id = INVALID_EV_QUEUE_ID;
+	if (em_conf->enable_event_crypto_adapter) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->ev_cpt_queue_id =
+			eventdev_config->nb_eventqueue++;
 	}
 
 	/* Check if there are more ports than required */
@@ -214,9 +247,6 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		eventdev_config->nb_eventport = lcore_count;
 	}
 
-	/* Update the number of event devices */
-	em_conf->nb_eventdev++;
-
 	return 0;
 }
 
@@ -245,15 +275,10 @@ eh_do_capability_check(struct eventmode_conf *em_conf)
 
 	/*
 	 * If Rx & Tx internal ports are supported by all event devices then
-	 * eth cores won't be required. Override the eth core mask requested
-	 * and decrement number of event queues by one as it won't be needed
-	 * for Tx.
+	 * eth cores won't be required. Override the eth core mask requested.
 	 */
-	if (all_internal_ports) {
+	if (all_internal_ports)
 		rte_bitmap_reset(em_conf->eth_core_mask);
-		for (i = 0; i < em_conf->nb_eventdev; i++)
-			em_conf->eventdev_config[i].nb_eventqueue--;
-	}
 }
 
 static int
@@ -370,6 +395,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 			eventdev_config->nb_eventqueue :
 			eventdev_config->nb_eventqueue - 1;
 
+	/* Reserve one queue for event crypto adapter */
+	if (em_conf->enable_event_crypto_adapter)
+		nb_eventqueue--;
+
 	/*
 	 * Map all queues of eth device (port) to an event queue. If there
 	 * are more event queues than eth ports then create 1:1 mapping.
@@ -541,14 +570,18 @@ eh_validate_conf(struct eventmode_conf *em_conf)
 	 * and initialize the config with all ports & queues available
 	 */
 	if (em_conf->nb_eventdev == 0) {
+		ret = eh_set_nb_eventdev(em_conf);
+		if (ret != 0)
+			return ret;
+		eh_do_capability_check(em_conf);
 		ret = eh_set_default_conf_eventdev(em_conf);
 		if (ret != 0)
 			return ret;
+	} else {
+		/* Perform capability check for the selected event devices */
+		eh_do_capability_check(em_conf);
 	}
 
-	/* Perform capability check for the selected event devices */
-	eh_do_capability_check(em_conf);
-
 	/*
 	 * Check if links are specified. Else generate a default config for
 	 * the event ports used.
@@ -594,8 +627,8 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 	uint8_t *queue = NULL;
 	uint8_t eventdev_id;
 	int nb_eventqueue;
-	uint8_t i, j;
-	int ret;
+	int ret, j;
+	uint8_t i;
 
 	for (i = 0; i < nb_eventdev; i++) {
 
@@ -657,14 +690,24 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 			 * stage if event device does not have internal
 			 * ports. This will be an atomic queue.
 			 */
-			if (!eventdev_config->all_internal_ports &&
-			    j == nb_eventqueue-1) {
+			if (j == eventdev_config->tx_queue_id) {
 				eventq_conf.schedule_type =
 					RTE_SCHED_TYPE_ATOMIC;
 			} else {
 				eventq_conf.schedule_type =
 					em_conf->ext_params.sched_type;
 			}
+			/*
+			 * Give the event crypto device's queue higher priority than Rx queues.
+			 * This allows crypto events to be processed with the highest priority.
+			 */
+			if (j == eventdev_config->ev_cpt_queue_id) {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_HIGHEST;
+			} else {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_NORMAL;
+			}
 
 			/* Set max atomic flows to 1024 */
 			eventq_conf.nb_atomic_flows = 1024;
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index 4b26dc8fc2..af5cfcf794 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -88,6 +88,8 @@ struct eventdev_params {
 	uint8_t nb_eventport;
 	uint8_t ev_queue_mode;
 	uint8_t all_internal_ports;
+	int tx_queue_id;
+	int ev_cpt_queue_id;
 };
 
 /**
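
A note on how workers see the new queue: an event port must be linked to it
before worker cores can dequeue the processed crypto events. The sketch below
uses assumed names (link_worker_port() and the ids are illustrative, not from
the patch); in ipsec-secgw the actual links are set up by the event helper's
link configuration.

#include <rte_common.h>
#include <rte_eventdev.h>

/*
 * Illustrative sketch (assumed names): link a worker's event port to both
 * an Rx event queue and the dedicated crypto queue. Passing NULL for the
 * priorities links the queues at RTE_EVENT_DEV_PRIORITY_NORMAL; the
 * per-queue priority chosen at queue setup time is what favours the
 * crypto queue.
 */
static int
link_worker_port(uint8_t evdev_id, uint8_t port_id,
		 uint8_t rx_queue_id, uint8_t cpt_queue_id)
{
	uint8_t queues[] = { rx_queue_id, cpt_queue_id };
	int nb_links;

	nb_links = rte_event_port_link(evdev_id, port_id, queues, NULL,
				       RTE_DIM(queues));

	/* rte_event_port_link() returns the number of links established */
	return nb_links == (int)RTE_DIM(queues) ? 0 : -1;
}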