[5/7] examples/ipsec-secgw: get security context from lcore conf

Message ID 20220322175902.363520-5-ndabilpuram@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series [1/7] examples/ipsec-secgw: disable Tx chksum offload for inline

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Nithin Dabilpuram March 22, 2022, 5:58 p.m. UTC
  Store the security context pointer in the lcore Rx queue config and
read it from there in the fast path for better performance.
Currently rte_eth_dev_get_sec_ctx(), which is meant to be a control
path API, is called on a per-packet basis, and every call to it
checks the ethdev port status.
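
The idea in isolation, as a minimal sketch (the struct and function names
here are hypothetical and only the rte_* calls are real DPDK APIs; the
actual changes are in the diff below): resolve the context once per Rx
queue at setup time, then reuse the cached pointer per burst instead of
calling rte_eth_dev_get_sec_ctx() for every packet.

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

/* Hypothetical per-Rx-queue config, mirroring struct lcore_rx_queue below */
struct rx_queue_cfg {
	uint16_t port_id;
	uint8_t queue_id;
	struct rte_security_ctx *sec_ctx; /* cached once at init */
};

/* Control path: look up the context once per Rx queue, and only when the
 * security dynfield is registered (otherwise it is never dereferenced).
 */
static void
rx_queue_cache_sec_ctx(struct rx_queue_cfg *q)
{
	q->sec_ctx = rte_security_dynfield_is_registered() ?
		     rte_eth_dev_get_sec_ctx(q->port_id) : NULL;
}

/* Fast path: use the cached pointer instead of calling
 * rte_eth_dev_get_sec_ctx() (which validates the port) per packet.
 */
static void
handle_burst(const struct rx_queue_cfg *q, struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n; i++) {
		if (q->sec_ctx != NULL &&
		    (pkts[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) {
			/* inline-offloaded packet: e.g. retrieve the SA
			 * registered as userdata on the cached context
			 */
		}
	}
}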

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 examples/ipsec-secgw/ipsec-secgw.c  | 22 +++++++++++++++++++---
 examples/ipsec-secgw/ipsec.h        |  1 +
 examples/ipsec-secgw/ipsec_worker.h | 17 +++++++----------
 3 files changed, 27 insertions(+), 13 deletions(-)
  

Patch

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 1d0ce3a..a04b5e8 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -544,11 +544,11 @@  process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
 
 static inline void
 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
-		uint8_t nb_pkts, uint16_t portid)
+	     uint8_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx)
 {
 	struct ipsec_traffic traffic;
 
-	prepare_traffic(pkts, &traffic, nb_pkts);
+	prepare_traffic(ctx, pkts, &traffic, nb_pkts);
 
 	if (unlikely(single_sa)) {
 		if (is_unprotected_port(portid))
@@ -740,7 +740,8 @@  ipsec_poll_mode_worker(void)
 
 			if (nb_rx > 0) {
 				core_stats_update_rx(nb_rx);
-				process_pkts(qconf, pkts, nb_rx, portid);
+				process_pkts(qconf, pkts, nb_rx, portid,
+					     rxql->sec_ctx);
 			}
 
 			/* dequeue and process completed crypto-ops */
@@ -3060,6 +3061,21 @@  main(int32_t argc, char **argv)
 
 	flow_init();
 
+	/* Get security context if available and only if dynamic field is
+	 * registered for fast path access.
+	 */
+	if (!rte_security_dynfield_is_registered())
+		goto skip_sec_ctx;
+
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		for (i = 0; i < lcore_conf[lcore_id].nb_rx_queue; i++) {
+			portid = lcore_conf[lcore_id].rx_queue_list[i].port_id;
+			lcore_conf[lcore_id].rx_queue_list[i].sec_ctx =
+				rte_eth_dev_get_sec_ctx(portid);
+		}
+	}
+skip_sec_ctx:
+
 	check_all_ports_link_status(enabled_port_mask);
 
 	if (stats_interval > 0)
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 9a4e7ea..ecad262 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -269,6 +269,7 @@  struct cnt_blk {
 struct lcore_rx_queue {
 	uint16_t port_id;
 	uint8_t queue_id;
+	struct rte_security_ctx *sec_ctx;
 } __rte_cache_aligned;
 
 struct buffer {
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index eb966a6..838b3f6 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -115,7 +115,8 @@  adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
 }
 
 static __rte_always_inline void
-prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
+prepare_one_packet(struct rte_security_ctx *ctx, struct rte_mbuf *pkt,
+		   struct ipsec_traffic *t)
 {
 	uint32_t ptype = pkt->packet_type;
 	const struct rte_ether_hdr *eth;
@@ -201,13 +202,9 @@  prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 	 * with the security session.
 	 */
 
-	if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
-			rte_security_dynfield_is_registered()) {
+	if (ctx && pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
 		struct ipsec_sa *sa;
 		struct ipsec_mbuf_metadata *priv;
-		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
-						rte_eth_dev_get_sec_ctx(
-						pkt->port);
 
 		/* Retrieve the userdata registered. Here, the userdata
 		 * registered is the SA pointer.
@@ -229,8 +226,8 @@  prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 }
 
 static __rte_always_inline void
-prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
-		uint16_t nb_pkts)
+prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts,
+		struct ipsec_traffic *t, uint16_t nb_pkts)
 {
 	int32_t i;
 
@@ -241,11 +238,11 @@  prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
 	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
 		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
 					void *));
-		prepare_one_packet(pkts[i], t);
+		prepare_one_packet(ctx, pkts[i], t);
 	}
 	/* Process left packets */
 	for (; i < nb_pkts; i++)
-		prepare_one_packet(pkts[i], t);
+		prepare_one_packet(ctx, pkts[i], t);
 }
 
 static __rte_always_inline void