[v3,5/5] examples/ipsec-secgw: update ether type using tunnel info

Message ID 20220822143812.30010-5-ndabilpuram@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: akhil goyal
Series [v3,1/5] mbuf: clarify meta data needed for Outbound Inline

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-x86_64-unit-testing fail Testing issues
ci/iol-x86_64-compile-testing fail Testing issues
ci/iol-aarch64-compile-testing success Testing PASS
ci/github-robot: build success github build: passed
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS

Commit Message

Nithin Dabilpuram Aug. 22, 2022, 2:38 p.m. UTC
Update the ether type during outbound SA processing based on the tunnel
header information, in both the NEON functions used by poll mode and the
event mode worker functions.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
---
 examples/ipsec-secgw/ipsec_neon.h   | 41 +++++++++++++++++++++++++------------
 examples/ipsec-secgw/ipsec_worker.c | 30 +++++++++++++++++++--------
 2 files changed, 49 insertions(+), 22 deletions(-)
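
For reference, a minimal standalone sketch (not part of the patch) of the
byte-order handling used in the NEON hunks below: the ethernet header is
loaded as four 32-bit lanes, so lane 3 carries the 2-byte ether type plus
the first 2 payload bytes, and which half of that word holds the ether type
depends on host endianness. The helper name here is hypothetical.

#include <stdbool.h>
#include <stdint.h>

#include <rte_byteorder.h>
#include <rte_ether.h>

/* Hypothetical scalar equivalent of the lane-3 update in the patch. */
static inline uint32_t
set_ether_type_in_lane3(uint32_t lane3, bool is_ipv4)
{
	uint16_t etype = rte_cpu_to_be_16(is_ipv4 ? RTE_ETHER_TYPE_IPV4 :
					  RTE_ETHER_TYPE_IPV6);

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	/* Header bytes 12-13 (ether type) sit in the low half of the lane. */
	lane3 &= 0xFFFFUL << 16;	/* keep the 2 payload bytes */
	lane3 |= etype;
#else
	/* Big endian: the ether type occupies the high half of the lane. */
	lane3 &= 0xFFFFUL;
	lane3 |= (uint32_t)etype << 16;
#endif
	return lane3;
}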
  

Patch

diff --git a/examples/ipsec-secgw/ipsec_neon.h b/examples/ipsec-secgw/ipsec_neon.h
index 3f2d0a0..9c0498b 100644
--- a/examples/ipsec-secgw/ipsec_neon.h
+++ b/examples/ipsec-secgw/ipsec_neon.h
@@ -18,12 +18,13 @@  extern xmm_t val_eth[RTE_MAX_ETHPORTS];
  */
 static inline void
 processx4_step3(struct rte_mbuf *pkts[FWDSTEP], uint16_t dst_port[FWDSTEP],
-		uint64_t tx_offloads, bool ip_cksum, uint8_t *l_pkt)
+		uint64_t tx_offloads, bool ip_cksum, bool is_ipv4, uint8_t *l_pkt)
 {
 	uint32x4_t te[FWDSTEP];
 	uint32x4_t ve[FWDSTEP];
 	uint32_t *p[FWDSTEP];
 	struct rte_mbuf *pkt;
+	uint32_t val;
 	uint8_t i;
 
 	for (i = 0; i < FWDSTEP; i++) {
@@ -38,7 +39,15 @@  processx4_step3(struct rte_mbuf *pkts[FWDSTEP], uint16_t dst_port[FWDSTEP],
 		te[i] = vld1q_u32(p[i]);
 
 		/* Update last 4 bytes */
-		ve[i] = vsetq_lane_u32(vgetq_lane_u32(te[i], 3), ve[i], 3);
+		val = vgetq_lane_u32(te[i], 3);
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+		val &= 0xFFFFUL << 16;
+		val |= rte_cpu_to_be_16(is_ipv4 ? RTE_ETHER_TYPE_IPV4 : RTE_ETHER_TYPE_IPV6);
+#else
+		val &= 0xFFFFUL;
+		val |= rte_cpu_to_be_16(is_ipv4 ? RTE_ETHER_TYPE_IPV4 : RTE_ETHER_TYPE_IPV6) << 16;
+#endif
+		ve[i] = vsetq_lane_u32(val, ve[i], 3);
 		vst1q_u32(p[i], ve[i]);
 
 		if (ip_cksum) {
@@ -64,10 +73,11 @@  processx4_step3(struct rte_mbuf *pkts[FWDSTEP], uint16_t dst_port[FWDSTEP],
  */
 static inline void
 process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint64_t tx_offloads,
-	       bool ip_cksum, uint8_t *l_pkt)
+	       bool ip_cksum, bool is_ipv4, uint8_t *l_pkt)
 {
 	struct rte_ether_hdr *eth_hdr;
 	uint32x4_t te, ve;
+	uint32_t val;
 
 	/* Check if it is a large packet */
 	if (pkt->pkt_len - RTE_ETHER_HDR_LEN > mtu_size)
@@ -78,7 +88,15 @@  process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint64_t tx_offloads,
 	te = vld1q_u32((uint32_t *)eth_hdr);
 	ve = vreinterpretq_u32_s32(val_eth[dst_port[0]]);
 
-	ve = vcopyq_laneq_u32(ve, 3, te, 3);
+	val = vgetq_lane_u32(te, 3);
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+	val &= 0xFFFFUL << 16;
+	val |= rte_cpu_to_be_16(is_ipv4 ? RTE_ETHER_TYPE_IPV4 : RTE_ETHER_TYPE_IPV6);
+#else
+	val &= 0xFFFFUL;
+	val |= rte_cpu_to_be_16(is_ipv4 ? RTE_ETHER_TYPE_IPV4 : RTE_ETHER_TYPE_IPV6) << 16;
+#endif
+	ve = vsetq_lane_u32(val, ve, 3);
 	vst1q_u32((uint32_t *)eth_hdr, ve);
 
 	if (ip_cksum) {
@@ -223,14 +241,14 @@  send_multi_pkts(struct rte_mbuf **pkts, uint16_t dst_port[MAX_PKT_BURST],
 		lp = pnum;
 		lp[0] = 1;
 
-		processx4_step3(pkts, dst_port, tx_offloads, ip_cksum, &l_pkt);
+		processx4_step3(pkts, dst_port, tx_offloads, ip_cksum, is_ipv4, &l_pkt);
 
 		/* dp1: <d[0], d[1], d[2], d[3], ... > */
 		dp1 = vld1q_u16(dst_port);
 
 		for (i = FWDSTEP; i != k; i += FWDSTEP) {
-			processx4_step3(&pkts[i], &dst_port[i], tx_offloads,
-					ip_cksum, &l_pkt);
+			processx4_step3(&pkts[i], &dst_port[i], tx_offloads, ip_cksum, is_ipv4,
+					&l_pkt);
 
 			/*
 			 * dp2:
@@ -268,20 +286,17 @@  send_multi_pkts(struct rte_mbuf **pkts, uint16_t dst_port[MAX_PKT_BURST],
 	/* Process up to last 3 packets one by one. */
 	switch (nb_rx % FWDSTEP) {
 	case 3:
-		process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum,
-			       &l_pkt);
+		process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum, is_ipv4, &l_pkt);
 		GROUP_PORT_STEP(dlp, dst_port, lp, pnum, i);
 		i++;
 		/* fallthrough */
 	case 2:
-		process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum,
-			       &l_pkt);
+		process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum, is_ipv4, &l_pkt);
 		GROUP_PORT_STEP(dlp, dst_port, lp, pnum, i);
 		i++;
 		/* fallthrough */
 	case 1:
-		process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum,
-			       &l_pkt);
+		process_packet(pkts[i], dst_port + i, tx_offloads, ip_cksum, is_ipv4, &l_pkt);
 		GROUP_PORT_STEP(dlp, dst_port, lp, pnum, i);
 	}
 
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 803157d..5e69450 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -53,11 +53,8 @@  process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
 }
 
 static inline void
-update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
+update_mac_addrs(struct rte_ether_hdr *ethhdr, uint16_t portid)
 {
-	struct rte_ether_hdr *ethhdr;
-
-	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
 	memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
 	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
 }
@@ -374,7 +371,7 @@  process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	/* else, we have a matching route */
 
 	/* Update mac addresses */
-	update_mac_addrs(pkt, port_id);
+	update_mac_addrs(rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *), port_id);
 
 	/* Update the event with the dest port */
 	ipsec_event_pre_forward(pkt, port_id);
@@ -392,6 +389,7 @@  process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 		struct rte_event *ev)
 {
 	struct rte_ipsec_session *sess;
+	struct rte_ether_hdr *ethhdr;
 	struct sa_ctx *sa_ctx;
 	struct rte_mbuf *pkt;
 	uint16_t port_id = 0;
@@ -430,6 +428,7 @@  process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 		goto drop_pkt_and_exit;
 	}
 
+	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
 	/* Check if the packet has to be bypassed */
 	if (sa_idx == BYPASS) {
 		port_id = get_route(pkt, rt, type);
@@ -467,6 +466,9 @@  process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 
 	/* Mark the packet for Tx security offload */
 	pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
+	/* Update ether type */
+	ethhdr->ether_type = (IS_IP4(sa->flags) ? rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
+			      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));
 
 	/* Get the port to which this pkt need to be submitted */
 	port_id = sa->portid;
@@ -476,7 +478,7 @@  process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	pkt->l2_len = RTE_ETHER_HDR_LEN;
 
 	/* Update mac addresses */
-	update_mac_addrs(pkt, port_id);
+	update_mac_addrs(ethhdr, port_id);
 
 	/* Update the event with the dest port */
 	ipsec_event_pre_forward(pkt, port_id);
@@ -494,6 +496,7 @@  ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
 		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
 {
 	struct rte_ipsec_session *sess;
+	struct rte_ether_hdr *ethhdr;
 	uint32_t sa_idx, i, j = 0;
 	uint16_t port_id = 0;
 	struct rte_mbuf *pkt;
@@ -505,7 +508,8 @@  ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
 		port_id = route4_pkt(pkt, rt->rt4_ctx);
 		if (port_id != RTE_MAX_ETHPORTS) {
 			/* Update mac addresses */
-			update_mac_addrs(pkt, port_id);
+			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+			update_mac_addrs(ethhdr, port_id);
 			/* Update the event with the dest port */
 			ipsec_event_pre_forward(pkt, port_id);
 			ev_vector_attr_update(vec, pkt);
@@ -520,7 +524,8 @@  ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
 		port_id = route6_pkt(pkt, rt->rt6_ctx);
 		if (port_id != RTE_MAX_ETHPORTS) {
 			/* Update mac addresses */
-			update_mac_addrs(pkt, port_id);
+			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+			update_mac_addrs(ethhdr, port_id);
 			/* Update the event with the dest port */
 			ipsec_event_pre_forward(pkt, port_id);
 			ev_vector_attr_update(vec, pkt);
@@ -553,7 +558,14 @@  ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
 
 			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
 			port_id = sa->portid;
-			update_mac_addrs(pkt, port_id);
+
+			/* Fetch outer ip type and update */
+			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+			ethhdr->ether_type = (IS_IP4(sa->flags) ?
+					      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
+					      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));
+			update_mac_addrs(ethhdr, port_id);
+
 			ipsec_event_pre_forward(pkt, port_id);
 			ev_vector_attr_update(vec, pkt);
 			vec->mbufs[j++] = pkt;