[v4,12/15] examples/ipsec-secgw: add app mode worker

Message ID 1582185727-6749-13-git-send-email-lbartosik@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series: add eventmode to ipsec-secgw

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Lukas Bartosik [C] Feb. 20, 2020, 8:02 a.m. UTC
  Add application inbound/outbound worker thread and
IPsec application processing code for event mode.

Example ipsec-secgw command in app mode:
ipsec-secgw -w 0002:02:00.0,ipsec_in_max_spi=128
-w 0002:03:00.0,ipsec_in_max_spi=128 -w 0002:0e:00.0 -w 0002:10:00.1
--log-level=8 -c 0x1 -- -P -p 0x3 -u 0x1 --config "(1,0,0),(0,0,0)"
-f aes-gcm.cfg --transfer-mode event --event-schedule-type parallel

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
---
 examples/ipsec-secgw/ipsec-secgw.c  |  31 +--
 examples/ipsec-secgw/ipsec-secgw.h  |  63 ++++++
 examples/ipsec-secgw/ipsec.h        |  16 --
 examples/ipsec-secgw/ipsec_worker.c | 424 +++++++++++++++++++++++++++++++++++-
 examples/ipsec-secgw/ipsec_worker.h |  35 +++
 5 files changed, 521 insertions(+), 48 deletions(-)
 create mode 100644 examples/ipsec-secgw/ipsec_worker.h
  

Comments

Akhil Goyal Feb. 24, 2020, 2:13 p.m. UTC | #1
Hi Lukasz/Anoob,

> 
> Add application inbound/outbound worker thread and
> IPsec application processing code for event mode.
> 
> Example ipsec-secgw command in app mode:
> ipsec-secgw -w 0002:02:00.0,ipsec_in_max_spi=128
> -w 0002:03:00.0,ipsec_in_max_spi=128 -w 0002:0e:00.0 -w 0002:10:00.1
> --log-level=8 -c 0x1 -- -P -p 0x3 -u 0x1 --config "(1,0,0),(0,0,0)"
> -f aes-gcm.cfg --transfer-mode event --event-schedule-type parallel
> 
> Signed-off-by: Anoob Joseph <anoobj@marvell.com>
> Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
> Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
> ---

...

> +static inline enum pkt_type
> +process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
> +{
> +	struct rte_ether_hdr *eth;
> +
> +	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> +	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
> +		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
> +				offsetof(struct ip, ip_p));
> +		if (**nlp == IPPROTO_ESP)
> +			return PKT_TYPE_IPSEC_IPV4;
> +		else
> +			return PKT_TYPE_PLAIN_IPV4;
> +	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
> +		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
> +				offsetof(struct ip6_hdr, ip6_nxt));
> +		if (**nlp == IPPROTO_ESP)
> +			return PKT_TYPE_IPSEC_IPV6;
> +		else
> +			return PKT_TYPE_PLAIN_IPV6;
> +	}
> +
> +	/* Unknown/Unsupported type */
> +	return PKT_TYPE_INVALID;
> +}
> +
> +static inline void
> +update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
> +{
> +	struct rte_ether_hdr *ethhdr;
> +
> +	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> +	memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src,
> +			RTE_ETHER_ADDR_LEN);
> +	memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst,
> +			RTE_ETHER_ADDR_LEN);
> +}
> 
>  static inline void
>  ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
> @@ -61,6 +101,290 @@ prepare_out_sessions_tbl(struct sa_ctx *sa_out,
>  	}
>  }
> 
> +static inline int
> +check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
> +{
> +	uint32_t res;
> +
> +	if (unlikely(sp == NULL))
> +		return 0;
> +
> +	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
> +			DEFAULT_MAX_CATEGORIES);
> +
> +	if (unlikely(res == 0)) {
> +		/* No match */
> +		return 0;
> +	}
> +
> +	if (res == DISCARD)
> +		return 0;
> +	else if (res == BYPASS) {
> +		*sa_idx = -1;
> +		return 1;
> +	}
> +
> +	*sa_idx = res - 1;
> +	return 1;
> +}
> +
> +static inline uint16_t
> +route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
> +{
> +	uint32_t dst_ip;
> +	uint16_t offset;
> +	uint32_t hop;
> +	int ret;
> +
> +	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
> +	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
> +	dst_ip = rte_be_to_cpu_32(dst_ip);
> +
> +	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
> +
> +	if (ret == 0) {
> +		/* We have a hit */
> +		return hop;
> +	}
> +
> +	/* else */
> +	return RTE_MAX_ETHPORTS;
> +}
> +
> +/* TODO: To be tested */
> +static inline uint16_t
> +route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
> +{
> +	uint8_t dst_ip[16];
> +	uint8_t *ip6_dst;
> +	uint16_t offset;
> +	uint32_t hop;
> +	int ret;
> +
> +	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
> +	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
> +	memcpy(&dst_ip[0], ip6_dst, 16);
> +
> +	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
> +
> +	if (ret == 0) {
> +		/* We have a hit */
> +		return hop;
> +	}
> +
> +	/* else */
> +	return RTE_MAX_ETHPORTS;
> +}
> +
> +static inline uint16_t
> +get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
> +{
> +	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
> +		return route4_pkt(pkt, rt->rt4_ctx);
> +	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
> +		return route6_pkt(pkt, rt->rt6_ctx);
> +
> +	return RTE_MAX_ETHPORTS;
> +}

Is it not possible to use the existing functions for finding routes, checking packet types and checking security policies?
It will be very difficult to manage two separate functions for the same work. I can see that the pkt->data_offs
are not required to be updated in the inline case, but can we split the existing functions in two so that they can be
called in the appropriate cases?

As you have said in the cover note as well, lookaside protocol support is to be added. I also tried adding it, and it will get very
difficult to manage separate functions for separate code paths.

> +
> +static inline int
> +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
> +		struct rte_event *ev)
> +{
> +	struct ipsec_sa *sa = NULL;
> +	struct rte_mbuf *pkt;
> +	uint16_t port_id = 0;
> +	enum pkt_type type;
> +	uint32_t sa_idx;
> +	uint8_t *nlp;
> +
> +	/* Get pkt from event */
> +	pkt = ev->mbuf;
> +
> +	/* Check the packet type */
> +	type = process_ipsec_get_pkt_type(pkt, &nlp);
> +
> +	switch (type) {
> +	case PKT_TYPE_PLAIN_IPV4:
> +		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
> +			if (unlikely(pkt->ol_flags &
> +				     PKT_RX_SEC_OFFLOAD_FAILED)) {
> +				RTE_LOG(ERR, IPSEC,
> +					"Inbound security offload failed\n");
> +				goto drop_pkt_and_exit;
> +			}
> +			sa = pkt->userdata;
> +		}
> +
> +		/* Check if we have a match */
> +		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
> +			/* No valid match */
> +			goto drop_pkt_and_exit;
> +		}
> +		break;
> +
> +	case PKT_TYPE_PLAIN_IPV6:
> +		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
> +			if (unlikely(pkt->ol_flags &
> +				     PKT_RX_SEC_OFFLOAD_FAILED)) {
> +				RTE_LOG(ERR, IPSEC,
> +					"Inbound security offload failed\n");
> +				goto drop_pkt_and_exit;
> +			}
> +			sa = pkt->userdata;
> +		}
> +
> +		/* Check if we have a match */
> +		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
> +			/* No valid match */
> +			goto drop_pkt_and_exit;
> +		}
> +		break;
> +
> +	default:
> +		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
> +		goto drop_pkt_and_exit;
> +	}
> +
> +	/* Check if the packet has to be bypassed */
> +	if (sa_idx == BYPASS)
> +		goto route_and_send_pkt;
> +
> +	/* Validate sa_idx */
> +	if (sa_idx >= ctx->sa_ctx->nb_sa)
> +		goto drop_pkt_and_exit;
> +
> +	/* Else the packet has to be protected with SA */
> +
> +	/* If the packet was IPsec processed, then SA pointer should be set */
> +	if (sa == NULL)
> +		goto drop_pkt_and_exit;
> +
> +	/* SPI on the packet should match with the one in SA */
> +	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
> +		goto drop_pkt_and_exit;
> +
> +route_and_send_pkt:
> +	port_id = get_route(pkt, rt, type);
> +	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
> +		/* no match */
> +		goto drop_pkt_and_exit;
> +	}
> +	/* else, we have a matching route */
> +
> +	/* Update mac addresses */
> +	update_mac_addrs(pkt, port_id);
> +
> +	/* Update the event with the dest port */
> +	ipsec_event_pre_forward(pkt, port_id);
> +	return 1;
> +
> +drop_pkt_and_exit:
> +	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
> +	rte_pktmbuf_free(pkt);
> +	ev->mbuf = NULL;
> +	return 0;
> +}
> +
> +static inline int
> +process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
> +		struct rte_event *ev)
> +{
> +	struct rte_ipsec_session *sess;
> +	struct sa_ctx *sa_ctx;
> +	struct rte_mbuf *pkt;
> +	uint16_t port_id = 0;
> +	struct ipsec_sa *sa;
> +	enum pkt_type type;
> +	uint32_t sa_idx;
> +	uint8_t *nlp;
> +
> +	/* Get pkt from event */
> +	pkt = ev->mbuf;
> +
> +	/* Check the packet type */
> +	type = process_ipsec_get_pkt_type(pkt, &nlp);
> +
> +	switch (type) {
> +	case PKT_TYPE_PLAIN_IPV4:
> +		/* Check if we have a match */
> +		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
> +			/* No valid match */
> +			goto drop_pkt_and_exit;
> +		}
> +		break;
> +	case PKT_TYPE_PLAIN_IPV6:
> +		/* Check if we have a match */
> +		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
> +			/* No valid match */
> +			goto drop_pkt_and_exit;
> +		}
> +		break;
> +	default:
> +		/*
> +		 * Only plain IPv4 & IPv6 packets are allowed
> +		 * on protected port. Drop the rest.
> +		 */
> +		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
> +		goto drop_pkt_and_exit;
> +	}
> +
> +	/* Check if the packet has to be bypassed */
> +	if (sa_idx == BYPASS) {
> +		port_id = get_route(pkt, rt, type);
> +		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
> +			/* no match */
> +			goto drop_pkt_and_exit;
> +		}
> +		/* else, we have a matching route */
> +		goto send_pkt;
> +	}
> +
> +	/* Validate sa_idx */
> +	if (sa_idx >= ctx->sa_ctx->nb_sa)
> +		goto drop_pkt_and_exit;
> +
> +	/* Else the packet has to be protected */
> +
> +	/* Get SA ctx*/
> +	sa_ctx = ctx->sa_ctx;
> +
> +	/* Get SA */
> +	sa = &(sa_ctx->sa[sa_idx]);
> +
> +	/* Get IPsec session */
> +	sess = ipsec_get_primary_session(sa);
> +
> +	/* Allow only inline protocol for now */
> +	if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
> +		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
> +		goto drop_pkt_and_exit;
> +	}
> +
> +	if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
> +		pkt->userdata = sess->security.ses;
> +
> +	/* Mark the packet for Tx security offload */
> +	pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
> +
> +	/* Get the port to which this pkt need to be submitted */
> +	port_id = sa->portid;
> +
> +send_pkt:
> +	/* Update mac addresses */
> +	update_mac_addrs(pkt, port_id);
> +
> +	/* Update the event with the dest port */
> +	ipsec_event_pre_forward(pkt, port_id);

How is the IP checksum getting updated for the processed packet?
If the hardware is not updating it, should we add a fallback mechanism for SW-based
checksum update?
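If a SW fallback turns out to be necessary, a minimal sketch for the IPv4 case could look like the below (untested; sw_update_ip4_cksum is a hypothetical name, rte_ipv4_cksum() is from rte_ip.h):

static inline void
sw_update_ip4_cksum(struct rte_mbuf *pkt)
{
	struct rte_ipv4_hdr *ip;

	/* Recompute the IPv4 header checksum in place */
	ip = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
			RTE_ETHER_HDR_LEN);
	ip->hdr_checksum = 0;
	ip->hdr_checksum = rte_ipv4_cksum(ip);
}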

> +	return 1;

It will be better to use some macros while returning, like:

#define PKT_FORWARD	1
#define PKT_DROPPED	0
#define PKT_POSTED	2	/* maybe for lookaside cases */
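Then the check in the dequeue loop further down could read, for example (illustrative fragment, not part of the patch):

		ret = process_ipsec_ev_inbound(&lconf.inbound, &lconf.rt, &ev);
		if (ret != PKT_FORWARD)
			/* PKT_DROPPED: pkt was freed; PKT_POSTED: pkt is owned
			 * by a lookaside path and must not be enqueued here */
			continue;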

> +
> +drop_pkt_and_exit:
> +	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
> +	rte_pktmbuf_free(pkt);
> +	ev->mbuf = NULL;
> +	return 0;
> +}
> +
>  /*
>   * Event mode exposes various operating modes depending on the
>   * capabilities of the event device and the operating mode
> @@ -68,7 +392,7 @@ prepare_out_sessions_tbl(struct sa_ctx *sa_out,
>   */
> 
>  /* Workers registered */
> -#define IPSEC_EVENTMODE_WORKERS		1
> +#define IPSEC_EVENTMODE_WORKERS		2
> 
>  /*
>   * Event mode worker
> @@ -146,7 +470,7 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
>  			}
> 
>  			/* Save security session */
> -			pkt->udata64 = (uint64_t) sess_tbl[port_id];
> +			pkt->userdata = sess_tbl[port_id];
> 
>  			/* Mark the packet for Tx security offload */
>  			pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
> @@ -165,6 +489,94 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
>  	}
>  }
> 
> +/*
> + * Event mode worker
> + * Operating parameters : non-burst - Tx internal port - app mode
> + */
> +static void
> +ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
> +		uint8_t nb_links)
> +{
> +	struct lcore_conf_ev_tx_int_port_wrkr lconf;
> +	unsigned int nb_rx = 0;
> +	struct rte_event ev;
> +	uint32_t lcore_id;
> +	int32_t socket_id;
> +	int ret;
> +
> +	/* Check if we have links registered for this lcore */
> +	if (nb_links == 0) {
> +		/* No links registered - exit */
> +		return;
> +	}
> +
> +	/* We have valid links */
> +
> +	/* Get core ID */
> +	lcore_id = rte_lcore_id();
> +
> +	/* Get socket ID */
> +	socket_id = rte_lcore_to_socket_id(lcore_id);
> +
> +	/* Save routing table */
> +	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
> +	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
> +	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
> +	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
> +	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
> +	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;

The session_priv_pool should also be added for both inbound and outbound.
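I.e. presumably something like the following, next to the session_pool assignments (sketch; this assumes the session_priv_pool members in struct socket_ctx and struct ipsec_ctx):

	lconf.inbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	lconf.outbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;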

> +	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
> +	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
> +	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
> +	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
> +
> +	RTE_LOG(INFO, IPSEC,
> +		"Launching event mode worker (non-burst - Tx internal port - "
> +		"app mode) on lcore %d\n", lcore_id);
> +
> +	/* Check if it's single link */
> +	if (nb_links != 1) {
> +		RTE_LOG(INFO, IPSEC,
> +			"Multiple links not supported. Using first link\n");
> +	}
> +
> +	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
> +		links[0].event_port_id);
> +
> +	while (!force_quit) {
> +		/* Read packet from event queues */
> +		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
> +				links[0].event_port_id,
> +				&ev,     /* events */
> +				1,       /* nb_events */
> +				0        /* timeout_ticks */);
> +
> +		if (nb_rx == 0)
> +			continue;
> +

The event type should be checked here before dereferencing the mbuf.
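Something along these lines, before ev.mbuf is touched (sketch):

		if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
			RTE_LOG(ERR, IPSEC,
				"Invalid event type %u\n", ev.event_type);
			continue;
		}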

> +		if (is_unprotected_port(ev.mbuf->port))
> +			ret = process_ipsec_ev_inbound(&lconf.inbound,
> +							&lconf.rt, &ev);
> +		else
> +			ret = process_ipsec_ev_outbound(&lconf.outbound,
> +							&lconf.rt, &ev);
> +		if (ret != 1)
> +			/* The pkt has been dropped */
> +			continue;
> +
> +		/*
> +		 * Since tx internal port is available, events can be
> +		 * directly enqueued to the adapter and it would be
> +		 * internally submitted to the eth device.
> +		 */
> +		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> +				links[0].event_port_id,
> +				&ev,	/* events */
> +				1,	/* nb_events */
> +				0	/* flags */);
> +	}
> +}
> +
>  static uint8_t
>  ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
>  {
> @@ -180,6 +592,14 @@ ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
>  	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
>  	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
>  	wrkr++;
> +	nb_wrkr_param++;
> +
> +	/* Non-burst - Tx internal port - app mode */
> +	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
> +	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
> +	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
> +	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
> +	nb_wrkr_param++;
> 
>  	return nb_wrkr_param;
>  }
> diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
> new file mode 100644
> index 0000000..87b4f22
> --- /dev/null
> +++ b/examples/ipsec-secgw/ipsec_worker.h
> @@ -0,0 +1,35 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright (C) 2020 Marvell International Ltd.
> + */
> +#ifndef _IPSEC_WORKER_H_
> +#define _IPSEC_WORKER_H_
> +
> +#include "ipsec.h"
> +
> +enum pkt_type {
> +	PKT_TYPE_PLAIN_IPV4 = 1,
> +	PKT_TYPE_IPSEC_IPV4,
> +	PKT_TYPE_PLAIN_IPV6,
> +	PKT_TYPE_IPSEC_IPV6,
> +	PKT_TYPE_INVALID
> +};
> +
> +struct route_table {
> +	struct rt_ctx *rt4_ctx;
> +	struct rt_ctx *rt6_ctx;
> +};
> +
> +/*
> + * Conf required by event mode worker with tx internal port
> + */
> +struct lcore_conf_ev_tx_int_port_wrkr {
> +	struct ipsec_ctx inbound;
> +	struct ipsec_ctx outbound;
> +	struct route_table rt;
> +} __rte_cache_aligned;
> +
> +void ipsec_poll_mode_worker(void);
> +
> +int ipsec_launch_one_lcore(void *args);
> +
> +#endif /* _IPSEC_WORKER_H_ */
> --
> 2.7.4
  
Lukas Bartosik [C] Feb. 25, 2020, 11:50 a.m. UTC | #2
Hi Akhil,

Please see my answers below.

Thanks,
Lukasz

On 24.02.2020 15:13, Akhil Goyal wrote:
> Hi Lukasz/Anoob,
> 
> 
> ...
> 
> 
> Is it not possible to use the existing functions for finding routes, checking packet types and checking security policies?
> It will be very difficult to manage two separate functions for the same work. I can see that the pkt->data_offs
> are not required to be updated in the inline case, but can we split the existing functions in two so that they can be
> called in the appropriate cases?
> 
> As you have said in the cover note as well, lookaside protocol support is to be added. I also tried adding it, and it will get very
> difficult to manage separate functions for separate code paths.
> 

[Lukasz] This was also Konstantin's comment during review of one of the previous revisions.
The prepare_one_packet() and prepare_tx_pkt() do much more than we need, and for performance reasons
we crafted new functions. For example, the process_ipsec_get_pkt_type() function returns nlp and whether
the packet type is plain or IPsec. That's all. prepare_one_packet() processes packets in chunks and does much more -
it adjusts the mbuf and packet length, then it demultiplexes packets into plain and IPsec flows and finally does
inline checks. It is similar for update_mac_addrs() vs prepare_tx_pkt() and check_sp() vs inbound_sp_sa():
prepare_tx_pkt() and inbound_sp_sa() do more than we need in event mode.

I understand your concern from the perspective of code maintenance, but on the other hand we are concerned with performance.
The current code is not optimized to support the multiple processing modes introduced with rte_security. We can work on common
routines once the other modes are also added, so that we can come up with a better solution than what we have today.

>> ...
>> +send_pkt:
>> +	/* Update mac addresses */
>> +	update_mac_addrs(pkt, port_id);
>> +
>> +	/* Update the event with the dest port */
>> +	ipsec_event_pre_forward(pkt, port_id);
> 
> How is the IP checksum getting updated for the processed packet?
> If the hardware is not updating it, should we add a fallback mechanism for SW-based
> checksum update?
> 

[Lukasz] In the case of outbound inline protocol the checksum has to be calculated by HW, as the final
packet is formed by the crypto device. There is no need to calculate it in SW.

>> +	return 1;
> 
> It will be better to use some macros while returning,
> like:
> #define PKT_FORWARD	1
> #define PKT_DROPPED	0
> #define PKT_POSTED	2	/* maybe for lookaside cases */
> 
>> ...
>> +	/* Save routing table */
>> +	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
>> +	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
>> +	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
>> +	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
>> +	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
>> +	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
> 
> The session_priv_pool should also be added for both inbound and outbound.
> 

[Lukasz] I will add it in V5.

>> ...
>> +	while (!force_quit) {
>> +		/* Read packet from event queues */
>> +		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
>> +				links[0].event_port_id,
>> +				&ev,     /* events */
>> +				1,       /* nb_events */
>> +				0        /* timeout_ticks */);
>> +
>> +		if (nb_rx == 0)
>> +			continue;
>> +
> 
> The event type should be checked here before dereferencing the mbuf.
> 

[Lukasz] I will add an event type check in V5.

  
Anoob Joseph Feb. 25, 2020, 12:13 p.m. UTC | #3
Hi Akhil, Konstantin,

One question below.

Thanks,
Anoob

> -----Original Message-----
> From: Lukas Bartosik <lbartosik@marvell.com>
> Sent: Tuesday, February 25, 2020 5:21 PM
> To: Akhil Goyal <akhil.goyal@nxp.com>; Anoob Joseph <anoobj@marvell.com>
> Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Narayana Prasad Raju
> Athreya <pathreya@marvell.com>; Ankur Dwivedi <adwivedi@marvell.com>;
> Archana Muniganti <marchana@marvell.com>; Tejasree Kondoj
> <ktejasree@marvell.com>; Vamsi Krishna Attunuru <vattunuru@marvell.com>;
> Konstantin Ananyev <konstantin.ananyev@intel.com>; dev@dpdk.org; Thomas
> Monjalon <thomas@monjalon.net>; Radu Nicolau <radu.nicolau@intel.com>
> Subject: Re: [EXT] RE: [PATCH v4 12/15] examples/ipsec-secgw: add app mode
> worker
> 
> Hi Akhil,
> 
> Please see my answers below.
> 
> Thanks,
> Lukasz
> 
> On 24.02.2020 15:13, Akhil Goyal wrote:
> > Hi Lukasz/Anoob,
> >
> >
> > ...
> >
> >> +	/* Save routing table */
> >> +	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
> >> +	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
> >> +	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
> >> +	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
> >> +	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
> >> +	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
> >
> > The session_priv_pool should also be added for both inbound and outbound.
> >
> 
> [Lukasz] I will add it in V5.

[Anoob] Actually, why do we need both session_pool and private_pool? I think it's a remnant from the time when the session was created when the first packet arrived.

@Konstantin, thoughts?
 
  
Ananyev, Konstantin Feb. 25, 2020, 4:03 p.m. UTC | #4
> > >> Add application inbound/outbound worker thread and IPsec application
> > >> processing code for event mode.
> > >>
> > >> Example ipsec-secgw command in app mode:
> > >> ipsec-secgw -w 0002:02:00.0,ipsec_in_max_spi=128 -w
> > >> 0002:03:00.0,ipsec_in_max_spi=128 -w 0002:0e:00.0 -w 0002:10:00.1
> > >> --log-level=8 -c 0x1 -- -P -p 0x3 -u 0x1 --config "(1,0,0),(0,0,0)"
> > >> -f aes-gcm.cfg --transfer-mode event --event-schedule-type parallel
> > >>
> > >> Signed-off-by: Anoob Joseph <anoobj@marvell.com>
> > >> Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
> > >> Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
> > >> ---
> > >
> > > ...
> > >
> > >> +static inline enum pkt_type
> > >> +process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp) {
> > >> +	struct rte_ether_hdr *eth;
> > >> +
> > >> +	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> > >> +	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
> > >> +		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
> > >> +				offsetof(struct ip, ip_p));
> > >> +		if (**nlp == IPPROTO_ESP)
> > >> +			return PKT_TYPE_IPSEC_IPV4;
> > >> +		else
> > >> +			return PKT_TYPE_PLAIN_IPV4;
> > >> +	} else if (eth->ether_type ==
> > >> +rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
> > >> {
> > >> +		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
> > >> +				offsetof(struct ip6_hdr, ip6_nxt));
> > >> +		if (**nlp == IPPROTO_ESP)
> > >> +			return PKT_TYPE_IPSEC_IPV6;
> > >> +		else
> > >> +			return PKT_TYPE_PLAIN_IPV6;
> > >> +	}
> > >> +
> > >> +	/* Unknown/Unsupported type */
> > >> +	return PKT_TYPE_INVALID;
> > >> +}
> > >> +
> > >> +static inline void
> > >> +update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid) {
> > >> +	struct rte_ether_hdr *ethhdr;
> > >> +
> > >> +	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> > >> +	memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src,
> > >> RTE_ETHER_ADDR_LEN);
> > >> +	memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst,
> > >> RTE_ETHER_ADDR_LEN);
> > >> +}
> > >>
> > >>  static inline void
> > >>  ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id) @@
> > >> -61,6 +101,290 @@ prepare_out_sessions_tbl(struct sa_ctx *sa_out,
> > >>  	}
> > >>  }
> > >>
> > >> +static inline int
> > >> +check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx) {
> > >> +	uint32_t res;
> > >> +
> > >> +	if (unlikely(sp == NULL))
> > >> +		return 0;
> > >> +
> > >> +	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
> > >> +			DEFAULT_MAX_CATEGORIES);
> > >> +
> > >> +	if (unlikely(res == 0)) {
> > >> +		/* No match */
> > >> +		return 0;
> > >> +	}
> > >> +
> > >> +	if (res == DISCARD)
> > >> +		return 0;
> > >> +	else if (res == BYPASS) {
> > >> +		*sa_idx = -1;
> > >> +		return 1;
> > >> +	}
> > >> +
> > >> +	*sa_idx = res - 1;
> > >> +	return 1;
> > >> +}
> > >> +
> > >> +static inline uint16_t
> > >> +route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) {
> > >> +	uint32_t dst_ip;
> > >> +	uint16_t offset;
> > >> +	uint32_t hop;
> > >> +	int ret;
> > >> +
> > >> +	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
> > >> +	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
> > >> +	dst_ip = rte_be_to_cpu_32(dst_ip);
> > >> +
> > >> +	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
> > >> +
> > >> +	if (ret == 0) {
> > >> +		/* We have a hit */
> > >> +		return hop;
> > >> +	}
> > >> +
> > >> +	/* else */
> > >> +	return RTE_MAX_ETHPORTS;
> > >> +}
> > >> +
> > >> +/* TODO: To be tested */
> > >> +static inline uint16_t
> > >> +route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
> > >> +{
> > >> +	uint8_t dst_ip[16];
> > >> +	uint8_t *ip6_dst;
> > >> +	uint16_t offset;
> > >> +	uint32_t hop;
> > >> +	int ret;
> > >> +
> > >> +	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
> > >> +	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
> > >> +	memcpy(&dst_ip[0], ip6_dst, 16);
> > >> +
> > >> +	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
> > >> +
> > >> +	if (ret == 0) {
> > >> +		/* We have a hit */
> > >> +		return hop;
> > >> +	}
> > >> +
> > >> +	/* else */
> > >> +	return RTE_MAX_ETHPORTS;
> > >> +}
> > >> +
> > >> +static inline uint16_t
> > >> +get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
> > >> +{
> > >> +	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
> > >> +		return route4_pkt(pkt, rt->rt4_ctx);
> > >> +	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
> > >> +		return route6_pkt(pkt, rt->rt6_ctx);
> > >> +
> > >> +	return RTE_MAX_ETHPORTS;
> > >> +}
> > >
> > > Is it not possible to use the existing functions for finding routes, checking
> > > packet types and checking security policies?
> > > It will be very difficult to manage two separate functions for the same
> > > work. I can see that the pkt->data_offs are not required to be updated
> > > in the inline case, but can we split the existing functions in two so that
> > > they can be called in the appropriate cases?
> > >
> > > As you said in the cover note, lookaside protocol support is also to be
> > > added. I tried adding it too, and it will get very difficult to manage
> > > separate functions for separate code paths.
> > >
> >
> > [Lukasz] This was also Konstantin's comment during review of one of the
> > previous revisions.
> > The prepare_one_packet() and prepare_tx_pkt() do much more than we need,
> > and for performance reasons we crafted new functions. For example, the
> > process_ipsec_get_pkt_type() function returns nlp and whether the packet
> > type is plain or IPsec. That's all. prepare_one_packet() processes packets
> > in chunks and does much more - it adjusts the mbuf and packet length, then
> > it demultiplexes packets into plain and IPsec flows and finally does inline
> > checks. The same applies to update_mac_addrs() vs prepare_tx_pkt() and
> > check_sp() vs inbound_sp_sa(): prepare_tx_pkt() and inbound_sp_sa() do more
> > than we need in event mode.
> >
> > I understand your concern from the perspective of code maintenance, but on
> > the other hand we are concerned with performance.
> > The current code is not optimized to support the multiple processing modes
> > introduced with rte_security. We can work on common routines once the other
> > modes are added as well, so that we can come up with a better solution than
> > what we have today.
> >
> > >> +
> > >> +static inline int
> > >> +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
> > >> +		struct rte_event *ev)
> > >> +{
> > >> +	struct ipsec_sa *sa = NULL;
> > >> +	struct rte_mbuf *pkt;
> > >> +	uint16_t port_id = 0;
> > >> +	enum pkt_type type;
> > >> +	uint32_t sa_idx;
> > >> +	uint8_t *nlp;
> > >> +
> > >> +	/* Get pkt from event */
> > >> +	pkt = ev->mbuf;
> > >> +
> > >> +	/* Check the packet type */
> > >> +	type = process_ipsec_get_pkt_type(pkt, &nlp);
> > >> +
> > >> +	switch (type) {
> > >> +	case PKT_TYPE_PLAIN_IPV4:
> > >> +		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
> > >> +			if (unlikely(pkt->ol_flags &
> > >> +				     PKT_RX_SEC_OFFLOAD_FAILED)) {
> > >> +				RTE_LOG(ERR, IPSEC,
> > >> +					"Inbound security offload failed\n");
> > >> +				goto drop_pkt_and_exit;
> > >> +			}
> > >> +			sa = pkt->userdata;
> > >> +		}
> > >> +
> > >> +		/* Check if we have a match */
> > >> +		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
> > >> +			/* No valid match */
> > >> +			goto drop_pkt_and_exit;
> > >> +		}
> > >> +		break;
> > >> +
> > >> +	case PKT_TYPE_PLAIN_IPV6:
> > >> +		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
> > >> +			if (unlikely(pkt->ol_flags &
> > >> +				     PKT_RX_SEC_OFFLOAD_FAILED)) {
> > >> +				RTE_LOG(ERR, IPSEC,
> > >> +					"Inbound security offload failed\n");
> > >> +				goto drop_pkt_and_exit;
> > >> +			}
> > >> +			sa = pkt->userdata;
> > >> +		}
> > >> +
> > >> +		/* Check if we have a match */
> > >> +		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
> > >> +			/* No valid match */
> > >> +			goto drop_pkt_and_exit;
> > >> +		}
> > >> +		break;
> > >> +
> > >> +	default:
> > >> +		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
> > >> +		goto drop_pkt_and_exit;
> > >> +	}
> > >> +
> > >> +	/* Check if the packet has to be bypassed */
> > >> +	if (sa_idx == BYPASS)
> > >> +		goto route_and_send_pkt;
> > >> +
> > >> +	/* Validate sa_idx */
> > >> +	if (sa_idx >= ctx->sa_ctx->nb_sa)
> > >> +		goto drop_pkt_and_exit;
> > >> +
> > >> +	/* Else the packet has to be protected with SA */
> > >> +
> > >> +	/* If the packet was IPsec processed, then SA pointer should be set */
> > >> +	if (sa == NULL)
> > >> +		goto drop_pkt_and_exit;
> > >> +
> > >> +	/* SPI on the packet should match with the one in SA */
> > >> +	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
> > >> +		goto drop_pkt_and_exit;
> > >> +
> > >> +route_and_send_pkt:
> > >> +	port_id = get_route(pkt, rt, type);
> > >> +	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
> > >> +		/* no match */
> > >> +		goto drop_pkt_and_exit;
> > >> +	}
> > >> +	/* else, we have a matching route */
> > >> +
> > >> +	/* Update mac addresses */
> > >> +	update_mac_addrs(pkt, port_id);
> > >> +
> > >> +	/* Update the event with the dest port */
> > >> +	ipsec_event_pre_forward(pkt, port_id);
> > >> +	return 1;
> > >> +
> > >> +drop_pkt_and_exit:
> > >> +	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
> > >> +	rte_pktmbuf_free(pkt);
> > >> +	ev->mbuf = NULL;
> > >> +	return 0;
> > >> +}
> > >> +
> > >> +static inline int
> > >> +process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
> > >> +		struct rte_event *ev)
> > >> +{
> > >> +	struct rte_ipsec_session *sess;
> > >> +	struct sa_ctx *sa_ctx;
> > >> +	struct rte_mbuf *pkt;
> > >> +	uint16_t port_id = 0;
> > >> +	struct ipsec_sa *sa;
> > >> +	enum pkt_type type;
> > >> +	uint32_t sa_idx;
> > >> +	uint8_t *nlp;
> > >> +
> > >> +	/* Get pkt from event */
> > >> +	pkt = ev->mbuf;
> > >> +
> > >> +	/* Check the packet type */
> > >> +	type = process_ipsec_get_pkt_type(pkt, &nlp);
> > >> +
> > >> +	switch (type) {
> > >> +	case PKT_TYPE_PLAIN_IPV4:
> > >> +		/* Check if we have a match */
> > >> +		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
> > >> +			/* No valid match */
> > >> +			goto drop_pkt_and_exit;
> > >> +		}
> > >> +		break;
> > >> +	case PKT_TYPE_PLAIN_IPV6:
> > >> +		/* Check if we have a match */
> > >> +		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
> > >> +			/* No valid match */
> > >> +			goto drop_pkt_and_exit;
> > >> +		}
> > >> +		break;
> > >> +	default:
> > >> +		/*
> > >> +		 * Only plain IPv4 & IPv6 packets are allowed
> > >> +		 * on protected port. Drop the rest.
> > >> +		 */
> > >> +		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
> > >> +		goto drop_pkt_and_exit;
> > >> +	}
> > >> +
> > >> +	/* Check if the packet has to be bypassed */
> > >> +	if (sa_idx == BYPASS) {
> > >> +		port_id = get_route(pkt, rt, type);
> > >> +		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
> > >> +			/* no match */
> > >> +			goto drop_pkt_and_exit;
> > >> +		}
> > >> +		/* else, we have a matching route */
> > >> +		goto send_pkt;
> > >> +	}
> > >> +
> > >> +	/* Validate sa_idx */
> > >> +	if (sa_idx >= ctx->sa_ctx->nb_sa)
> > >> +		goto drop_pkt_and_exit;
> > >> +
> > >> +	/* Else the packet has to be protected */
> > >> +
> > >> +	/* Get SA ctx*/
> > >> +	sa_ctx = ctx->sa_ctx;
> > >> +
> > >> +	/* Get SA */
> > >> +	sa = &(sa_ctx->sa[sa_idx]);
> > >> +
> > >> +	/* Get IPsec session */
> > >> +	sess = ipsec_get_primary_session(sa);
> > >> +
> > >> +	/* Allow only inline protocol for now */
> > >> +	if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
> > >> +		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
> > >> +		goto drop_pkt_and_exit;
> > >> +	}
> > >> +
> > >> +	if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
> > >> +		pkt->userdata = sess->security.ses;
> > >> +
> > >> +	/* Mark the packet for Tx security offload */
> > >> +	pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
> > >> +
> > >> +	/* Get the port to which this pkt need to be submitted */
> > >> +	port_id = sa->portid;
> > >> +
> > >> +send_pkt:
> > >> +	/* Update mac addresses */
> > >> +	update_mac_addrs(pkt, port_id);
> > >> +
> > >> +	/* Update the event with the dest port */
> > >> +	ipsec_event_pre_forward(pkt, port_id);
> > >
> > > How is the IP checksum getting updated for the processed packet?
> > > If the hardware is not updating it, should we add a fallback mechanism
> > > for SW-based checksum update?
> > >
> >
> > [Lukasz] In case of outbound inline protocol the checksum has to be
> > calculated by HW, as the final packet is formed by the crypto device. There
> > is no need to calculate it in SW.
> >
> > >> +	return 1;
> > >
> > > It will be better to use some macros for the return values, like:
> > > #define PKT_FORWARD   1
> > > #define PKT_DROPPED     0
> > > #define PKT_POSTED       2  /* may be for lookaside cases */
> > >
> > >> +
> > >> +drop_pkt_and_exit:
> > >> +	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
> > >> +	rte_pktmbuf_free(pkt);
> > >> +	ev->mbuf = NULL;
> > >> +	return 0;
> > >> +}
> > >> +
> > >>  /*
> > >>   * Event mode exposes various operating modes depending on the
> > >>   * capabilities of the event device and the operating mode
> > >> @@ -68,7 +392,7 @@ prepare_out_sessions_tbl(struct sa_ctx *sa_out,
> > >>   */
> > >>
> > >>  /* Workers registered */
> > >> -#define IPSEC_EVENTMODE_WORKERS		1
> > >> +#define IPSEC_EVENTMODE_WORKERS		2
> > >>
> > >>  /*
> > >>   * Event mode worker
> > >> @@ -146,7 +470,7 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct
> > >> eh_event_link_info *links,
> > >>  			}
> > >>
> > >>  			/* Save security session */
> > >> -			pkt->udata64 = (uint64_t) sess_tbl[port_id];
> > >> +			pkt->userdata = sess_tbl[port_id];
> > >>
> > >>  			/* Mark the packet for Tx security offload */
> > >>  			pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
> > >> @@ -165,6 +489,94 @@
> > >> ipsec_wrkr_non_burst_int_port_drv_mode(struct
> > >> eh_event_link_info *links,
> > >>  	}
> > >>  }
> > >>
> > >> +/*
> > >> + * Event mode worker
> > >> + * Operating parameters : non-burst - Tx internal port - app mode
> > >> + */
> > >> +static void
> > >> +ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
> > >> +		uint8_t nb_links)
> > >> +{
> > >> +	struct lcore_conf_ev_tx_int_port_wrkr lconf;
> > >> +	unsigned int nb_rx = 0;
> > >> +	struct rte_event ev;
> > >> +	uint32_t lcore_id;
> > >> +	int32_t socket_id;
> > >> +	int ret;
> > >> +
> > >> +	/* Check if we have links registered for this lcore */
> > >> +	if (nb_links == 0) {
> > >> +		/* No links registered - exit */
> > >> +		return;
> > >> +	}
> > >> +
> > >> +	/* We have valid links */
> > >> +
> > >> +	/* Get core ID */
> > >> +	lcore_id = rte_lcore_id();
> > >> +
> > >> +	/* Get socket ID */
> > >> +	socket_id = rte_lcore_to_socket_id(lcore_id);
> > >> +
> > >> +	/* Save routing table */
> > >> +	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
> > >> +	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
> > >> +	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
> > >> +	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
> > >> +	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
> > >> +	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
> > >
> > > Session_priv_pool should also be added for both inbound and outbound
> > >
> >
> > [Lukasz] I will add it in V5.
> 
> [Anoob] Actually, why do we need both session_pool and private_pool? I think it's a
> remnant from the time when the session was created when the first packet arrived.
> 
> @Konstantin, thoughts?

I think we do need it for lksd sessions.
See create_lookaside_session() in ipsec.c
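
To illustrate why both pools matter for the lksd case: the session header and
the per-device private data come from separate mempools. A minimal sketch,
assuming the DPDK 20.02-era cryptodev API; the function and variable names
here are illustrative only, create_lookaside_session() in ipsec.c remains the
authoritative code:

#include <errno.h>
#include <rte_cryptodev.h>

static int
lksd_session_create(uint8_t cdev_id, struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *session_pool,
		struct rte_mempool *session_priv_pool,
		struct rte_cryptodev_sym_session **sess)
{
	/* The session header is allocated from the session pool */
	*sess = rte_cryptodev_sym_session_create(session_pool);
	if (*sess == NULL)
		return -ENOMEM;

	/* The per-device private session data comes from the private pool */
	if (rte_cryptodev_sym_session_init(cdev_id, *sess, xforms,
			session_priv_pool) < 0) {
		rte_cryptodev_sym_session_free(*sess);
		return -EINVAL;
	}

	return 0;
}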

> 
> >
> > >> +	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
> > >> +	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
> > >> +	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
> > >> +	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
> > >> +
> > >> +	RTE_LOG(INFO, IPSEC,
> > >> +		"Launching event mode worker (non-burst - Tx internal port - "
> > >> +		"app mode) on lcore %d\n", lcore_id);
> > >> +
> > >> +	/* Check if it's single link */
> > >> +	if (nb_links != 1) {
> > >> +		RTE_LOG(INFO, IPSEC,
> > >> +			"Multiple links not supported. Using first link\n");
> > >> +	}
> > >> +
> > >> +	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
> > >> +		links[0].event_port_id);
> > >> +
> > >> +	while (!force_quit) {
> > >> +		/* Read packet from event queues */
> > >> +		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
> > >> +				links[0].event_port_id,
> > >> +				&ev,     /* events */
> > >> +				1,       /* nb_events */
> > >> +				0        /* timeout_ticks */);
> > >> +
> > >> +		if (nb_rx == 0)
> > >> +			continue;
> > >> +
> > >
> > > Event type should be checked here before dereferencing it.
> > >
> >
> > [Lukasz] I will add event type check in V5.
> >
> > >> +		if (is_unprotected_port(ev.mbuf->port))
> > >> +			ret = process_ipsec_ev_inbound(&lconf.inbound,
> > >> +							&lconf.rt, &ev);
> > >> +		else
> > >> +			ret = process_ipsec_ev_outbound(&lconf.outbound,
> > >> +							&lconf.rt, &ev);
> > >> +		if (ret != 1)
> > >> +			/* The pkt has been dropped */
> > >> +			continue;
> > >> +
> > >> +		/*
> > >> +		 * Since tx internal port is available, events can be
> > >> +		 * directly enqueued to the adapter and it would be
> > >> +		 * internally submitted to the eth device.
> > >> +		 */
> > >> +		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> > >> +				links[0].event_port_id,
> > >> +				&ev,	/* events */
> > >> +				1,	/* nb_events */
> > >> +				0	/* flags */);
> > >> +	}
> > >> +}
> > >> +
> > >>  static uint8_t
> > >>  ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params
> > >> *wrkrs)
> > >>  {
> > >> @@ -180,6 +592,14 @@ ipsec_eventmode_populate_wrkr_params(struct
> > >> eh_app_worker_params *wrkrs)
> > >>  	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
> > >>  	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
> > >>  	wrkr++;
> > >> +	nb_wrkr_param++;
> > >> +
> > >> +	/* Non-burst - Tx internal port - app mode */
> > >> +	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
> > >> +	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
> > >> +	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
> > >> +	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
> > >> +	nb_wrkr_param++;
> > >>
> > >>  	return nb_wrkr_param;
> > >>  }
> > >> diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-
> > >> secgw/ipsec_worker.h new file mode 100644 index 0000000..87b4f22
> > >> --- /dev/null
> > >> +++ b/examples/ipsec-secgw/ipsec_worker.h
> > >> @@ -0,0 +1,35 @@
> > >> +/* SPDX-License-Identifier: BSD-3-Clause
> > >> + * Copyright (C) 2020 Marvell International Ltd.
> > >> + */
> > >> +#ifndef _IPSEC_WORKER_H_
> > >> +#define _IPSEC_WORKER_H_
> > >> +
> > >> +#include "ipsec.h"
> > >> +
> > >> +enum pkt_type {
> > >> +	PKT_TYPE_PLAIN_IPV4 = 1,
> > >> +	PKT_TYPE_IPSEC_IPV4,
> > >> +	PKT_TYPE_PLAIN_IPV6,
> > >> +	PKT_TYPE_IPSEC_IPV6,
> > >> +	PKT_TYPE_INVALID
> > >> +};
> > >> +
> > >> +struct route_table {
> > >> +	struct rt_ctx *rt4_ctx;
> > >> +	struct rt_ctx *rt6_ctx;
> > >> +};
> > >> +
> > >> +/*
> > >> + * Conf required by event mode worker with tx internal port
> > >> + */
> > >> +struct lcore_conf_ev_tx_int_port_wrkr {
> > >> +	struct ipsec_ctx inbound;
> > >> +	struct ipsec_ctx outbound;
> > >> +	struct route_table rt;
> > >> +} __rte_cache_aligned;
> > >> +
> > >> +void ipsec_poll_mode_worker(void);
> > >> +
> > >> +int ipsec_launch_one_lcore(void *args);
> > >> +
> > >> +#endif /* _IPSEC_WORKER_H_ */
> > >> --
> > >> 2.7.4
> > >
  
Anoob Joseph Feb. 26, 2020, 4:33 a.m. UTC | #5
Hi Konstantin,

Please see inline.

Thanks,
Anoob

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Tuesday, February 25, 2020 9:34 PM
> To: Anoob Joseph <anoobj@marvell.com>; Akhil Goyal
> <akhil.goyal@nxp.com>
> Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Narayana Prasad Raju
> Athreya <pathreya@marvell.com>; Ankur Dwivedi
> <adwivedi@marvell.com>; Archana Muniganti <marchana@marvell.com>;
> Tejasree Kondoj <ktejasree@marvell.com>; Vamsi Krishna Attunuru
> <vattunuru@marvell.com>; dev@dpdk.org; Thomas Monjalon
> <thomas@monjalon.net>; Nicolau, Radu <radu.nicolau@intel.com>; Lukas
> Bartosik <lbartosik@marvell.com>
> Subject: RE: [EXT] RE: [PATCH v4 12/15] examples/ipsec-secgw: add app
> mode worker
> 
> > > >> Add application inbound/outbound worker thread and IPsec
> > > >> application processing code for event mode.
> > > >>
> > > >> Example ipsec-secgw command in app mode:
> > > >> ipsec-secgw -w 0002:02:00.0,ipsec_in_max_spi=128 -w
> > > >> 0002:03:00.0,ipsec_in_max_spi=128 -w 0002:0e:00.0 -w 0002:10:00.1
> > > >> --log-level=8 -c 0x1 -- -P -p 0x3 -u 0x1 --config "(1,0,0),(0,0,0)"
> > > >> -f aes-gcm.cfg --transfer-mode event --event-schedule-type
> > > >> parallel
> > > >>
> > > >> Signed-off-by: Anoob Joseph <anoobj@marvell.com>
> > > >> Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
> > > >> Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
> > > >> ---
> > > >
> > > > ...
> > > >
> > > >> +static inline enum pkt_type
> > > >> +process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp) {
> > > >> +	struct rte_ether_hdr *eth;
> > > >> +
> > > >> +	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> > > >> +	if (eth->ether_type ==
> rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
> > > >> +		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
> > > >> +				offsetof(struct ip, ip_p));
> > > >> +		if (**nlp == IPPROTO_ESP)
> > > >> +			return PKT_TYPE_IPSEC_IPV4;
> > > >> +		else
> > > >> +			return PKT_TYPE_PLAIN_IPV4;
> > > >> +	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
> > > >> +		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
> > > >> +				offsetof(struct ip6_hdr, ip6_nxt));
> > > >> +		if (**nlp == IPPROTO_ESP)
> > > >> +			return PKT_TYPE_IPSEC_IPV6;
> > > >> +		else
> > > >> +			return PKT_TYPE_PLAIN_IPV6;
> > > >> +	}
> > > >> +
> > > >> +	/* Unknown/Unsupported type */
> > > >> +	return PKT_TYPE_INVALID;
> > > >> +}
> > > >> +
> > > >> +static inline void
> > > >> +update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid) {
> > > >> +	struct rte_ether_hdr *ethhdr;
> > > >> +
> > > >> +	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> > > >> +	memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src,
> > > >> RTE_ETHER_ADDR_LEN);
> > > >> +	memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst,
> > > >> RTE_ETHER_ADDR_LEN);
> > > >> +}
> > > >>
> > > >>  static inline void
> > > >>  ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int
> > > >> port_id) @@
> > > >> -61,6 +101,290 @@ prepare_out_sessions_tbl(struct sa_ctx *sa_out,
> > > >>  	}
> > > >>  }
> > > >>
> > > >> +static inline int
> > > >> +check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx) {
> > > >> +	uint32_t res;
> > > >> +
> > > >> +	if (unlikely(sp == NULL))
> > > >> +		return 0;
> > > >> +
> > > >> +	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
> > > >> +			DEFAULT_MAX_CATEGORIES);
> > > >> +
> > > >> +	if (unlikely(res == 0)) {
> > > >> +		/* No match */
> > > >> +		return 0;
> > > >> +	}
> > > >> +
> > > >> +	if (res == DISCARD)
> > > >> +		return 0;
> > > >> +	else if (res == BYPASS) {
> > > >> +		*sa_idx = -1;
> > > >> +		return 1;
> > > >> +	}
> > > >> +
> > > >> +	*sa_idx = res - 1;
> > > >> +	return 1;
> > > >> +}
> > > >> +
> > > >> +static inline uint16_t
> > > >> +route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) {
> > > >> +	uint32_t dst_ip;
> > > >> +	uint16_t offset;
> > > >> +	uint32_t hop;
> > > >> +	int ret;
> > > >> +
> > > >> +	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
> > > >> +	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
> > > >> +	dst_ip = rte_be_to_cpu_32(dst_ip);
> > > >> +
> > > >> +	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip,
> &hop);
> > > >> +
> > > >> +	if (ret == 0) {
> > > >> +		/* We have a hit */
> > > >> +		return hop;
> > > >> +	}
> > > >> +
> > > >> +	/* else */
> > > >> +	return RTE_MAX_ETHPORTS;
> > > >> +}
> > > >> +
> > > >> +/* TODO: To be tested */
> > > >> +static inline uint16_t
> > > >> +route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx) {
> > > >> +	uint8_t dst_ip[16];
> > > >> +	uint8_t *ip6_dst;
> > > >> +	uint16_t offset;
> > > >> +	uint32_t hop;
> > > >> +	int ret;
> > > >> +
> > > >> +	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr,
> ip6_dst);
> > > >> +	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
> > > >> +	memcpy(&dst_ip[0], ip6_dst, 16);
> > > >> +
> > > >> +	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip,
> &hop);
> > > >> +
> > > >> +	if (ret == 0) {
> > > >> +		/* We have a hit */
> > > >> +		return hop;
> > > >> +	}
> > > >> +
> > > >> +	/* else */
> > > >> +	return RTE_MAX_ETHPORTS;
> > > >> +}
> > > >> +
> > > >> +static inline uint16_t
> > > >> +get_route(struct rte_mbuf *pkt, struct route_table *rt, enum
> > > >> +pkt_type type) {
> > > >> +	if (type == PKT_TYPE_PLAIN_IPV4 || type ==
> PKT_TYPE_IPSEC_IPV4)
> > > >> +		return route4_pkt(pkt, rt->rt4_ctx);
> > > >> +	else if (type == PKT_TYPE_PLAIN_IPV6 || type ==
> PKT_TYPE_IPSEC_IPV6)
> > > >> +		return route6_pkt(pkt, rt->rt6_ctx);
> > > >> +
> > > >> +	return RTE_MAX_ETHPORTS;
> > > >> +}
> > > >
> > > > Is it not possible to use the existing functions for finding
> > > > routes, checking packet types and checking security policies?
> > > > It will be very difficult to manage two separate functions for
> > > > the same work. I can see that the pkt->data_offs are not required
> > > > to be updated in the inline case, but can we split the existing
> > > > functions in two so that they can
> > > > be called in the appropriate cases?
> > > >
> > > > As you said in the cover note, lookaside protocol support is also
> > > > to be added. I tried adding it too, and it will get very
> > > > difficult to manage separate
> > > > functions for separate code paths.
> > > >
> > >
> > > [Lukasz] This was also Konstantin's comment during review of one of
> > > the previous revisions.
> > > The prepare_one_packet() and prepare_tx_pkt() do much more than we
> > > need, and for performance reasons we crafted new functions. For
> > > example, the process_ipsec_get_pkt_type() function returns nlp and
> > > whether the packet type is plain or IPsec. That's all.
> > > prepare_one_packet() processes packets in chunks and does much more -
> > > it adjusts the mbuf and packet length, then it demultiplexes packets
> > > into plain and IPsec flows and finally does inline checks. The same
> > > applies to update_mac_addrs() vs prepare_tx_pkt() and check_sp() vs
> > > inbound_sp_sa(): prepare_tx_pkt() and inbound_sp_sa() do more than we
> > > need in event mode.
> > >
> > > I understand your concern from the perspective of code maintenance,
> > > but on the other hand we are concerned with performance.
> > > The current code is not optimized to support the multiple processing
> > > modes introduced with rte_security. We can work on common routines
> > > once the other modes are added as well, so that we can come up with a
> > > better solution than what we have today.
> > >
> > > >> +
> > > >> +static inline int
> > > >> +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table
> *rt,
> > > >> +		struct rte_event *ev)
> > > >> +{
> > > >> +	struct ipsec_sa *sa = NULL;
> > > >> +	struct rte_mbuf *pkt;
> > > >> +	uint16_t port_id = 0;
> > > >> +	enum pkt_type type;
> > > >> +	uint32_t sa_idx;
> > > >> +	uint8_t *nlp;
> > > >> +
> > > >> +	/* Get pkt from event */
> > > >> +	pkt = ev->mbuf;
> > > >> +
> > > >> +	/* Check the packet type */
> > > >> +	type = process_ipsec_get_pkt_type(pkt, &nlp);
> > > >> +
> > > >> +	switch (type) {
> > > >> +	case PKT_TYPE_PLAIN_IPV4:
> > > >> +		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
> > > >> +			if (unlikely(pkt->ol_flags &
> > > >> +				     PKT_RX_SEC_OFFLOAD_FAILED)) {
> > > >> +				RTE_LOG(ERR, IPSEC,
> > > >> +					"Inbound security offload
> failed\n");
> > > >> +				goto drop_pkt_and_exit;
> > > >> +			}
> > > >> +			sa = pkt->userdata;
> > > >> +		}
> > > >> +
> > > >> +		/* Check if we have a match */
> > > >> +		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
> > > >> +			/* No valid match */
> > > >> +			goto drop_pkt_and_exit;
> > > >> +		}
> > > >> +		break;
> > > >> +
> > > >> +	case PKT_TYPE_PLAIN_IPV6:
> > > >> +		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
> > > >> +			if (unlikely(pkt->ol_flags &
> > > >> +				     PKT_RX_SEC_OFFLOAD_FAILED)) {
> > > >> +				RTE_LOG(ERR, IPSEC,
> > > >> +					"Inbound security offload
> failed\n");
> > > >> +				goto drop_pkt_and_exit;
> > > >> +			}
> > > >> +			sa = pkt->userdata;
> > > >> +		}
> > > >> +
> > > >> +		/* Check if we have a match */
> > > >> +		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
> > > >> +			/* No valid match */
> > > >> +			goto drop_pkt_and_exit;
> > > >> +		}
> > > >> +		break;
> > > >> +
> > > >> +	default:
> > > >> +		RTE_LOG(ERR, IPSEC, "Unsupported packet type =
> %d\n", type);
> > > >> +		goto drop_pkt_and_exit;
> > > >> +	}
> > > >> +
> > > >> +	/* Check if the packet has to be bypassed */
> > > >> +	if (sa_idx == BYPASS)
> > > >> +		goto route_and_send_pkt;
> > > >> +
> > > >> +	/* Validate sa_idx */
> > > >> +	if (sa_idx >= ctx->sa_ctx->nb_sa)
> > > >> +		goto drop_pkt_and_exit;
> > > >> +
> > > >> +	/* Else the packet has to be protected with SA */
> > > >> +
> > > >> +	/* If the packet was IPsec processed, then SA pointer should
> be set */
> > > >> +	if (sa == NULL)
> > > >> +		goto drop_pkt_and_exit;
> > > >> +
> > > >> +	/* SPI on the packet should match with the one in SA */
> > > >> +	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
> > > >> +		goto drop_pkt_and_exit;
> > > >> +
> > > >> +route_and_send_pkt:
> > > >> +	port_id = get_route(pkt, rt, type);
> > > >> +	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
> > > >> +		/* no match */
> > > >> +		goto drop_pkt_and_exit;
> > > >> +	}
> > > >> +	/* else, we have a matching route */
> > > >> +
> > > >> +	/* Update mac addresses */
> > > >> +	update_mac_addrs(pkt, port_id);
> > > >> +
> > > >> +	/* Update the event with the dest port */
> > > >> +	ipsec_event_pre_forward(pkt, port_id);
> > > >> +	return 1;
> > > >> +
> > > >> +drop_pkt_and_exit:
> > > >> +	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
> > > >> +	rte_pktmbuf_free(pkt);
> > > >> +	ev->mbuf = NULL;
> > > >> +	return 0;
> > > >> +}
> > > >> +
> > > >> +static inline int
> > > >> +process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct
> route_table *rt,
> > > >> +		struct rte_event *ev)
> > > >> +{
> > > >> +	struct rte_ipsec_session *sess;
> > > >> +	struct sa_ctx *sa_ctx;
> > > >> +	struct rte_mbuf *pkt;
> > > >> +	uint16_t port_id = 0;
> > > >> +	struct ipsec_sa *sa;
> > > >> +	enum pkt_type type;
> > > >> +	uint32_t sa_idx;
> > > >> +	uint8_t *nlp;
> > > >> +
> > > >> +	/* Get pkt from event */
> > > >> +	pkt = ev->mbuf;
> > > >> +
> > > >> +	/* Check the packet type */
> > > >> +	type = process_ipsec_get_pkt_type(pkt, &nlp);
> > > >> +
> > > >> +	switch (type) {
> > > >> +	case PKT_TYPE_PLAIN_IPV4:
> > > >> +		/* Check if we have a match */
> > > >> +		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
> > > >> +			/* No valid match */
> > > >> +			goto drop_pkt_and_exit;
> > > >> +		}
> > > >> +		break;
> > > >> +	case PKT_TYPE_PLAIN_IPV6:
> > > >> +		/* Check if we have a match */
> > > >> +		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
> > > >> +			/* No valid match */
> > > >> +			goto drop_pkt_and_exit;
> > > >> +		}
> > > >> +		break;
> > > >> +	default:
> > > >> +		/*
> > > >> +		 * Only plain IPv4 & IPv6 packets are allowed
> > > >> +		 * on protected port. Drop the rest.
> > > >> +		 */
> > > >> +		RTE_LOG(ERR, IPSEC, "Unsupported packet type =
> %d\n", type);
> > > >> +		goto drop_pkt_and_exit;
> > > >> +	}
> > > >> +
> > > >> +	/* Check if the packet has to be bypassed */
> > > >> +	if (sa_idx == BYPASS) {
> > > >> +		port_id = get_route(pkt, rt, type);
> > > >> +		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
> > > >> +			/* no match */
> > > >> +			goto drop_pkt_and_exit;
> > > >> +		}
> > > >> +		/* else, we have a matching route */
> > > >> +		goto send_pkt;
> > > >> +	}
> > > >> +
> > > >> +	/* Validate sa_idx */
> > > >> +	if (sa_idx >= ctx->sa_ctx->nb_sa)
> > > >> +		goto drop_pkt_and_exit;
> > > >> +
> > > >> +	/* Else the packet has to be protected */
> > > >> +
> > > >> +	/* Get SA ctx*/
> > > >> +	sa_ctx = ctx->sa_ctx;
> > > >> +
> > > >> +	/* Get SA */
> > > >> +	sa = &(sa_ctx->sa[sa_idx]);
> > > >> +
> > > >> +	/* Get IPsec session */
> > > >> +	sess = ipsec_get_primary_session(sa);
> > > >> +
> > > >> +	/* Allow only inline protocol for now */
> > > >> +	if (sess->type !=
> RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
> > > >> +		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
> > > >> +		goto drop_pkt_and_exit;
> > > >> +	}
> > > >> +
> > > >> +	if (sess->security.ol_flags &
> RTE_SECURITY_TX_OLOAD_NEED_MDATA)
> > > >> +		pkt->userdata = sess->security.ses;
> > > >> +
> > > >> +	/* Mark the packet for Tx security offload */
> > > >> +	pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
> > > >> +
> > > >> +	/* Get the port to which this pkt need to be submitted */
> > > >> +	port_id = sa->portid;
> > > >> +
> > > >> +send_pkt:
> > > >> +	/* Update mac addresses */
> > > >> +	update_mac_addrs(pkt, port_id);
> > > >> +
> > > >> +	/* Update the event with the dest port */
> > > >> +	ipsec_event_pre_forward(pkt, port_id);
> > > >
> > > > How is the IP checksum getting updated for the processed packet?
> > > > If the hardware is not updating it, should we add a fallback
> > > > mechanism for SW-based checksum update?
> > > >
> > >
> > > [Lukasz] In case of outbound inline protocol the checksum has to be
> > > calculated by HW, as the final packet is formed by the crypto device.
> > > There is no need to calculate it in SW.
> > >
> > > >> +	return 1;
> > > >
> > > > It will be better to use some macros for the return values, like:
> > > > #define PKT_FORWARD   1
> > > > #define PKT_DROPPED     0
> > > > #define PKT_POSTED       2  /* may be for lookaside cases */
> > > >
> > > >> +
> > > >> +drop_pkt_and_exit:
> > > >> +	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
> > > >> +	rte_pktmbuf_free(pkt);
> > > >> +	ev->mbuf = NULL;
> > > >> +	return 0;
> > > >> +}
> > > >> +
> > > >>  /*
> > > >>   * Event mode exposes various operating modes depending on the
> > > >>   * capabilities of the event device and the operating mode @@
> > > >> -68,7
> > > >> +392,7 @@ prepare_out_sessions_tbl(struct sa_ctx *sa_out,
> > > >>   */
> > > >>
> > > >>  /* Workers registered */
> > > >> -#define IPSEC_EVENTMODE_WORKERS		1
> > > >> +#define IPSEC_EVENTMODE_WORKERS		2
> > > >>
> > > >>  /*
> > > >>   * Event mode worker
> > > >> @@ -146,7 +470,7 @@
> ipsec_wrkr_non_burst_int_port_drv_mode(struct
> > > >> eh_event_link_info *links,
> > > >>  			}
> > > >>
> > > >>  			/* Save security session */
> > > >> -			pkt->udata64 = (uint64_t) sess_tbl[port_id];
> > > >> +			pkt->userdata = sess_tbl[port_id];
> > > >>
> > > >>  			/* Mark the packet for Tx security offload */
> > > >>  			pkt->ol_flags |= PKT_TX_SEC_OFFLOAD; @@ -165,6
> > > +489,94 @@
> > > >> ipsec_wrkr_non_burst_int_port_drv_mode(struct
> > > >> eh_event_link_info *links,
> > > >>  	}
> > > >>  }
> > > >>
> > > >> +/*
> > > >> + * Event mode worker
> > > >> + * Operating parameters : non-burst - Tx internal port - app
> > > >> +mode */ static void
> > > >> +ipsec_wrkr_non_burst_int_port_app_mode(struct
> > > >> +eh_event_link_info *links,
> > > >> +		uint8_t nb_links)
> > > >> +{
> > > >> +	struct lcore_conf_ev_tx_int_port_wrkr lconf;
> > > >> +	unsigned int nb_rx = 0;
> > > >> +	struct rte_event ev;
> > > >> +	uint32_t lcore_id;
> > > >> +	int32_t socket_id;
> > > >> +	int ret;
> > > >> +
> > > >> +	/* Check if we have links registered for this lcore */
> > > >> +	if (nb_links == 0) {
> > > >> +		/* No links registered - exit */
> > > >> +		return;
> > > >> +	}
> > > >> +
> > > >> +	/* We have valid links */
> > > >> +
> > > >> +	/* Get core ID */
> > > >> +	lcore_id = rte_lcore_id();
> > > >> +
> > > >> +	/* Get socket ID */
> > > >> +	socket_id = rte_lcore_to_socket_id(lcore_id);
> > > >> +
> > > >> +	/* Save routing table */
> > > >> +	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
> > > >> +	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
> > > >> +	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
> > > >> +	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
> > > >> +	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
> > > >> +	lconf.inbound.session_pool =
> > > >> +socket_ctx[socket_id].session_pool;
> > > >
> > > > Session_priv_pool should also be added for both inbound and
> > > > outbound
> > > >
> > >
> > > [Lukasz] I will add it in V5.
> >
> > [Anoob] Actually, why do we need both session_pool and private_pool? I
> > think it's a remnant from the time when the session was created when the
> > first packet arrived.
> >
> > @Konstantin, thoughts?
> 
> I think we do need it for lksd sessions.
> See create_lookaside_session() in ipsec.c

[Anoob] You are right. It seems that for lookaside we still create the session only when the first packet arrives. The fix was done only for inline.

That said, do you think we should fix the same for lookaside as well? Session creation is usually treated as a control path entity, and ipsec-secgw doesn't support changing sessions on the fly either. Yet in ipsec-secgw we create sessions in the data path. Also, once we do this, both inline & lookaside will get the same kind of treatment.

Do you think there is any value in retaining the current behavior? If not, I can take this up following the merge.
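
If we go that way, a rough sketch of what moving lookaside session creation to
the control path could look like - a hypothetical init-time loop over the SA
database. The names and the create_lookaside_session() signature are
assumptions based on the current ipsec.c, not final code:

	uint32_t i;

	for (i = 0; i < sa_ctx->nb_sa; i++) {
		struct ipsec_sa *sa = &sa_ctx->sa[i];
		struct rte_ipsec_session *ips = ipsec_get_primary_session(sa);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_NONE ||
		    ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			if (create_lookaside_session(ipsec_ctx, sa, ips) < 0)
				rte_exit(EXIT_FAILURE,
					"failed to init lookaside session\n");
		}
	}

With that in place the data path would only look sessions up, never create
them, matching how the inline case behaves after the earlier fix.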
 
> 
> >
> > >
> > > >> +	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
> > > >> +	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
> > > >> +	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
> > > >> +	lconf.outbound.session_pool =
> > > >> +socket_ctx[socket_id].session_pool;
> > > >> +
> > > >> +	RTE_LOG(INFO, IPSEC,
> > > >> +		"Launching event mode worker (non-burst - Tx
> internal port - "
> > > >> +		"app mode) on lcore %d\n", lcore_id);
> > > >> +
> > > >> +	/* Check if it's single link */
> > > >> +	if (nb_links != 1) {
> > > >> +		RTE_LOG(INFO, IPSEC,
> > > >> +			"Multiple links not supported. Using first
> link\n");
> > > >> +	}
> > > >> +
> > > >> +	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n",
> lcore_id,
> > > >> +		links[0].event_port_id);
> > > >> +
> > > >> +	while (!force_quit) {
> > > >> +		/* Read packet from event queues */
> > > >> +		nb_rx =
> rte_event_dequeue_burst(links[0].eventdev_id,
> > > >> +				links[0].event_port_id,
> > > >> +				&ev,     /* events */
> > > >> +				1,       /* nb_events */
> > > >> +				0        /* timeout_ticks */);
> > > >> +
> > > >> +		if (nb_rx == 0)
> > > >> +			continue;
> > > >> +
> > > >
> > > > Event type should be checked here before dereferencing it.
> > > >
> > >
> > > [Lukasz] I will add event type check in V5.
> > >
> > > >> +		if (is_unprotected_port(ev.mbuf->port))
> > > >> +			ret =
> process_ipsec_ev_inbound(&lconf.inbound,
> > > >> +							&lconf.rt,
> &ev);
> > > >> +		else
> > > >> +			ret =
> process_ipsec_ev_outbound(&lconf.outbound,
> > > >> +							&lconf.rt,
> &ev);
> > > >> +		if (ret != 1)
> > > >> +			/* The pkt has been dropped */
> > > >> +			continue;
> > > >> +
> > > >> +		/*
> > > >> +		 * Since tx internal port is available, events can be
> > > >> +		 * directly enqueued to the adapter and it would be
> > > >> +		 * internally submitted to the eth device.
> > > >> +		 */
> > > >> +
> 	rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> > > >> +				links[0].event_port_id,
> > > >> +				&ev,	/* events */
> > > >> +				1,	/* nb_events */
> > > >> +				0	/* flags */);
> > > >> +	}
> > > >> +}
> > > >> +
> > > >>  static uint8_t
> > > >>  ipsec_eventmode_populate_wrkr_params(struct
> eh_app_worker_params
> > > >> *wrkrs)
> > > >>  {
> > > >> @@ -180,6 +592,14 @@
> ipsec_eventmode_populate_wrkr_params(struct
> > > >> eh_app_worker_params *wrkrs)
> > > >>  	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
> > > >>  	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
> > > >>  	wrkr++;
> > > >> +	nb_wrkr_param++;
> > > >> +
> > > >> +	/* Non-burst - Tx internal port - app mode */
> > > >> +	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
> > > >> +	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
> > > >> +	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
> > > >> +	wrkr->worker_thread =
> ipsec_wrkr_non_burst_int_port_app_mode;
> > > >> +	nb_wrkr_param++;
> > > >>
> > > >>  	return nb_wrkr_param;
> > > >>  }
> > > >> diff --git a/examples/ipsec-secgw/ipsec_worker.h
> > > >> b/examples/ipsec- secgw/ipsec_worker.h new file mode 100644
> index
> > > >> 0000000..87b4f22
> > > >> --- /dev/null
> > > >> +++ b/examples/ipsec-secgw/ipsec_worker.h
> > > >> @@ -0,0 +1,35 @@
> > > >> +/* SPDX-License-Identifier: BSD-3-Clause
> > > >> + * Copyright (C) 2020 Marvell International Ltd.
> > > >> + */
> > > >> +#ifndef _IPSEC_WORKER_H_
> > > >> +#define _IPSEC_WORKER_H_
> > > >> +
> > > >> +#include "ipsec.h"
> > > >> +
> > > >> +enum pkt_type {
> > > >> +	PKT_TYPE_PLAIN_IPV4 = 1,
> > > >> +	PKT_TYPE_IPSEC_IPV4,
> > > >> +	PKT_TYPE_PLAIN_IPV6,
> > > >> +	PKT_TYPE_IPSEC_IPV6,
> > > >> +	PKT_TYPE_INVALID
> > > >> +};
> > > >> +
> > > >> +struct route_table {
> > > >> +	struct rt_ctx *rt4_ctx;
> > > >> +	struct rt_ctx *rt6_ctx;
> > > >> +};
> > > >> +
> > > >> +/*
> > > >> + * Conf required by event mode worker with tx internal port
> > > >> + */
> > > >> +struct lcore_conf_ev_tx_int_port_wrkr {
> > > >> +	struct ipsec_ctx inbound;
> > > >> +	struct ipsec_ctx outbound;
> > > >> +	struct route_table rt;
> > > >> +} __rte_cache_aligned;
> > > >> +
> > > >> +void ipsec_poll_mode_worker(void);
> > > >> +
> > > >> +int ipsec_launch_one_lcore(void *args);
> > > >> +
> > > >> +#endif /* _IPSEC_WORKER_H_ */
> > > >> --
> > > >> 2.7.4
> > > >
  
Akhil Goyal Feb. 26, 2020, 5:55 a.m. UTC | #6
Hi Anoob,

> > > > >> +	/* Get core ID */
> > > > >> +	lcore_id = rte_lcore_id();
> > > > >> +
> > > > >> +	/* Get socket ID */
> > > > >> +	socket_id = rte_lcore_to_socket_id(lcore_id);
> > > > >> +
> > > > >> +	/* Save routing table */
> > > > >> +	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
> > > > >> +	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
> > > > >> +	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
> > > > >> +	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
> > > > >> +	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
> > > > >> +	lconf.inbound.session_pool =
> > > > >> +socket_ctx[socket_id].session_pool;
> > > > >
> > > > > Session_priv_pool should also be added for both inbound and
> > > > > outbound
> > > > >
> > > >
> > > > [Lukasz] I will add it in V5.
> > >
> > > [Anoob] Actually, why do we need both session_pool and private_pool? I
> > > think it's a remnant from the time when the session was created when the
> > > first packet arrived.
> > >
> > > @Konstantin, thoughts?
> >
> > I think we do need it for lksd sessions.
> > See create_lookaside_session() in ipsec.c
> 
> [Anoob] You are right. It seems that for lookaside we still create the
> session only when the first packet arrives. The fix was done only for inline.
> 
> That said, do you think we should fix the same for lookaside as well?
> Session creation is usually treated as a control path entity, and
> ipsec-secgw doesn't support changing sessions on the fly either. Yet in
> ipsec-secgw we create sessions in the data path. Also, once we do this, both
> inline & lookaside will get the same kind of treatment.
> 
> Do you think there is any value in retaining the current behavior? If not,
> I can take this up following the merge.
> 

Yes, we need that for the lookaside cases.

And yes, session creation was added in the control path for the inline cases
only. We can move that part for the lookaside cases as well.

Earlier the patch was submitted for both but had issues in the lookaside
cases, so Intel fixed just the inline cases, as that was necessary there (the
first packet was getting dropped).

Regards,
Akhil
  
Akhil Goyal Feb. 26, 2020, 6:04 a.m. UTC | #7
Hi Lukasz,

> >
> > Is it not possible to use the existing functions for finding routes, checking
> > packet types and checking security policies?
> > It will be very difficult to manage two separate functions for the same work. I can
> > see that the pkt->data_offs
> > are not required to be updated in the inline case, but can we split the existing
> > functions in two so that they can be
> > called in the appropriate cases?
> >
> > As you said in the cover note, lookaside protocol support is also to be added. I
> > tried adding it too, and it will get very
> > difficult to manage separate functions for separate code paths.
> >
> 
> [Lukasz] This was also Konstantin's comment during review of one of the previous
> revisions.
> The prepare_one_packet() and prepare_tx_pkt() do much more than we need,
> and for performance reasons
> we crafted new functions. For example, the process_ipsec_get_pkt_type() function
> returns nlp and whether
> the packet type is plain or IPsec. That's all. prepare_one_packet() processes packets in
> chunks and does much more -
> it adjusts the mbuf and packet length, then it demultiplexes packets into plain and IPsec
> flows and finally does
> inline checks. The same applies to update_mac_addrs() vs prepare_tx_pkt() and
> check_sp() vs inbound_sp_sa():
> prepare_tx_pkt() and inbound_sp_sa() do more than we need in event mode.
> 
> I understand your concern from the perspective of code maintenance, but on the
> other hand we are concerned with performance.
> The current code is not optimized to support the multiple processing modes
> introduced with rte_security. We can work on common
> routines once the other modes are added as well, so that we can come up with a
> better solution than what we have today.
> 

Yes, that is correct, but we should split the existing functions so that the part
which is common to both modes stays common and we do not have duplicate code in
the app.

I believe we should take care of this when we add the lookaside cases. We shall
remove all duplicate code. Ideally it should be part of this patchset, but we can
postpone it to the lookaside case addition.
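
For illustration, one possible shape of such a split - the classification core
added by this patch stays shared, while the poll-mode prepare_one_packet()
keeps its extra mbuf adjustments on top. A sketch only; the traffic_type field
names follow the current app and the demultiplexing is kept deliberately
minimal:

static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	uint8_t *nlp;

	/* ... poll-mode-only mbuf/data_off and length adjustments here ... */

	/* Shared with the event path */
	switch (process_ipsec_get_pkt_type(pkt, &nlp)) {
	case PKT_TYPE_PLAIN_IPV4:
		t->ip4.data[t->ip4.num] = nlp;
		t->ip4.pkts[t->ip4.num++] = pkt;
		break;
	case PKT_TYPE_PLAIN_IPV6:
		t->ip6.data[t->ip6.num] = nlp;
		t->ip6.pkts[t->ip6.num++] = pkt;
		break;
	case PKT_TYPE_IPSEC_IPV4:
	case PKT_TYPE_IPSEC_IPV6:
		t->ipsec.pkts[t->ipsec.num++] = pkt;
		break;
	default:
		rte_pktmbuf_free(pkt);
		break;
	}
}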


> 
> >> +	return 1;
> >
> > It will be better to use some macros for the return values, like:
> > #define PKT_FORWARD   1
> > #define PKT_DROPPED     0
> > #define PKT_POSTED       2  /* may be for lookaside cases */
> >

I think you missed this comment.

> >> +
> >> +drop_pkt_and_exit:
> >> +	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
> >> +	rte_pktmbuf_free(pkt);
> >> +	ev->mbuf = NULL;
> >> +	return 0;
> >> +}
> >> +
  
Lukas Bartosik [C] Feb. 26, 2020, 10:32 a.m. UTC | #8
Hi Akhil,

Please see my answer below.

Thanks,
Lukasz

On 26.02.2020 07:04, Akhil Goyal wrote:
> Hi Lukasz,
> 
>>>
>>> Is it not possible to use the existing functions for finding routes, checking
>>> packet types and checking security policies?
>>> It will be very difficult to manage two separate functions for the same work. I can
>>> see that the pkt->data_offs
>>> are not required to be updated in the inline case, but can we split the existing
>>> functions in two so that they can be
>>> called in the appropriate cases?
>>>
>>> As you said in the cover note, lookaside protocol support is also to be added. I
>>> tried adding it too, and it will get very
>>> difficult to manage separate functions for separate code paths.
>>>
>>
>> [Lukasz] This was also Konstantin's comment during review of one of the previous
>> revisions.
>> The prepare_one_packet() and prepare_tx_pkt() do much more than we need,
>> and for performance reasons
>> we crafted new functions. For example, the process_ipsec_get_pkt_type() function
>> returns nlp and whether
>> the packet type is plain or IPsec. That's all. prepare_one_packet() processes packets in
>> chunks and does much more -
>> it adjusts the mbuf and packet length, then it demultiplexes packets into plain and IPsec
>> flows and finally does
>> inline checks. The same applies to update_mac_addrs() vs prepare_tx_pkt() and
>> check_sp() vs inbound_sp_sa():
>> prepare_tx_pkt() and inbound_sp_sa() do more than we need in event mode.
>>
>> I understand your concern from the perspective of code maintenance, but on the
>> other hand we are concerned with performance.
>> The current code is not optimized to support the multiple processing modes
>> introduced with rte_security. We can work on common
>> routines once the other modes are added as well, so that we can come up with a
>> better solution than what we have today.
>>
> 
> Yes, that is correct, but we should split the existing functions so that the part which is common
> to both modes stays common and we do not have duplicate code in the app.
> 
> I believe we should take care of this when we add the lookaside cases. We shall remove all duplicate
> code. Ideally it should be part of this patchset, but we can postpone it to the lookaside case addition.
> 
> 
>>
>>>> +	return 1;
>>>
>>> It will be better to use some macros for the return values, like:
>>> #define PKT_FORWARD   1
>>> #define PKT_DROPPED     0
>>> #define PKT_POSTED       2  /* may be for lookaside cases */
>>>
> 
> I think you missed this comment.
> 

[Lukasz] Thank you for pointing out that I missed the comment. I will use macros when returning instead of magic numbers.
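
A sketch of the intended change - the macro names follow the suggestion above
and are not final:

#define PKT_DROPPED	0	/* pkt was freed, caller must not enqueue it */
#define PKT_FORWARD	1	/* pkt is ready for Tx adapter enqueue */
#define PKT_POSTED	2	/* reserved for future lookaside cases */

process_ipsec_ev_inbound()/process_ipsec_ev_outbound() would then return
PKT_FORWARD on success and PKT_DROPPED on the drop_pkt_and_exit path, and the
worker loop would compare against PKT_FORWARD instead of the literal 1.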

>>>> +
>>>> +drop_pkt_and_exit:
>>>> +	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
>>>> +	rte_pktmbuf_free(pkt);
>>>> +	ev->mbuf = NULL;
>>>> +	return 0;
>>>> +}
>>>> +
  
Ananyev, Konstantin Feb. 26, 2020, 12:36 p.m. UTC | #9
> > > > > >> +	/* Get core ID */
> > > > > >> +	lcore_id = rte_lcore_id();
> > > > > >> +
> > > > > >> +	/* Get socket ID */
> > > > > >> +	socket_id = rte_lcore_to_socket_id(lcore_id);
> > > > > >> +
> > > > > >> +	/* Save routing table */
> > > > > >> +	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
> > > > > >> +	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
> > > > > >> +	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
> > > > > >> +	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
> > > > > >> +	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
> > > > > >> +	lconf.inbound.session_pool =
> > > > > >> +socket_ctx[socket_id].session_pool;
> > > > > >
> > > > > > Session_priv_pool should also be added for both inbound and
> > > > > > outbound
> > > > > >
> > > > >
> > > > > [Lukasz] I will add it in V5.
> > > >
> > > > [Anoob] Actually, why do we need both session_pool and private_pool? I
> > > > think it's a remnant from the time when the session was created when the
> > > > first packet arrived.
> > > >
> > > > @Konstantin, thoughts?
> > >
> > > I think we do need it for lksd sessions.
> > > See create_lookaside_session() in ipsec.c
> >
> > [Anoob] You are right. It seems that for lookaside we still create the
> > session only when the first packet arrives. The fix was done only for inline.
> >
> > That said, do you think we should fix the same for lookaside as well?
> > Session creation is usually treated as a control path entity, and
> > ipsec-secgw doesn't support changing sessions on the fly either. Yet in
> > ipsec-secgw we create sessions in the data path. Also, once we do this,
> > both inline & lookaside will get the same kind of treatment.
> >
> > Do you think there is any value in retaining the current behavior? If not,
> > I can take this up following the merge.
> >
> 
> Yes, we need that for the lookaside cases.
> 
> And yes, session creation was added in the control path for the inline cases
> only. We can move that part for the lookaside cases as well.
> 
> Earlier the patch was submitted for both but had issues in the lookaside
> cases, so Intel fixed just the inline cases, as that was necessary there
> (the first packet was getting dropped).
> 

Yep, as Akhil pointed out, there were some problems with the lksd-proto cases,
as I remember.
I wouldn't object if we change the code to create all sessions at startup.
Though I think we would probably still need a pool for the private session data.
Konstantin
  
Akhil Goyal Feb. 27, 2020, 12:07 p.m. UTC | #10
> 
> Hi Lukasz,
> 
> > >
> > > Is it not possible to use the existing functions for finding routes, checking
> > > packet types and checking security policies?
> > > It will be very difficult to manage two separate functions for the same work.
> > > I can
> > > see that the pkt->data_offs
> > > are not required to be updated in the inline case, but can we split the existing
> > > functions in two so that they can be
> > > called in the appropriate cases?
> > >
> > > As you said in the cover note, lookaside protocol support is also to be added.
> > > I tried adding it too, and it will get very
> > > difficult to manage separate functions for separate code paths.
> > >
> >
> > [Lukasz] This was also Konstantin's comment during review of one of the previous
> > revisions.
> > The prepare_one_packet() and prepare_tx_pkt() do much more than we need,
> > and for performance reasons
> > we crafted new functions. For example, the process_ipsec_get_pkt_type() function
> > returns nlp and whether
> > the packet type is plain or IPsec. That's all. prepare_one_packet() processes
> > packets in chunks and does much more -
> > it adjusts the mbuf and packet length, then it demultiplexes packets into plain
> > and IPsec flows and finally does
> > inline checks. The same applies to update_mac_addrs() vs prepare_tx_pkt() and
> > check_sp() vs inbound_sp_sa():
> > prepare_tx_pkt() and inbound_sp_sa() do more than we need in event mode.
> >
> > I understand your concern from the perspective of code maintenance, but on
> > the other hand we are concerned with performance.
> > The current code is not optimized to support the multiple processing modes
> > introduced with rte_security. We can work on common
> > routines once the other modes are added as well, so that we can come up with
> > a better solution than what we have today.
> >
> 
> Yes, that is correct, but we should split the existing functions so that the part
> which is common
> to both modes stays common and we do not have duplicate code in the app.
> 
> I believe we should take care of this when we add the lookaside cases. We shall
> remove all duplicate
> code. Ideally it should be part of this patchset, but we can postpone it to the
> lookaside case addition.
> 
> 

I believe the route(4/6)_pkts and route(4/6)_pkt can be made uniform quite easily.
Then you can call either send_single_pkt() or rte_event_eth_tx_adapter_enqueue() from
the caller of route4_pkts.
I don't think this will impact the performance at all.
Instead of having 3 for loops, there will now be only 2, and nothing else changes for
anybody. In fact we can remove 1 more if we call the send routine from inside
route4_pkts(). I think that can also be done, but it may increase the lookup duration
as there may be cache misses. But that needs to be experimented with. What say?

static inline void
route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint32_t *hop[],
                uint8_t nb_pkts)
{
        uint32_t dst_ip;
        uint16_t i, offset;

        if (nb_pkts == 0)
                return;

        /* Need to do an LPM lookup for non-inline packets. Inline packets will
         * have port ID in the SA
         */

        for (i = 0; i < nb_pkts; i++) {
                if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
                        /* Security offload not enabled. So an LPM lookup is
                         * required to get the hop
                         */
                        offset = offsetof(struct ip, ip_dst);
                        dst_ip = *rte_pktmbuf_mtod_offset(pkts[i],
                                        uint32_t *, offset);
                        dst_ip = rte_be_to_cpu_32(dst_ip);
                        if (rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, hop[i]))
                                rte_pktmbuf_free(pkts[i]);
                } else {
                        *hop[i] = get_hop_for_offload_pkt(pkts[i], 0);
                }
        }
}
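
A hypothetical caller for the proposal above, to show how poll mode and event
mode could share the routine. All names are illustrative; hops[] is
pre-initialized to RTE_MAX_ETHPORTS so packets freed inside route4_pkts() can
be skipped on the send side, and send_single_packet() stands for the existing
poll-mode send helper:

        uint32_t hops[MAX_PKT_BURST];
        uint32_t *hop_ptrs[MAX_PKT_BURST];
        uint16_t i;

        for (i = 0; i < nb_pkts; i++) {
                hops[i] = RTE_MAX_ETHPORTS;
                hop_ptrs[i] = &hops[i];
        }

        route4_pkts(rt_ctx, pkts, hop_ptrs, nb_pkts);

        for (i = 0; i < nb_pkts; i++) {
                if (hops[i] == RTE_MAX_ETHPORTS)
                        continue; /* freed inside route4_pkts() */
                /* Poll mode sends directly; event mode would instead set
                 * the destination port on the event and enqueue it via
                 * rte_event_eth_tx_adapter_enqueue().
                 */
                send_single_packet(pkts[i], hops[i], IPPROTO_IP);
        }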
  
Lukas Bartosik [C] Feb. 27, 2020, 2:31 p.m. UTC | #11
Hi Akhil,

Please see my answer below.

Thanks,
Lukasz

On 27.02.2020 13:07, Akhil Goyal wrote:
>>
>> Hi Lukasz,
>>
>>>>
>>>> Is it not possible to use the existing functions for finding routes, checking
>>>> packet types and checking security policies?
>>>> It will be very difficult to manage two separate functions for the same work.
>>>> I can
>>>> see that the pkt->data_offs
>>>> are not required to be updated in the inline case, but can we split the existing
>>>> functions in two so that they can be
>>>> called in the appropriate cases?
>>>>
>>>> As you said in the cover note, lookaside protocol support is also to be added.
>>>> I tried adding it too, and it will get very
>>>> difficult to manage separate functions for separate code paths.
>>>>
>>>
>>> [Lukasz] This was also Konstantin's comment during review of one of the previous
>>> revisions.
>>> The prepare_one_packet() and prepare_tx_pkt() do much more than we need,
>>> and for performance reasons
>>> we crafted new functions. For example, the process_ipsec_get_pkt_type() function
>>> returns nlp and whether
>>> the packet type is plain or IPsec. That's all. prepare_one_packet() processes
>>> packets in chunks and does much more -
>>> it adjusts the mbuf and packet length, then it demultiplexes packets into plain
>>> and IPsec flows and finally does
>>> inline checks. The same applies to update_mac_addrs() vs prepare_tx_pkt() and
>>> check_sp() vs inbound_sp_sa():
>>> prepare_tx_pkt() and inbound_sp_sa() do more than we need in event mode.
>>>
>>> I understand your concern from the perspective of code maintenance, but on
>>> the other hand we are concerned with performance.
>>> The current code is not optimized to support the multiple processing modes
>>> introduced with rte_security. We can work on common
>>> routines once the other modes are added as well, so that we can come up with
>>> a better solution than what we have today.
>>>
>>
>> Yes, that is correct, but we should split the existing functions so that the part
>> which is common
>> to both modes stays common and we do not have duplicate code in the app.
>>
>> I believe we should take care of this when we add the lookaside cases. We shall
>> remove all duplicate
>> code. Ideally it should be part of this patchset, but we can postpone it to the
>> lookaside case addition.
>>
>>
> 
> I believe the route(4/6)_pkts and route(4/6)_pkt can be made uniform quite easily.
> Then you can call either send_single_pkt() or rte_event_eth_tx_adapter_enqueue() from
> the caller of route4_pkts.
> I don't think this will impact the performance at all.
> Instead of having 3 for loops, there will now be only 2, and nothing else changes for
> anybody. In fact we can remove 1 more if we call the send routine from inside
> route4_pkts(). I think that can also be done, but it may increase the lookup duration
> as there may be cache misses. But that needs to be experimented with. What say?
> 
> static inline void
> route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint32_t *hop[],
>                 uint8_t nb_pkts)
> {
>         uint32_t dst_ip;
>         uint16_t i, offset;
> 
>         if (nb_pkts == 0)
>                 return;
> 
>         /* Need to do an LPM lookup for non-inline packets. Inline packets will
>          * have port ID in the SA
>          */
> 
>         for (i = 0; i < nb_pkts; i++) {
>                 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
>                         /* Security offload not enabled. So an LPM lookup is
>                          * required to get the hop
>                          */
>                         offset = offsetof(struct ip, ip_dst);
>                         dst_ip = *rte_pktmbuf_mtod_offset(pkts[i],
>                                         uint32_t *, offset);
>                         dst_ip = rte_be_to_cpu_32(dst_ip);
>                         if (rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, hop[i]))
>                                 rte_pktmbuf_free(pkts[i]);
>                 } else {
>                         *hop[i] = get_hop_for_offload_pkt(pkts[i], 0);
>                 }
>         }
> }
> 

[Lukasz] Thank you for your suggestion. Looking at the proposed change I have a major concern
related to performance. The current route4_pkts() uses rte_lpm_lookup_bulk(), which can benefit
from SIMD instructions. Replacing it with a per-packet rte_lpm_lookup() might introduce substantial
performance degradation. I will start experimenting with the processing functions (routing packets,
checking packet type, checking SP policies) to make them as common as possible between poll and
event modes. As agreed, the plan is to make the processing functions common with the addition of
lookaside event mode. In the meantime I will send the V5 event mode patches, which address your
other comments.
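
To illustrate the direction I plan to experiment with, below is a rough sketch that keeps the
bulk lookup for non-inline packets while taking the hop from the SA for inline ones. This is
only a sketch, not the final code: the function name and the forwarding hooks are placeholders,
and it assumes the ipsec-secgw context (MAX_PKT_BURST, the existing get_hop_for_offload_pkt()
helper and the rte_lpm headers):

static inline void
route4_pkts_sketch(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
		uint8_t nb_pkts)
{
	uint32_t dst_ip[MAX_PKT_BURST], hop[MAX_PKT_BURST];
	struct rte_mbuf *lpm_pkts[MAX_PKT_BURST];
	uint16_t i, nb_lpm = 0;

	/* First pass: inline packets carry the port in the SA; for the
	 * rest, collect the destination addresses instead of looking
	 * each packet up individually.
	 */
	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
			/* forward pkts[i] using the hop from the SA */
			(void)get_hop_for_offload_pkt(pkts[i], 0);
		} else {
			lpm_pkts[nb_lpm] = pkts[i];
			dst_ip[nb_lpm++] = rte_be_to_cpu_32(
				*rte_pktmbuf_mtod_offset(pkts[i], uint32_t *,
					offsetof(struct ip, ip_dst)));
		}
	}

	if (nb_lpm == 0)
		return;

	/* Second pass: one bulk call resolves all collected packets,
	 * which lets the LPM library optimize across the burst.
	 */
	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, nb_lpm);

	for (i = 0; i < nb_lpm; i++) {
		if ((hop[i] & RTE_LPM_LOOKUP_SUCCESS) == 0) {
			/* no route - drop */
			rte_pktmbuf_free(lpm_pkts[i]);
			continue;
		}
		/* forward lpm_pkts[i] to port (hop[i] & 0xff) */
	}
}

Whether the final send happens here or in the caller, as in your example, we can decide based
on the measurements.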

Patch

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index bebda38..c98620e 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -50,13 +50,12 @@ 
 
 #include "event_helper.h"
 #include "ipsec.h"
+#include "ipsec_worker.h"
 #include "parser.h"
 #include "sad.h"
 
 volatile bool force_quit;
 
-#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
-
 #define MAX_JUMBO_PKT_LEN  9600
 
 #define MEMPOOL_CACHE_SIZE 256
@@ -86,29 +85,6 @@  volatile bool force_quit;
 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
 
-#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
-	(((uint64_t)((a) & 0xff) << 56) | \
-	((uint64_t)((b) & 0xff) << 48) | \
-	((uint64_t)((c) & 0xff) << 40) | \
-	((uint64_t)((d) & 0xff) << 32) | \
-	((uint64_t)((e) & 0xff) << 24) | \
-	((uint64_t)((f) & 0xff) << 16) | \
-	((uint64_t)((g) & 0xff) << 8)  | \
-	((uint64_t)(h) & 0xff))
-#else
-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
-	(((uint64_t)((h) & 0xff) << 56) | \
-	((uint64_t)((g) & 0xff) << 48) | \
-	((uint64_t)((f) & 0xff) << 40) | \
-	((uint64_t)((e) & 0xff) << 32) | \
-	((uint64_t)((d) & 0xff) << 24) | \
-	((uint64_t)((c) & 0xff) << 16) | \
-	((uint64_t)((b) & 0xff) << 8) | \
-	((uint64_t)(a) & 0xff))
-#endif
-#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
-
 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
 		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
 		(addr)->addr_bytes[2], (addr)->addr_bytes[3], \
@@ -120,11 +96,6 @@  static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
 
 #define MTU_TO_FRAMELEN(x)	((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
 
-/* port/source ethernet addr and destination ethernet addr */
-struct ethaddr_info {
-	uint64_t src, dst;
-};
-
 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index a07a920..4b53cb5 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -8,6 +8,69 @@ 
 
 #define NB_SOCKETS 4
 
+#define MAX_PKT_BURST 32
+
+#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
+
+#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
+#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
+	(((uint64_t)((a) & 0xff) << 56) | \
+	((uint64_t)((b) & 0xff) << 48) | \
+	((uint64_t)((c) & 0xff) << 40) | \
+	((uint64_t)((d) & 0xff) << 32) | \
+	((uint64_t)((e) & 0xff) << 24) | \
+	((uint64_t)((f) & 0xff) << 16) | \
+	((uint64_t)((g) & 0xff) << 8)  | \
+	((uint64_t)(h) & 0xff))
+#else
+#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
+	(((uint64_t)((h) & 0xff) << 56) | \
+	((uint64_t)((g) & 0xff) << 48) | \
+	((uint64_t)((f) & 0xff) << 40) | \
+	((uint64_t)((e) & 0xff) << 32) | \
+	((uint64_t)((d) & 0xff) << 24) | \
+	((uint64_t)((c) & 0xff) << 16) | \
+	((uint64_t)((b) & 0xff) << 8) | \
+	((uint64_t)(a) & 0xff))
+#endif
+
+#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
+
+struct traffic_type {
+	const uint8_t *data[MAX_PKT_BURST * 2];
+	struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
+	void *saptr[MAX_PKT_BURST * 2];
+	uint32_t res[MAX_PKT_BURST * 2];
+	uint32_t num;
+};
+
+struct ipsec_traffic {
+	struct traffic_type ipsec;
+	struct traffic_type ip4;
+	struct traffic_type ip6;
+};
+
+/* Fields optimized for devices without burst */
+struct traffic_type_nb {
+	const uint8_t *data;
+	struct rte_mbuf *pkt;
+	uint32_t res;
+	uint32_t num;
+};
+
+struct ipsec_traffic_nb {
+	struct traffic_type_nb ipsec;
+	struct traffic_type_nb ip4;
+	struct traffic_type_nb ip6;
+};
+
+/* port/source ethernet addr and destination ethernet addr */
+struct ethaddr_info {
+	uint64_t src, dst;
+};
+
+extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
+
 /* Port mask to identify the unprotected ports */
 extern uint32_t unprotected_port_mask;
 
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index ad913bf..f8f29f9 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -15,11 +15,9 @@ 
 
 #include "ipsec-secgw.h"
 
-#define RTE_LOGTYPE_IPSEC       RTE_LOGTYPE_USER1
 #define RTE_LOGTYPE_IPSEC_ESP   RTE_LOGTYPE_USER2
 #define RTE_LOGTYPE_IPSEC_IPIP  RTE_LOGTYPE_USER3
 
-#define MAX_PKT_BURST 32
 #define MAX_INFLIGHT 128
 #define MAX_QP_PER_LCORE 256
 
@@ -259,20 +257,6 @@  struct cnt_blk {
 	uint32_t cnt;
 } __attribute__((packed));
 
-struct traffic_type {
-	const uint8_t *data[MAX_PKT_BURST * 2];
-	struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
-	void *saptr[MAX_PKT_BURST * 2];
-	uint32_t res[MAX_PKT_BURST * 2];
-	uint32_t num;
-};
-
-struct ipsec_traffic {
-	struct traffic_type ipsec;
-	struct traffic_type ip4;
-	struct traffic_type ip6;
-};
-
 /* Socket ctx */
 extern struct socket_ctx socket_ctx[NB_SOCKETS];
 
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index b7a1ef9..6313c98 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -2,11 +2,51 @@ 
  * Copyright(c) 2010-2016 Intel Corporation
  * Copyright (C) 2020 Marvell International Ltd.
  */
+#include <rte_acl.h>
 #include <rte_event_eth_tx_adapter.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
 
 #include "event_helper.h"
 #include "ipsec.h"
 #include "ipsec-secgw.h"
+#include "ipsec_worker.h"
+
+static inline enum pkt_type
+process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
+{
+	struct rte_ether_hdr *eth;
+
+	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
+				offsetof(struct ip, ip_p));
+		if (**nlp == IPPROTO_ESP)
+			return PKT_TYPE_IPSEC_IPV4;
+		else
+			return PKT_TYPE_PLAIN_IPV4;
+	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
+				offsetof(struct ip6_hdr, ip6_nxt));
+		if (**nlp == IPPROTO_ESP)
+			return PKT_TYPE_IPSEC_IPV6;
+		else
+			return PKT_TYPE_PLAIN_IPV6;
+	}
+
+	/* Unknown/Unsupported type */
+	return PKT_TYPE_INVALID;
+}
+
+static inline void
+update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
+{
+	struct rte_ether_hdr *ethhdr;
+
+	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+	memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
+	memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
+}
 
 static inline void
 ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
@@ -61,6 +101,290 @@  prepare_out_sessions_tbl(struct sa_ctx *sa_out,
 	}
 }
 
+static inline int
+check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
+{
+	uint32_t res;
+
+	if (unlikely(sp == NULL))
+		return 0;
+
+	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
+			DEFAULT_MAX_CATEGORIES);
+
+	if (unlikely(res == 0)) {
+		/* No match */
+		return 0;
+	}
+
+	if (res == DISCARD)
+		return 0;
+	else if (res == BYPASS) {
+		*sa_idx = -1;
+		return 1;
+	}
+
+	*sa_idx = res - 1;
+	return 1;
+}
+
+static inline uint16_t
+route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
+{
+	uint32_t dst_ip;
+	uint16_t offset;
+	uint32_t hop;
+	int ret;
+
+	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
+	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
+	dst_ip = rte_be_to_cpu_32(dst_ip);
+
+	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
+
+	if (ret == 0) {
+		/* We have a hit */
+		return hop;
+	}
+
+	/* else */
+	return RTE_MAX_ETHPORTS;
+}
+
+/* TODO: To be tested */
+static inline uint16_t
+route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
+{
+	uint8_t dst_ip[16];
+	uint8_t *ip6_dst;
+	uint16_t offset;
+	uint32_t hop;
+	int ret;
+
+	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
+	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
+	memcpy(&dst_ip[0], ip6_dst, 16);
+
+	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
+
+	if (ret == 0) {
+		/* We have a hit */
+		return hop;
+	}
+
+	/* else */
+	return RTE_MAX_ETHPORTS;
+}
+
+static inline uint16_t
+get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
+{
+	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
+		return route4_pkt(pkt, rt->rt4_ctx);
+	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
+		return route6_pkt(pkt, rt->rt6_ctx);
+
+	return RTE_MAX_ETHPORTS;
+}
+
+static inline int
+process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
+		struct rte_event *ev)
+{
+	struct ipsec_sa *sa = NULL;
+	struct rte_mbuf *pkt;
+	uint16_t port_id = 0;
+	enum pkt_type type;
+	uint32_t sa_idx;
+	uint8_t *nlp;
+
+	/* Get pkt from event */
+	pkt = ev->mbuf;
+
+	/* Check the packet type */
+	type = process_ipsec_get_pkt_type(pkt, &nlp);
+
+	switch (type) {
+	case PKT_TYPE_PLAIN_IPV4:
+		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+			if (unlikely(pkt->ol_flags &
+				     PKT_RX_SEC_OFFLOAD_FAILED)) {
+				RTE_LOG(ERR, IPSEC,
+					"Inbound security offload failed\n");
+				goto drop_pkt_and_exit;
+			}
+			sa = pkt->userdata;
+		}
+
+		/* Check if we have a match */
+		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
+			/* No valid match */
+			goto drop_pkt_and_exit;
+		}
+		break;
+
+	case PKT_TYPE_PLAIN_IPV6:
+		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+			if (unlikely(pkt->ol_flags &
+				     PKT_RX_SEC_OFFLOAD_FAILED)) {
+				RTE_LOG(ERR, IPSEC,
+					"Inbound security offload failed\n");
+				goto drop_pkt_and_exit;
+			}
+			sa = pkt->userdata;
+		}
+
+		/* Check if we have a match */
+		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
+			/* No valid match */
+			goto drop_pkt_and_exit;
+		}
+		break;
+
+	default:
+		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
+		goto drop_pkt_and_exit;
+	}
+
+	/* Check if the packet has to be bypassed */
+	if (sa_idx == BYPASS)
+		goto route_and_send_pkt;
+
+	/* Validate sa_idx */
+	if (sa_idx >= ctx->sa_ctx->nb_sa)
+		goto drop_pkt_and_exit;
+
+	/* Else the packet has to be protected with SA */
+
+	/* If the packet was IPsec processed, then SA pointer should be set */
+	if (sa == NULL)
+		goto drop_pkt_and_exit;
+
+	/* SPI on the packet should match with the one in SA */
+	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
+		goto drop_pkt_and_exit;
+
+route_and_send_pkt:
+	port_id = get_route(pkt, rt, type);
+	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
+		/* no match */
+		goto drop_pkt_and_exit;
+	}
+	/* else, we have a matching route */
+
+	/* Update mac addresses */
+	update_mac_addrs(pkt, port_id);
+
+	/* Update the event with the dest port */
+	ipsec_event_pre_forward(pkt, port_id);
+	return 1;
+
+drop_pkt_and_exit:
+	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
+	rte_pktmbuf_free(pkt);
+	ev->mbuf = NULL;
+	return 0;
+}
+
+static inline int
+process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
+		struct rte_event *ev)
+{
+	struct rte_ipsec_session *sess;
+	struct sa_ctx *sa_ctx;
+	struct rte_mbuf *pkt;
+	uint16_t port_id = 0;
+	struct ipsec_sa *sa;
+	enum pkt_type type;
+	uint32_t sa_idx;
+	uint8_t *nlp;
+
+	/* Get pkt from event */
+	pkt = ev->mbuf;
+
+	/* Check the packet type */
+	type = process_ipsec_get_pkt_type(pkt, &nlp);
+
+	switch (type) {
+	case PKT_TYPE_PLAIN_IPV4:
+		/* Check if we have a match */
+		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
+			/* No valid match */
+			goto drop_pkt_and_exit;
+		}
+		break;
+	case PKT_TYPE_PLAIN_IPV6:
+		/* Check if we have a match */
+		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
+			/* No valid match */
+			goto drop_pkt_and_exit;
+		}
+		break;
+	default:
+		/*
+		 * Only plain IPv4 & IPv6 packets are allowed
+		 * on protected port. Drop the rest.
+		 */
+		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
+		goto drop_pkt_and_exit;
+	}
+
+	/* Check if the packet has to be bypassed */
+	if (sa_idx == BYPASS) {
+		port_id = get_route(pkt, rt, type);
+		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
+			/* no match */
+			goto drop_pkt_and_exit;
+		}
+		/* else, we have a matching route */
+		goto send_pkt;
+	}
+
+	/* Validate sa_idx */
+	if (sa_idx >= ctx->sa_ctx->nb_sa)
+		goto drop_pkt_and_exit;
+
+	/* Else the packet has to be protected */
+
+	/* Get SA ctx*/
+	sa_ctx = ctx->sa_ctx;
+
+	/* Get SA */
+	sa = &(sa_ctx->sa[sa_idx]);
+
+	/* Get IPsec session */
+	sess = ipsec_get_primary_session(sa);
+
+	/* Allow only inline protocol for now */
+	if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
+		goto drop_pkt_and_exit;
+	}
+
+	if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
+		pkt->userdata = sess->security.ses;
+
+	/* Mark the packet for Tx security offload */
+	pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
+
+	/* Get the port to which this pkt need to be submitted */
+	port_id = sa->portid;
+
+send_pkt:
+	/* Update mac addresses */
+	update_mac_addrs(pkt, port_id);
+
+	/* Update the event with the dest port */
+	ipsec_event_pre_forward(pkt, port_id);
+	return 1;
+
+drop_pkt_and_exit:
+	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
+	rte_pktmbuf_free(pkt);
+	ev->mbuf = NULL;
+	return 0;
+}
+
 /*
  * Event mode exposes various operating modes depending on the
  * capabilities of the event device and the operating mode
@@ -68,7 +392,7 @@  prepare_out_sessions_tbl(struct sa_ctx *sa_out,
  */
 
 /* Workers registered */
-#define IPSEC_EVENTMODE_WORKERS		1
+#define IPSEC_EVENTMODE_WORKERS		2
 
 /*
  * Event mode worker
@@ -146,7 +470,7 @@  ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
 			}
 
 			/* Save security session */
-			pkt->udata64 = (uint64_t) sess_tbl[port_id];
+			pkt->userdata = sess_tbl[port_id];
 
 			/* Mark the packet for Tx security offload */
 			pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
@@ -165,6 +489,94 @@  ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
 	}
 }
 
+/*
+ * Event mode worker
+ * Operating parameters : non-burst - Tx internal port - app mode
+ */
+static void
+ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
+		uint8_t nb_links)
+{
+	struct lcore_conf_ev_tx_int_port_wrkr lconf;
+	unsigned int nb_rx = 0;
+	struct rte_event ev;
+	uint32_t lcore_id;
+	int32_t socket_id;
+	int ret;
+
+	/* Check if we have links registered for this lcore */
+	if (nb_links == 0) {
+		/* No links registered - exit */
+		return;
+	}
+
+	/* We have valid links */
+
+	/* Get core ID */
+	lcore_id = rte_lcore_id();
+
+	/* Get socket ID */
+	socket_id = rte_lcore_to_socket_id(lcore_id);
+
+	/* Save routing table */
+	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
+	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
+	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
+	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
+	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
+	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
+	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
+	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
+	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
+	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
+
+	RTE_LOG(INFO, IPSEC,
+		"Launching event mode worker (non-burst - Tx internal port - "
+		"app mode) on lcore %d\n", lcore_id);
+
+	/* Check if it's single link */
+	if (nb_links != 1) {
+		RTE_LOG(INFO, IPSEC,
+			"Multiple links not supported. Using first link\n");
+	}
+
+	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
+		links[0].event_port_id);
+
+	while (!force_quit) {
+		/* Read packet from event queues */
+		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
+				links[0].event_port_id,
+				&ev,     /* events */
+				1,       /* nb_events */
+				0        /* timeout_ticks */);
+
+		if (nb_rx == 0)
+			continue;
+
+		if (is_unprotected_port(ev.mbuf->port))
+			ret = process_ipsec_ev_inbound(&lconf.inbound,
+							&lconf.rt, &ev);
+		else
+			ret = process_ipsec_ev_outbound(&lconf.outbound,
+							&lconf.rt, &ev);
+		if (ret != 1)
+			/* The pkt has been dropped */
+			continue;
+
+		/*
+		 * Since tx internal port is available, events can be
+		 * directly enqueued to the adapter and it would be
+		 * internally submitted to the eth device.
+		 */
+		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+				links[0].event_port_id,
+				&ev,	/* events */
+				1,	/* nb_events */
+				0	/* flags */);
+	}
+}
+
 static uint8_t
 ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
 {
@@ -180,6 +592,14 @@  ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
 	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
 	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
 	wrkr++;
+	nb_wrkr_param++;
+
+	/* Non-burst - Tx internal port - app mode */
+	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
+	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
+	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
+	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
+	nb_wrkr_param++;
 
 	return nb_wrkr_param;
 }
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
new file mode 100644
index 0000000..87b4f22
--- /dev/null
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -0,0 +1,35 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+#ifndef _IPSEC_WORKER_H_
+#define _IPSEC_WORKER_H_
+
+#include "ipsec.h"
+
+enum pkt_type {
+	PKT_TYPE_PLAIN_IPV4 = 1,
+	PKT_TYPE_IPSEC_IPV4,
+	PKT_TYPE_PLAIN_IPV6,
+	PKT_TYPE_IPSEC_IPV6,
+	PKT_TYPE_INVALID
+};
+
+struct route_table {
+	struct rt_ctx *rt4_ctx;
+	struct rt_ctx *rt6_ctx;
+};
+
+/*
+ * Conf required by event mode worker with tx internal port
+ */
+struct lcore_conf_ev_tx_int_port_wrkr {
+	struct ipsec_ctx inbound;
+	struct ipsec_ctx outbound;
+	struct route_table rt;
+} __rte_cache_aligned;
+
+void ipsec_poll_mode_worker(void);
+
+int ipsec_launch_one_lcore(void *args);
+
+#endif /* _IPSEC_WORKER_H_ */