[1/3] crypto/cnxk: move IPsec SA creation to common

Message ID 20220620071807.951128-2-ktejasree@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: akhil goyal
Headers
Series: support new full context firmware

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Tejasree Kondoj June 20, 2022, 7:18 a.m. UTC
  From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Move the IPsec SA creation to common.
The code can be used by fastpath also to create the SAs.
Add changes to support new full context microcode.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c      | 398 +++++++++++++++
 drivers/common/cnxk/cnxk_security.h      |  11 +
 drivers/common/cnxk/roc_cpt.c            |  93 ++++
 drivers/common/cnxk/roc_cpt.h            |   3 +
 drivers/common/cnxk/roc_ie_on.h          |  21 +-
 drivers/common/cnxk/version.map          |   3 +
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c |  24 +
 drivers/crypto/cnxk/cn9k_ipsec.c         | 594 +++--------------------
 drivers/crypto/cnxk/cn9k_ipsec_la_ops.h  |  16 +-
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h |   1 +
 10 files changed, 631 insertions(+), 533 deletions(-)
  

Patch

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 72ee5ee91f..dca8742be3 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -971,3 +971,401 @@  cnxk_ipsec_outb_rlens_get(struct cnxk_ipsec_outb_rlens *rlens,
 	rlens->max_extended_len = partial_len + roundup_len + roundup_byte;
 	return 0;
 }
+
+static inline int
+on_ipsec_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
+		    struct rte_crypto_sym_xform *crypto_xform,
+		    struct roc_ie_on_sa_ctl *ctl)
+{
+	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
+	int aes_key_len = 0;
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		auth_xform = crypto_xform;
+		cipher_xform = crypto_xform->next;
+	} else {
+		cipher_xform = crypto_xform;
+		auth_xform = crypto_xform->next;
+	}
+
+	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+		ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
+	else
+		ctl->direction = ROC_IE_SA_DIR_INBOUND;
+
+	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
+			ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
+		else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
+			ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
+		else
+			return -EINVAL;
+	}
+
+	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
+		ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
+		ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
+	} else if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
+	else
+		return -EINVAL;
+
+	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
+		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_AH;
+	else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
+		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
+	else
+		return -EINVAL;
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		switch (crypto_xform->aead.algo) {
+		case RTE_CRYPTO_AEAD_AES_GCM:
+			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
+			aes_key_len = crypto_xform->aead.key.length;
+			break;
+		default:
+			plt_err("Unsupported AEAD algorithm");
+			return -ENOTSUP;
+		}
+	} else {
+		if (cipher_xform != NULL) {
+			switch (cipher_xform->cipher.algo) {
+			case RTE_CRYPTO_CIPHER_NULL:
+				ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
+				break;
+			case RTE_CRYPTO_CIPHER_AES_CBC:
+				ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
+				aes_key_len = cipher_xform->cipher.key.length;
+				break;
+			case RTE_CRYPTO_CIPHER_AES_CTR:
+				ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
+				aes_key_len = cipher_xform->cipher.key.length;
+				break;
+			default:
+				plt_err("Unsupported cipher algorithm");
+				return -ENOTSUP;
+			}
+		}
+
+		switch (auth_xform->auth.algo) {
+		case RTE_CRYPTO_AUTH_NULL:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
+			break;
+		case RTE_CRYPTO_AUTH_MD5_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_MD5;
+			break;
+		case RTE_CRYPTO_AUTH_SHA1_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;
+			break;
+		case RTE_CRYPTO_AUTH_SHA224_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_224;
+			break;
+		case RTE_CRYPTO_AUTH_SHA256_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_256;
+			break;
+		case RTE_CRYPTO_AUTH_SHA384_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_384;
+			break;
+		case RTE_CRYPTO_AUTH_SHA512_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_512;
+			break;
+		case RTE_CRYPTO_AUTH_AES_GMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_AES_GMAC;
+			aes_key_len = auth_xform->auth.key.length;
+			break;
+		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_AES_XCBC_128;
+			break;
+		default:
+			plt_err("Unsupported auth algorithm");
+			return -ENOTSUP;
+		}
+	}
+
+	/* Set AES key length */
+	if (ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CBC ||
+	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CCM ||
+	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CTR ||
+	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_GCM ||
+	    ctl->auth_type == ROC_IE_ON_SA_AUTH_AES_GMAC) {
+		switch (aes_key_len) {
+		case 16:
+			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
+			break;
+		case 24:
+			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
+			break;
+		case 32:
+			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
+			break;
+		default:
+			plt_err("Invalid AES key length");
+			return -EINVAL;
+		}
+	}
+
+	if (ipsec->options.esn)
+		ctl->esn_en = 1;
+
+	if (ipsec->options.udp_encap == 1)
+		ctl->encap_type = ROC_IE_ON_SA_ENCAP_UDP;
+
+	ctl->copy_df = ipsec->options.copy_df;
+
+	ctl->spi = rte_cpu_to_be_32(ipsec->spi);
+
+	rte_io_wmb();
+
+	ctl->valid = 1;
+
+	return 0;
+}
+
+static inline int
+on_fill_ipsec_common_sa(struct rte_security_ipsec_xform *ipsec,
+			struct rte_crypto_sym_xform *crypto_xform,
+			struct roc_ie_on_common_sa *common_sa)
+{
+	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
+	const uint8_t *cipher_key;
+	int cipher_key_len = 0;
+	int ret;
+
+	ret = on_ipsec_sa_ctl_set(ipsec, crypto_xform, &common_sa->ctl);
+	if (ret)
+		return ret;
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		auth_xform = crypto_xform;
+		cipher_xform = crypto_xform->next;
+	} else {
+		cipher_xform = crypto_xform;
+		auth_xform = crypto_xform->next;
+	}
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+			memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
+		cipher_key = crypto_xform->aead.key.data;
+		cipher_key_len = crypto_xform->aead.key.length;
+	} else {
+		if (cipher_xform) {
+			cipher_key = cipher_xform->cipher.key.data;
+			cipher_key_len = cipher_xform->cipher.key.length;
+		}
+
+		if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
+			cipher_key = auth_xform->auth.key.data;
+			cipher_key_len = auth_xform->auth.key.length;
+		}
+	}
+
+	if (cipher_key_len != 0)
+		memcpy(common_sa->cipher_key, cipher_key, cipher_key_len);
+
+	return 0;
+}
+
+int
+cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
+			     struct rte_crypto_sym_xform *crypto_xform,
+			     struct roc_ie_on_outb_sa *out_sa)
+{
+	struct roc_ie_on_ip_template *template = NULL;
+	struct rte_crypto_sym_xform *auth_xform;
+	struct roc_ie_on_sa_ctl *ctl;
+	struct rte_ipv6_hdr *ip6;
+	struct rte_ipv4_hdr *ip4;
+	const uint8_t *auth_key;
+	int auth_key_len = 0;
+	size_t ctx_len;
+	int ret;
+
+	ctl = &out_sa->common_sa.ctl;
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+		auth_xform = crypto_xform;
+	else
+		auth_xform = crypto_xform->next;
+
+	ret = on_fill_ipsec_common_sa(ipsec, crypto_xform, &out_sa->common_sa);
+	if (ret)
+		return ret;
+
+	if (ctl->enc_type == ROC_IE_ON_SA_ENC_AES_GCM ||
+	    ctl->auth_type == ROC_IE_ON_SA_AUTH_NULL ||
+	    ctl->auth_type == ROC_IE_ON_SA_AUTH_AES_GMAC) {
+		template = &out_sa->aes_gcm.template;
+		ctx_len = offsetof(struct roc_ie_on_outb_sa, aes_gcm.template);
+	} else {
+		switch (ctl->auth_type) {
+		case ROC_IE_ON_SA_AUTH_SHA1:
+			template = &out_sa->sha1.template;
+			ctx_len = offsetof(struct roc_ie_on_outb_sa,
+					   sha1.template);
+			break;
+		case ROC_IE_ON_SA_AUTH_SHA2_256:
+		case ROC_IE_ON_SA_AUTH_SHA2_384:
+		case ROC_IE_ON_SA_AUTH_SHA2_512:
+			template = &out_sa->sha2.template;
+			ctx_len = offsetof(struct roc_ie_on_outb_sa,
+					   sha2.template);
+			break;
+		case ROC_IE_ON_SA_AUTH_AES_XCBC_128:
+			template = &out_sa->aes_xcbc.template;
+			ctx_len = offsetof(struct roc_ie_on_outb_sa,
+					   aes_xcbc.template);
+			break;
+		default:
+			plt_err("Unsupported auth algorithm");
+			return -EINVAL;
+		}
+	}
+
+	ip4 = (struct rte_ipv4_hdr *)&template->ip4.ipv4_hdr;
+	if (ipsec->options.udp_encap) {
+		ip4->next_proto_id = IPPROTO_UDP;
+		template->ip4.udp_src = rte_be_to_cpu_16(4500);
+		template->ip4.udp_dst = rte_be_to_cpu_16(4500);
+	} else {
+		if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
+			ip4->next_proto_id = IPPROTO_AH;
+		else
+			ip4->next_proto_id = IPPROTO_ESP;
+	}
+
+	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+			uint16_t frag_off = 0;
+
+			ctx_len += sizeof(template->ip4);
+
+			ip4->version_ihl = RTE_IPV4_VHL_DEF;
+			ip4->time_to_live = ipsec->tunnel.ipv4.ttl;
+			ip4->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
+			if (ipsec->tunnel.ipv4.df)
+				frag_off |= RTE_IPV4_HDR_DF_FLAG;
+			ip4->fragment_offset = rte_cpu_to_be_16(frag_off);
+
+			memcpy(&ip4->src_addr, &ipsec->tunnel.ipv4.src_ip,
+			       sizeof(struct in_addr));
+			memcpy(&ip4->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
+			       sizeof(struct in_addr));
+		} else if (ipsec->tunnel.type ==
+			   RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
+			ctx_len += sizeof(template->ip6);
+
+			ip6 = (struct rte_ipv6_hdr *)&template->ip6.ipv6_hdr;
+			if (ipsec->options.udp_encap) {
+				ip6->proto = IPPROTO_UDP;
+				template->ip6.udp_src = rte_be_to_cpu_16(4500);
+				template->ip6.udp_dst = rte_be_to_cpu_16(4500);
+			} else {
+				ip6->proto = (ipsec->proto ==
+					      RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
+						     IPPROTO_ESP :
+						     IPPROTO_AH;
+			}
+			ip6->vtc_flow =
+				rte_cpu_to_be_32(0x60000000 |
+						 ((ipsec->tunnel.ipv6.dscp
+						   << RTE_IPV6_HDR_TC_SHIFT) &
+						  RTE_IPV6_HDR_TC_MASK) |
+						 ((ipsec->tunnel.ipv6.flabel
+						   << RTE_IPV6_HDR_FL_SHIFT) &
+						  RTE_IPV6_HDR_FL_MASK));
+			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
+			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
+			       sizeof(struct in6_addr));
+			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
+			       sizeof(struct in6_addr));
+		}
+	} else
+		ctx_len += sizeof(template->ip4);
+
+	ctx_len += RTE_ALIGN_CEIL(ctx_len, 8);
+
+	if (crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
+		auth_key = auth_xform->auth.key.data;
+		auth_key_len = auth_xform->auth.key.length;
+
+		switch (auth_xform->auth.algo) {
+		case RTE_CRYPTO_AUTH_AES_GMAC:
+		case RTE_CRYPTO_AUTH_NULL:
+			break;
+		case RTE_CRYPTO_AUTH_SHA1_HMAC:
+			memcpy(out_sa->sha1.hmac_key, auth_key, auth_key_len);
+			break;
+		case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		case RTE_CRYPTO_AUTH_SHA512_HMAC:
+			memcpy(out_sa->sha2.hmac_key, auth_key, auth_key_len);
+			break;
+		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+			memcpy(out_sa->aes_xcbc.key, auth_key, auth_key_len);
+			break;
+		default:
+			plt_err("Unsupported auth algorithm %u",
+				auth_xform->auth.algo);
+			return -ENOTSUP;
+		}
+	}
+
+	return ctx_len;
+}
+
+int
+cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
+			    struct rte_crypto_sym_xform *crypto_xform,
+			    struct roc_ie_on_inb_sa *in_sa)
+{
+	struct rte_crypto_sym_xform *auth_xform = crypto_xform;
+	const uint8_t *auth_key;
+	int auth_key_len = 0;
+	size_t ctx_len = 0;
+	int ret;
+
+	ret = on_fill_ipsec_common_sa(ipsec, crypto_xform, &in_sa->common_sa);
+	if (ret)
+		return ret;
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD ||
+	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_NULL ||
+	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+		ctx_len = offsetof(struct roc_ie_on_inb_sa,
+				   sha1_or_gcm.hmac_key[0]);
+	} else {
+		auth_key = auth_xform->auth.key.data;
+		auth_key_len = auth_xform->auth.key.length;
+
+		switch (auth_xform->auth.algo) {
+		case RTE_CRYPTO_AUTH_NULL:
+			break;
+		case RTE_CRYPTO_AUTH_SHA1_HMAC:
+			memcpy(in_sa->sha1_or_gcm.hmac_key, auth_key,
+			       auth_key_len);
+			ctx_len = offsetof(struct roc_ie_on_inb_sa,
+					   sha1_or_gcm.selector);
+			break;
+		case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		case RTE_CRYPTO_AUTH_SHA512_HMAC:
+			memcpy(in_sa->sha2.hmac_key, auth_key, auth_key_len);
+			ctx_len = offsetof(struct roc_ie_on_inb_sa,
+					   sha2.selector);
+			break;
+		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+			memcpy(in_sa->aes_xcbc.key, auth_key, auth_key_len);
+			ctx_len = offsetof(struct roc_ie_on_inb_sa,
+					   aes_xcbc.selector);
+			break;
+		default:
+			plt_err("Unsupported auth algorithm %u",
+				auth_xform->auth.algo);
+			return -ENOTSUP;
+		}
+	}
+
+	return ctx_len;
+}
diff --git a/drivers/common/cnxk/cnxk_security.h b/drivers/common/cnxk/cnxk_security.h
index 02cdad269c..4e477ec53f 100644
--- a/drivers/common/cnxk/cnxk_security.h
+++ b/drivers/common/cnxk/cnxk_security.h
@@ -59,4 +59,15 @@  cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
 bool __roc_api cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa);
 bool __roc_api cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa);
 
+/* [CN9K] */
+int __roc_api
+cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
+			    struct rte_crypto_sym_xform *crypto_xform,
+			    struct roc_ie_on_inb_sa *in_sa);
+
+int __roc_api
+cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
+			     struct rte_crypto_sym_xform *crypto_xform,
+			     struct roc_ie_on_outb_sa *out_sa);
+
 #endif /* _CNXK_SECURITY_H__ */
diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index 742723ad1d..e5b179e8e1 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -981,3 +981,96 @@  roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
 
 	return 0;
 }
+
+int
+roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
+		     uint16_t ctx_len, uint8_t egrp)
+{
+	union cpt_res_s res, *hw_res;
+	struct cpt_inst_s inst;
+	uint64_t lmt_status;
+	int ret = 0;
+
+	hw_res = plt_zmalloc(sizeof(*hw_res), ROC_CPT_RES_ALIGN);
+	if (unlikely(hw_res == NULL)) {
+		plt_err("Couldn't allocate memory for result address");
+		return -ENOMEM;
+	}
+
+	hw_res->cn9k.compcode = CPT_COMP_NOT_DONE;
+
+	inst.w4.s.opcode_major = opcode;
+	inst.w4.s.opcode_minor = ctx_len >> 3;
+	inst.w4.s.param1 = 0;
+	inst.w4.s.param2 = 0;
+	inst.w4.s.dlen = ctx_len;
+	inst.dptr = rte_mempool_virt2iova(sa);
+	inst.rptr = 0;
+	inst.w7.s.cptr = rte_mempool_virt2iova(sa);
+	inst.w7.s.egrp = egrp;
+
+	inst.w0.u64 = 0;
+	inst.w2.u64 = 0;
+	inst.w3.u64 = 0;
+	inst.res_addr = (uintptr_t)hw_res;
+
+	rte_io_wmb();
+
+	do {
+		/* Copy CPT command to LMTLINE */
+		roc_lmt_mov64((void *)lf->lmt_base, &inst);
+		lmt_status = roc_lmt_submit_ldeor(lf->io_addr);
+	} while (lmt_status == 0);
+
+	const uint64_t timeout = plt_tsc_cycles() + 60 * plt_tsc_hz();
+
+	/* Wait until CPT instruction completes */
+	do {
+		res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
+		if (unlikely(plt_tsc_cycles() > timeout)) {
+			plt_err("Request timed out");
+			ret = -ETIMEDOUT;
+			goto free;
+		}
+	} while (res.cn9k.compcode == CPT_COMP_NOT_DONE);
+
+	if (unlikely(res.cn9k.compcode != CPT_COMP_GOOD)) {
+		ret = res.cn9k.compcode;
+		switch (ret) {
+		case CPT_COMP_INSTERR:
+			plt_err("Request failed with instruction error");
+			break;
+		case CPT_COMP_FAULT:
+			plt_err("Request failed with DMA fault");
+			break;
+		case CPT_COMP_HWERR:
+			plt_err("Request failed with hardware error");
+			break;
+		default:
+			plt_err("Request failed with unknown hardware completion code : 0x%x",
+				ret);
+		}
+		ret = -EINVAL;
+		goto free;
+	}
+
+	if (unlikely(res.cn9k.uc_compcode != ROC_IE_ON_UCC_SUCCESS)) {
+		ret = res.cn9k.uc_compcode;
+		switch (ret) {
+		case ROC_IE_ON_AUTH_UNSUPPORTED:
+			plt_err("Invalid auth type");
+			break;
+		case ROC_IE_ON_ENCRYPT_UNSUPPORTED:
+			plt_err("Invalid encrypt type");
+			break;
+		default:
+			plt_err("Request failed with unknown microcode completion code : 0x%x",
+				ret);
+		}
+		ret = -ENOTSUP;
+	}
+
+free:
+	plt_free(hw_res);
+	return ret;
+}
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 99cb8b2862..1b2032b547 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -181,4 +181,7 @@  void __roc_api roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth);
 int __roc_api roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr,
 				void *sa_cptr, uint16_t sa_len);
 
+int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa,
+				   uint8_t opcode, uint16_t ctx_len,
+				   uint8_t egrp);
 #endif /* _ROC_CPT_H_ */
diff --git a/drivers/common/cnxk/roc_ie_on.h b/drivers/common/cnxk/roc_ie_on.h
index 7dd7b6595f..37f711c643 100644
--- a/drivers/common/cnxk/roc_ie_on.h
+++ b/drivers/common/cnxk/roc_ie_on.h
@@ -23,7 +23,7 @@  enum roc_ie_on_ucc_ipsec {
 };
 
 /* Helper macros */
-#define ROC_IE_ON_INB_RPTR_HDR 0x8
+#define ROC_IE_ON_INB_RPTR_HDR 16
 #define ROC_IE_ON_MAX_IV_LEN   16
 #define ROC_IE_ON_PER_PKT_IV   BIT(43)
 
@@ -67,9 +67,17 @@  enum {
 struct roc_ie_on_outb_hdr {
 	uint32_t ip_id;
 	uint32_t seq;
+	uint32_t esn;
+	uint32_t df_tos;
 	uint8_t iv[16];
 };
 
+struct roc_ie_on_inb_hdr {
+	uint32_t sa_index;
+	uint64_t seq;
+	uint32_t pad;
+};
+
 union roc_ie_on_bit_perfect_iv {
 	uint8_t aes_iv[16];
 	uint8_t des_iv[8];
@@ -113,7 +121,7 @@  struct roc_ie_on_ip_template {
 union roc_on_ipsec_outb_param1 {
 	uint16_t u16;
 	struct {
-		uint16_t frag_num : 4;
+		uint16_t l2hdr_len : 4;
 		uint16_t rsvd_4_6 : 3;
 		uint16_t gre_select : 1;
 		uint16_t dsiv : 1;
@@ -171,8 +179,13 @@  struct roc_ie_on_common_sa {
 	union roc_ie_on_bit_perfect_iv iv;
 
 	/* w7 */
-	uint32_t esn_hi;
-	uint32_t esn_low;
+	union {
+		uint64_t u64;
+		struct {
+			uint32_t th;
+			uint32_t tl;
+		};
+	} seq_t;
 };
 
 struct roc_ie_on_outb_sa {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index a77f3f6e3c..db61fe575d 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -23,6 +23,8 @@  INTERNAL {
 	cnxk_ot_ipsec_outb_sa_fill;
 	cnxk_ot_ipsec_inb_sa_valid;
 	cnxk_ot_ipsec_outb_sa_valid;
+	cnxk_on_ipsec_inb_sa_create;
+	cnxk_on_ipsec_outb_sa_create;
 	roc_ae_ec_grp_get;
 	roc_ae_ec_grp_put;
 	roc_ae_fpm_get;
@@ -72,6 +74,7 @@  INTERNAL {
 	roc_cpt_parse_hdr_dump;
 	roc_cpt_rxc_time_cfg;
 	roc_cpt_ctx_write;
+	roc_on_cpt_ctx_write;
 	roc_dpi_configure;
 	roc_dpi_dev_fini;
 	roc_dpi_dev_init;
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index eccaf398df..7720730120 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -43,7 +43,9 @@  cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
 		       struct cpt_inst_s *inst)
 {
 	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct roc_ie_on_common_sa *common_sa;
 	struct cn9k_sec_session *priv;
+	struct roc_ie_on_sa_ctl *ctl;
 	struct cn9k_ipsec_sa *sa;
 
 	if (unlikely(sym_op->m_dst && sym_op->m_dst != sym_op->m_src)) {
@@ -64,6 +66,12 @@  cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
 
 	infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;
 
+	common_sa = &sa->in_sa.common_sa;
+	ctl = &common_sa->ctl;
+
+	if (ctl->esn_en)
+		infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_INB_ESN;
+
 	return process_inb_sa(op, sa, inst);
 }
 
@@ -491,14 +499,28 @@  cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
 {
 	struct rte_crypto_sym_op *sym_op = cop->sym;
 	struct rte_mbuf *m = sym_op->m_src;
+	struct cn9k_sec_session *priv;
+	struct cn9k_ipsec_sa *sa;
 	struct rte_ipv6_hdr *ip6;
 	struct rte_ipv4_hdr *ip;
 	uint16_t m_len = 0;
 	char *data;
 
+	priv = get_sec_session_private_data(cop->sym->sec_session);
+	sa = &priv->sa;
+
 	if (infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND) {
+		struct roc_ie_on_common_sa *common_sa = &sa->in_sa.common_sa;
+
 		data = rte_pktmbuf_mtod(m, char *);
+		if (infl_req->op_flags == CPT_OP_FLAGS_IPSEC_INB_ESN) {
+			struct roc_ie_on_inb_hdr *inb_hdr =
+				(struct roc_ie_on_inb_hdr *)data;
+			uint64_t seq = rte_be_to_cpu_64(inb_hdr->seq);
 
+			if (seq > common_sa->seq_t.u64)
+				common_sa->seq_t.u64 = seq;
+		}
 		ip = (struct rte_ipv4_hdr *)(data + ROC_IE_ON_INB_RPTR_HDR);
 
 		if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) ==
@@ -515,6 +537,8 @@  cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
 		m->data_len = m_len;
 		m->pkt_len = m_len;
 		m->data_off += ROC_IE_ON_INB_RPTR_HDR;
+	} else {
+		rte_pktmbuf_adj(m, sa->custom_hdr_len);
 	}
 }
 
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 82b8dae786..85f3f26c32 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -15,331 +15,26 @@ 
 
 #include "roc_api.h"
 
-static inline int
-cn9k_cpt_enq_sa_write(struct cn9k_ipsec_sa *sa, struct cnxk_cpt_qp *qp,
-		      uint8_t opcode, size_t ctx_len)
-{
-	struct roc_cpt *roc_cpt = qp->lf.roc_cpt;
-	uint64_t lmtline = qp->lmtline.lmt_base;
-	uint64_t io_addr = qp->lmtline.io_addr;
-	uint64_t lmt_status, time_out;
-	struct cpt_cn9k_res_s *res;
-	struct cpt_inst_s inst;
-	uint64_t *mdata;
-	int ret = 0;
-
-	if (unlikely(rte_mempool_get(qp->meta_info.pool, (void **)&mdata) < 0))
-		return -ENOMEM;
-
-	res = (struct cpt_cn9k_res_s *)RTE_PTR_ALIGN(mdata, 16);
-	res->compcode = CPT_COMP_NOT_DONE;
-
-	inst.w4.s.opcode_major = opcode;
-	inst.w4.s.opcode_minor = ctx_len >> 3;
-	inst.w4.s.param1 = 0;
-	inst.w4.s.param2 = 0;
-	inst.w4.s.dlen = ctx_len;
-	inst.dptr = rte_mempool_virt2iova(sa);
-	inst.rptr = 0;
-	inst.w7.s.cptr = rte_mempool_virt2iova(sa);
-	inst.w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-
-	inst.w0.u64 = 0;
-	inst.w2.u64 = 0;
-	inst.w3.u64 = 0;
-	inst.res_addr = rte_mempool_virt2iova(res);
-
-	rte_io_wmb();
-
-	do {
-		/* Copy CPT command to LMTLINE */
-		roc_lmt_mov64((void *)lmtline, &inst);
-		lmt_status = roc_lmt_submit_ldeor(io_addr);
-	} while (lmt_status == 0);
-
-	time_out = rte_get_timer_cycles() +
-		   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
-
-	while (res->compcode == CPT_COMP_NOT_DONE) {
-		if (rte_get_timer_cycles() > time_out) {
-			rte_mempool_put(qp->meta_info.pool, mdata);
-			plt_err("Request timed out");
-			return -ETIMEDOUT;
-		}
-		rte_io_rmb();
-	}
-
-	if (unlikely(res->compcode != CPT_COMP_GOOD)) {
-		ret = res->compcode;
-		switch (ret) {
-		case CPT_COMP_INSTERR:
-			plt_err("Request failed with instruction error");
-			break;
-		case CPT_COMP_FAULT:
-			plt_err("Request failed with DMA fault");
-			break;
-		case CPT_COMP_HWERR:
-			plt_err("Request failed with hardware error");
-			break;
-		default:
-			plt_err("Request failed with unknown hardware "
-				"completion code : 0x%x",
-				ret);
-		}
-		ret = -EINVAL;
-		goto mempool_put;
-	}
-
-	if (unlikely(res->uc_compcode != ROC_IE_ON_UCC_SUCCESS)) {
-		ret = res->uc_compcode;
-		switch (ret) {
-		case ROC_IE_ON_AUTH_UNSUPPORTED:
-			plt_err("Invalid auth type");
-			break;
-		case ROC_IE_ON_ENCRYPT_UNSUPPORTED:
-			plt_err("Invalid encrypt type");
-			break;
-		default:
-			plt_err("Request failed with unknown microcode "
-				"completion code : 0x%x",
-				ret);
-		}
-		ret = -ENOTSUP;
-	}
-
-mempool_put:
-	rte_mempool_put(qp->meta_info.pool, mdata);
-	return ret;
-}
-
-static inline int
-ipsec_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
-		 struct rte_crypto_sym_xform *crypto_xform,
-		 struct roc_ie_on_sa_ctl *ctl)
-{
-	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
-	int aes_key_len = 0;
-
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		auth_xform = crypto_xform;
-		cipher_xform = crypto_xform->next;
-	} else {
-		cipher_xform = crypto_xform;
-		auth_xform = crypto_xform->next;
-	}
-
-	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
-		ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
-	else
-		ctl->direction = ROC_IE_SA_DIR_INBOUND;
-
-	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
-		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
-			ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
-		else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
-			ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
-		else
-			return -EINVAL;
-	}
-
-	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
-		ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
-		ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
-	} else if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
-		ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
-	else
-		return -EINVAL;
-
-	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
-		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_AH;
-	else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
-		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
-	else
-		return -EINVAL;
-
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		switch (crypto_xform->aead.algo) {
-		case RTE_CRYPTO_AEAD_AES_GCM:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
-			aes_key_len = crypto_xform->aead.key.length;
-			break;
-		default:
-			plt_err("Unsupported AEAD algorithm");
-			return -ENOTSUP;
-		}
-	} else {
-		if (cipher_xform != NULL) {
-			switch (cipher_xform->cipher.algo) {
-			case RTE_CRYPTO_CIPHER_NULL:
-				ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
-				break;
-			case RTE_CRYPTO_CIPHER_AES_CBC:
-				ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
-				aes_key_len = cipher_xform->cipher.key.length;
-				break;
-			case RTE_CRYPTO_CIPHER_AES_CTR:
-				ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
-				aes_key_len = cipher_xform->cipher.key.length;
-				break;
-			default:
-				plt_err("Unsupported cipher algorithm");
-				return -ENOTSUP;
-			}
-		}
-
-		switch (auth_xform->auth.algo) {
-		case RTE_CRYPTO_AUTH_NULL:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
-			break;
-		case RTE_CRYPTO_AUTH_MD5_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_MD5;
-			break;
-		case RTE_CRYPTO_AUTH_SHA1_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;
-			break;
-		case RTE_CRYPTO_AUTH_SHA224_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_224;
-			break;
-		case RTE_CRYPTO_AUTH_SHA256_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_256;
-			break;
-		case RTE_CRYPTO_AUTH_SHA384_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_384;
-			break;
-		case RTE_CRYPTO_AUTH_SHA512_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_512;
-			break;
-		case RTE_CRYPTO_AUTH_AES_GMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_AES_GMAC;
-			aes_key_len = auth_xform->auth.key.length;
-			break;
-		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_AES_XCBC_128;
-			break;
-		default:
-			plt_err("Unsupported auth algorithm");
-			return -ENOTSUP;
-		}
-	}
-
-	/* Set AES key length */
-	if (ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CBC ||
-	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CCM ||
-	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CTR ||
-	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_GCM ||
-	    ctl->auth_type == ROC_IE_ON_SA_AUTH_AES_GMAC) {
-		switch (aes_key_len) {
-		case 16:
-			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
-			break;
-		case 24:
-			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
-			break;
-		case 32:
-			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
-			break;
-		default:
-			plt_err("Invalid AES key length");
-			return -EINVAL;
-		}
-	}
-
-	if (ipsec->options.esn)
-		ctl->esn_en = 1;
-
-	if (ipsec->options.udp_encap == 1)
-		ctl->encap_type = ROC_IE_ON_SA_ENCAP_UDP;
-
-	ctl->copy_df = ipsec->options.copy_df;
-
-	ctl->spi = rte_cpu_to_be_32(ipsec->spi);
-
-	rte_io_wmb();
-
-	ctl->valid = 1;
-
-	return 0;
-}
-
-static inline int
-fill_ipsec_common_sa(struct rte_security_ipsec_xform *ipsec,
-		     struct rte_crypto_sym_xform *crypto_xform,
-		     struct roc_ie_on_common_sa *common_sa)
-{
-	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
-	const uint8_t *cipher_key;
-	int cipher_key_len = 0;
-	int ret;
-
-	ret = ipsec_sa_ctl_set(ipsec, crypto_xform, &common_sa->ctl);
-	if (ret)
-		return ret;
-
-	if (ipsec->esn.value) {
-		common_sa->esn_low = ipsec->esn.low;
-		common_sa->esn_hi = ipsec->esn.hi;
-	}
-
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		auth_xform = crypto_xform;
-		cipher_xform = crypto_xform->next;
-	} else {
-		cipher_xform = crypto_xform;
-		auth_xform = crypto_xform->next;
-	}
-
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
-			memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
-		cipher_key = crypto_xform->aead.key.data;
-		cipher_key_len = crypto_xform->aead.key.length;
-	} else {
-		if (cipher_xform) {
-			cipher_key = cipher_xform->cipher.key.data;
-			cipher_key_len = cipher_xform->cipher.key.length;
-		}
-
-		if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
-			memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
-			cipher_key = auth_xform->auth.key.data;
-			cipher_key_len = auth_xform->auth.key.length;
-		}
-	}
-
-	if (cipher_key_len != 0)
-		memcpy(common_sa->cipher_key, cipher_key, cipher_key_len);
-
-	return 0;
-}
-
 static int
 cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 			  struct rte_security_ipsec_xform *ipsec,
 			  struct rte_crypto_sym_xform *crypto_xform,
 			  struct rte_security_session *sec_sess)
 {
-	struct roc_ie_on_ip_template *template = NULL;
 	struct roc_cpt *roc_cpt = qp->lf.roc_cpt;
-	struct rte_crypto_sym_xform *auth_xform;
 	union roc_on_ipsec_outb_param1 param1;
 	struct cnxk_cpt_inst_tmpl *inst_tmpl;
-	struct roc_ie_on_outb_sa *out_sa;
 	struct cn9k_sec_session *sess;
-	struct roc_ie_on_sa_ctl *ctl;
 	struct cn9k_ipsec_sa *sa;
-	struct rte_ipv6_hdr *ip6;
-	struct rte_ipv4_hdr *ip4;
-	const uint8_t *auth_key;
 	union cpt_inst_w4 w4;
 	union cpt_inst_w7 w7;
-	int auth_key_len = 0;
 	size_t ctx_len;
+	uint8_t opcode;
+	uint8_t egrp;
 	int ret;
 
 	sess = get_sec_session_private_data(sec_sess);
 	sa = &sess->sa;
-	out_sa = &sa->out_sa;
-	ctl = &out_sa->common_sa.ctl;
 
 	memset(sa, 0, sizeof(struct cn9k_ipsec_sa));
 
@@ -353,153 +48,16 @@  cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 	if (ipsec->esn.value)
 		sa->esn = ipsec->esn.value;
 
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
-		auth_xform = crypto_xform;
-	else
-		auth_xform = crypto_xform->next;
-
-	ret = fill_ipsec_common_sa(ipsec, crypto_xform, &out_sa->common_sa);
-	if (ret)
-		return ret;
-
 	ret = cnxk_ipsec_outb_rlens_get(&sa->rlens, ipsec, crypto_xform);
 	if (ret)
 		return ret;
 
-	if (ctl->enc_type == ROC_IE_ON_SA_ENC_AES_GCM ||
-	    ctl->auth_type == ROC_IE_ON_SA_AUTH_NULL ||
-	    ctl->auth_type == ROC_IE_ON_SA_AUTH_AES_GMAC) {
-		template = &out_sa->aes_gcm.template;
-		ctx_len = offsetof(struct roc_ie_on_outb_sa, aes_gcm.template);
-	} else {
-		switch (ctl->auth_type) {
-		case ROC_IE_ON_SA_AUTH_SHA1:
-			template = &out_sa->sha1.template;
-			ctx_len = offsetof(struct roc_ie_on_outb_sa,
-					   sha1.template);
-			break;
-		case ROC_IE_ON_SA_AUTH_SHA2_256:
-		case ROC_IE_ON_SA_AUTH_SHA2_384:
-		case ROC_IE_ON_SA_AUTH_SHA2_512:
-			template = &out_sa->sha2.template;
-			ctx_len = offsetof(struct roc_ie_on_outb_sa,
-					   sha2.template);
-			break;
-		case ROC_IE_ON_SA_AUTH_AES_XCBC_128:
-			template = &out_sa->aes_xcbc.template;
-			ctx_len = offsetof(struct roc_ie_on_outb_sa,
-					   aes_xcbc.template);
-			break;
-		default:
-			plt_err("Unsupported auth algorithm");
-			return -EINVAL;
-		}
-	}
-
-	ip4 = (struct rte_ipv4_hdr *)&template->ip4.ipv4_hdr;
-	if (ipsec->options.udp_encap) {
-		ip4->next_proto_id = IPPROTO_UDP;
-		template->ip4.udp_src = rte_be_to_cpu_16(4500);
-		template->ip4.udp_dst = rte_be_to_cpu_16(4500);
-	} else {
-		if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
-			ip4->next_proto_id = IPPROTO_AH;
-		else
-			ip4->next_proto_id = IPPROTO_ESP;
-	}
-
-	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
-		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
-			uint16_t frag_off = 0;
-			ctx_len += sizeof(template->ip4);
-
-			ip4->version_ihl = RTE_IPV4_VHL_DEF;
-			ip4->time_to_live = ipsec->tunnel.ipv4.ttl;
-			ip4->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
-			if (ipsec->tunnel.ipv4.df)
-				frag_off |= RTE_IPV4_HDR_DF_FLAG;
-			ip4->fragment_offset = rte_cpu_to_be_16(frag_off);
-
-			memcpy(&ip4->src_addr, &ipsec->tunnel.ipv4.src_ip,
-			       sizeof(struct in_addr));
-			memcpy(&ip4->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
-			       sizeof(struct in_addr));
-		} else if (ipsec->tunnel.type ==
-			   RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
-			ctx_len += sizeof(template->ip6);
-
-			ip6 = (struct rte_ipv6_hdr *)&template->ip6.ipv6_hdr;
-			if (ipsec->options.udp_encap) {
-				ip6->proto = IPPROTO_UDP;
-				template->ip6.udp_src = rte_be_to_cpu_16(4500);
-				template->ip6.udp_dst = rte_be_to_cpu_16(4500);
-			} else {
-				ip6->proto = (ipsec->proto ==
-					      RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
-						     IPPROTO_ESP :
-						     IPPROTO_AH;
-			}
-			ip6->vtc_flow =
-				rte_cpu_to_be_32(0x60000000 |
-						 ((ipsec->tunnel.ipv6.dscp
-						   << RTE_IPV6_HDR_TC_SHIFT) &
-						  RTE_IPV6_HDR_TC_MASK) |
-						 ((ipsec->tunnel.ipv6.flabel
-						   << RTE_IPV6_HDR_FL_SHIFT) &
-						  RTE_IPV6_HDR_FL_MASK));
-			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
-			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
-			       sizeof(struct in6_addr));
-			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
-			       sizeof(struct in6_addr));
-		}
-	} else
-		ctx_len += sizeof(template->ip4);
-
-	ctx_len += RTE_ALIGN_CEIL(ctx_len, 8);
-
-	if (crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
-		auth_key = auth_xform->auth.key.data;
-		auth_key_len = auth_xform->auth.key.length;
-
-		switch (auth_xform->auth.algo) {
-		case RTE_CRYPTO_AUTH_AES_GMAC:
-		case RTE_CRYPTO_AUTH_NULL:
-			break;
-		case RTE_CRYPTO_AUTH_SHA1_HMAC:
-			memcpy(out_sa->sha1.hmac_key, auth_key, auth_key_len);
-			break;
-		case RTE_CRYPTO_AUTH_SHA256_HMAC:
-		case RTE_CRYPTO_AUTH_SHA384_HMAC:
-		case RTE_CRYPTO_AUTH_SHA512_HMAC:
-			memcpy(out_sa->sha2.hmac_key, auth_key, auth_key_len);
-			break;
-		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
-			memcpy(out_sa->aes_xcbc.key, auth_key, auth_key_len);
-			break;
-		default:
-			plt_err("Unsupported auth algorithm %u",
-				auth_xform->auth.algo);
-			return -ENOTSUP;
-		}
-	}
-
-	inst_tmpl = &sa->inst;
-
-	w4.u64 = 0;
-	w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC;
-	w4.s.opcode_minor = ctx_len >> 3;
-
-	param1.u16 = 0;
-	param1.s.ikev2 = 1;
-
-	sa->custom_hdr_len = sizeof(struct roc_ie_on_outb_hdr) -
-			     ROC_IE_ON_MAX_IV_LEN;
+	sa->custom_hdr_len =
+		sizeof(struct roc_ie_on_outb_hdr) - ROC_IE_ON_MAX_IV_LEN;
 
 #ifdef LA_IPSEC_DEBUG
 	/* Use IV from application in debug mode */
 	if (ipsec->options.iv_gen_disable == 1) {
-		param1.s.per_pkt_iv = ROC_IE_ON_IV_SRC_FROM_DPTR;
 		sa->custom_hdr_len = sizeof(struct roc_ie_on_outb_hdr);
 
 		if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
@@ -520,17 +78,49 @@  cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 	}
 #endif
 
-	w4.s.param1 = param1.u16;
+	ret = cnxk_on_ipsec_outb_sa_create(ipsec, crypto_xform, &sa->out_sa);
 
-	inst_tmpl->w4 = w4.u64;
+	if (ret < 0)
+		return ret;
+
+	ctx_len = ret;
+	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND;
+	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
+	ret = roc_on_cpt_ctx_write(&qp->lf, (void *)&sa->out_sa, opcode,
+				   ctx_len, egrp);
+
+	if (ret)
+		return ret;
+
+	w4.u64 = 0;
+	w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC;
+	w4.s.opcode_minor = ctx_len >> 3;
+
+	param1.u16 = 0;
+	param1.s.ikev2 = 1;
+
+#ifdef LA_IPSEC_DEBUG
+	/* Use IV from application in debug mode */
+	if (ipsec->options.iv_gen_disable == 1)
+		param1.s.per_pkt_iv = ROC_IE_ON_IV_SRC_FROM_DPTR;
+#else
+	if (ipsec->options.iv_gen_disable != 0) {
+		plt_err("Application provided IV is not supported");
+		return -ENOTSUP;
+	}
+#endif
+
+	w4.s.param1 = param1.u16;
 
 	w7.u64 = 0;
-	w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-	w7.s.cptr = rte_mempool_virt2iova(out_sa);
+	w7.s.egrp = egrp;
+	w7.s.cptr = rte_mempool_virt2iova(&sa->out_sa);
+
+	inst_tmpl = &sa->inst;
+	inst_tmpl->w4 = w4.u64;
 	inst_tmpl->w7 = w7.u64;
 
-	return cn9k_cpt_enq_sa_write(
-		sa, qp, ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND, ctx_len);
+	return 0;
 }
 
 static int
@@ -539,71 +129,54 @@  cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 			 struct rte_crypto_sym_xform *crypto_xform,
 			 struct rte_security_session *sec_sess)
 {
-	struct rte_crypto_sym_xform *auth_xform = crypto_xform;
 	struct roc_cpt *roc_cpt = qp->lf.roc_cpt;
-	union roc_on_ipsec_inb_param2 param2;
 	struct cnxk_cpt_inst_tmpl *inst_tmpl;
-	struct roc_ie_on_inb_sa *in_sa;
+	union roc_on_ipsec_inb_param2 param2;
 	struct cn9k_sec_session *sess;
 	struct cn9k_ipsec_sa *sa;
-	const uint8_t *auth_key;
 	union cpt_inst_w4 w4;
 	union cpt_inst_w7 w7;
-	int auth_key_len = 0;
 	size_t ctx_len = 0;
-	int ret;
+	uint8_t opcode;
+	uint8_t egrp;
+	int ret = 0;
 
 	sess = get_sec_session_private_data(sec_sess);
 	sa = &sess->sa;
-	in_sa = &sa->in_sa;
 
 	memset(sa, 0, sizeof(struct cn9k_ipsec_sa));
 
 	sa->dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
 	sa->replay_win_sz = ipsec->replay_win_sz;
 
-	ret = fill_ipsec_common_sa(ipsec, crypto_xform, &in_sa->common_sa);
-	if (ret)
-		return ret;
-
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD ||
-	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_NULL ||
-	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
-		ctx_len = offsetof(struct roc_ie_on_inb_sa,
-				   sha1_or_gcm.hmac_key[0]);
-	} else {
-		auth_key = auth_xform->auth.key.data;
-		auth_key_len = auth_xform->auth.key.length;
-
-		switch (auth_xform->auth.algo) {
-		case RTE_CRYPTO_AUTH_NULL:
-			break;
-		case RTE_CRYPTO_AUTH_SHA1_HMAC:
-			memcpy(in_sa->sha1_or_gcm.hmac_key, auth_key,
-			       auth_key_len);
-			ctx_len = offsetof(struct roc_ie_on_inb_sa,
-					   sha1_or_gcm.selector);
-			break;
-		case RTE_CRYPTO_AUTH_SHA256_HMAC:
-		case RTE_CRYPTO_AUTH_SHA384_HMAC:
-		case RTE_CRYPTO_AUTH_SHA512_HMAC:
-			memcpy(in_sa->sha2.hmac_key, auth_key, auth_key_len);
-			ctx_len = offsetof(struct roc_ie_on_inb_sa,
-					   sha2.selector);
-			break;
-		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
-			memcpy(in_sa->aes_xcbc.key, auth_key, auth_key_len);
-			ctx_len = offsetof(struct roc_ie_on_inb_sa,
-					   aes_xcbc.selector);
-			break;
-		default:
-			plt_err("Unsupported auth algorithm %u",
-				auth_xform->auth.algo);
+	if (sa->replay_win_sz) {
+		if (sa->replay_win_sz > CNXK_ON_AR_WIN_SIZE_MAX) {
+			plt_err("Replay window size:%u is not supported",
+				sa->replay_win_sz);
 			return -ENOTSUP;
 		}
+
+		/* Set window bottom to 1, base and top to size of window */
+		sa->ar.winb = 1;
+		sa->ar.wint = sa->replay_win_sz;
+		sa->ar.base = sa->replay_win_sz;
+
+		sa->in_sa.common_sa.seq_t.tl = sa->seq_lo;
+		sa->in_sa.common_sa.seq_t.th = sa->seq_hi;
 	}
 
-	inst_tmpl = &sa->inst;
+	ret = cnxk_on_ipsec_inb_sa_create(ipsec, crypto_xform, &sa->in_sa);
+
+	if (ret < 0)
+		return ret;
+
+	ctx_len = ret;
+	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
+	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
+	ret = roc_on_cpt_ctx_write(&qp->lf, (void *)&sa->in_sa, opcode, ctx_len,
+				   egrp);
+	if (ret)
+		return ret;
 
 	w4.u64 = 0;
 	w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC;
@@ -613,31 +186,14 @@  cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 	param2.s.ikev2 = 1;
 	w4.s.param2 = param2.u16;
 
-	inst_tmpl->w4 = w4.u64;
+	w7.s.egrp = egrp;
+	w7.s.cptr = rte_mempool_virt2iova(&sa->in_sa);
 
-	w7.u64 = 0;
-	w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-	w7.s.cptr = rte_mempool_virt2iova(in_sa);
+	inst_tmpl = &sa->inst;
+	inst_tmpl->w4 = w4.u64;
 	inst_tmpl->w7 = w7.u64;
 
-	if (sa->replay_win_sz) {
-		if (sa->replay_win_sz > CNXK_ON_AR_WIN_SIZE_MAX) {
-			plt_err("Replay window size:%u is not supported",
-				sa->replay_win_sz);
-			return -ENOTSUP;
-		}
-
-		/* Set window bottom to 1, base and top to size of window */
-		sa->ar.winb = 1;
-		sa->ar.wint = sa->replay_win_sz;
-		sa->ar.base = sa->replay_win_sz;
-
-		in_sa->common_sa.esn_low = sa->seq_lo;
-		in_sa->common_sa.esn_hi = sa->seq_hi;
-	}
-
-	return cn9k_cpt_enq_sa_write(
-		sa, qp, ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND, ctx_len);
+	return 0;
 }
 
 static inline int
diff --git a/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
index df89aaca4e..bbb4404a89 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
+++ b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
@@ -20,7 +20,7 @@  ipsec_po_out_rlen_get(struct cn9k_ipsec_sa *sa, uint32_t plen)
 	enc_payload_len = RTE_ALIGN_CEIL(plen + sa->rlens.roundup_len,
 					 sa->rlens.roundup_byte);
 
-	return sa->rlens.partial_len + enc_payload_len;
+	return sa->custom_hdr_len + sa->rlens.partial_len + enc_payload_len;
 }
 
 static __rte_always_inline int
@@ -41,8 +41,8 @@  ipsec_antireplay_check(struct cn9k_ipsec_sa *sa, uint32_t win_sz,
 	ctl = &common_sa->ctl;
 
 	esn = ctl->esn_en;
-	esn_low = rte_be_to_cpu_32(common_sa->esn_low);
-	esn_hi = rte_be_to_cpu_32(common_sa->esn_hi);
+	esn_low = rte_be_to_cpu_32(common_sa->seq_t.tl);
+	esn_hi = rte_be_to_cpu_32(common_sa->seq_t.th);
 
 	esp = rte_pktmbuf_mtod_offset(m, void *, sizeof(struct rte_ipv4_hdr));
 	seql = rte_be_to_cpu_32(esp->seq);
@@ -62,8 +62,8 @@  ipsec_antireplay_check(struct cn9k_ipsec_sa *sa, uint32_t win_sz,
 	if (esn && !ret) {
 		seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
 		if (seq > seq_in_sa) {
-			common_sa->esn_low = rte_cpu_to_be_32(seql);
-			common_sa->esn_hi = rte_cpu_to_be_32(seqh);
+			common_sa->seq_t.tl = rte_cpu_to_be_32(seql);
+			common_sa->seq_t.th = rte_cpu_to_be_32(seqh);
 		}
 	}
 
@@ -77,13 +77,10 @@  process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 	const unsigned int hdr_len = sa->custom_hdr_len;
 	struct rte_crypto_sym_op *sym_op = cop->sym;
 	struct rte_mbuf *m_src = sym_op->m_src;
-	struct roc_ie_on_outb_sa *out_sa;
 	struct roc_ie_on_outb_hdr *hdr;
 	uint32_t dlen, rlen;
 	int32_t extend_tail;
 
-	out_sa = &sa->out_sa;
-
 	dlen = rte_pktmbuf_pkt_len(m_src) + hdr_len;
 	rlen = ipsec_po_out_rlen_get(sa, dlen - hdr_len);
 
@@ -114,8 +111,7 @@  process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 
 	hdr->seq = rte_cpu_to_be_32(sa->seq_lo);
 	hdr->ip_id = rte_cpu_to_be_32(sa->ip_id);
-
-	out_sa->common_sa.esn_hi = sa->seq_hi;
+	hdr->esn = rte_cpu_to_be_32(sa->seq_hi);
 
 	sa->ip_id++;
 	sa->esn++;
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index 7ece0214dc..ec99e6d660 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -33,6 +33,7 @@  struct cpt_qp_meta_info {
 #define CPT_OP_FLAGS_METABUF	       (1 << 1)
 #define CPT_OP_FLAGS_AUTH_VERIFY       (1 << 0)
 #define CPT_OP_FLAGS_IPSEC_DIR_INBOUND (1 << 2)
+#define CPT_OP_FLAGS_IPSEC_INB_ESN     (1 << 3)
 
 struct cpt_inflight_req {
 	union cpt_res_s res;