ipsec: include high order bytes of esn in pkt len

Message ID 1556636155-26299-1-git-send-email-lbartosik@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series: ipsec: include high order bytes of esn in pkt len

Checks

Context                          Check    Description
ci/checkpatch                    warning  coding style issues
ci/Intel-compilation             success  Compilation OK
ci/mellanox-Performance-Testing  success  Performance Testing PASS
ci/intel-Performance-Testing     success  Performance Testing PASS

Commit Message

Lukas Bartosik [C] April 30, 2019, 2:55 p.m. UTC
  When esn is used, the high-order 32 bits are included in the ICV
calculation but are not transmitted. Update the packet length to be
consistent with the auth data offset and length before the crypto
operation. The high-order 32 bits of esn will be removed from the
packet length in crypto post-processing.

Change-Id: I5ba50e341059a8d6a5e4ce7c626dfe7b9173740b
Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
---
 lib/librte_ipsec/esp_inb.c  | 56 +++++++++++++++++++++++++++++++++++++--------
 lib/librte_ipsec/esp_outb.c | 31 +++++++++++++++++++++++++
 lib/librte_ipsec/sa.c       |  4 ++--
 lib/librte_ipsec/sa.h       |  8 +++++++
 4 files changed, 87 insertions(+), 12 deletions(-)
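
For illustration, the change boils down to the following adjustment around the
crypto operation. This is a simplified sketch in terms of the public mbuf API,
not the library code itself; here sqh_len stands for the size of the high-order
ESN word (4 bytes when ESN is enabled, 0 otherwise):

#include <rte_mbuf.h>

/* illustrative helpers, not part of librte_ipsec */
static inline void
sqh_len_account_add(struct rte_mbuf *mb, uint32_t sqh_len)
{
	/* before the crypto op: count the ESN high-order word that is
	 * appended after the payload for ICV computation only, so that
	 * the auth region stays within pkt_len */
	struct rte_mbuf *ml = rte_pktmbuf_lastseg(mb);

	mb->pkt_len += sqh_len;
	ml->data_len += sqh_len;
}

static inline void
sqh_len_account_del(struct rte_mbuf *mb, uint32_t sqh_len)
{
	/* after the crypto op: the word is never transmitted,
	 * so drop it from the lengths again */
	struct rte_mbuf *ml = rte_pktmbuf_lastseg(mb);

	mb->pkt_len -= sqh_len;
	ml->data_len -= sqh_len;
}

The prepare paths add the word before the packets are handed to the crypto
device, and the corresponding post-crypto process paths subtract it again.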
  

Comments

Ananyev, Konstantin April 30, 2019, 3:05 p.m. UTC | #1
> -----Original Message-----
> From: Lukasz Bartosik [mailto:lbartosik@marvell.com]
> Sent: Tuesday, April 30, 2019 3:56 PM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; anoobj@marvell.com; Lukasz Bartosik <lbartosik@marvell.com>
> Subject: [PATCH] ipsec: include high order bytes of esn in pkt len
> 
> When esn is used, the high-order 32 bits are included in the ICV
> calculation but are not transmitted. Update the packet length to be
> consistent with the auth data offset and length before the crypto
> operation. The high-order 32 bits of esn will be removed from the
> packet length in crypto post-processing.

Hi Lukasz,
Why do you want to do it?
I deliberately didn't include the SQH bits in pkt_len/data_len,
because they are temporary data and we are going to drop them anyway.
Konstantin

> 
> Change-Id: I5ba50e341059a8d6a5e4ce7c626dfe7b9173740b
> Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
> ---
>  lib/librte_ipsec/esp_inb.c  | 56 +++++++++++++++++++++++++++++++++++++--------
>  lib/librte_ipsec/esp_outb.c | 31 +++++++++++++++++++++++++
>  lib/librte_ipsec/sa.c       |  4 ++--
>  lib/librte_ipsec/sa.h       |  8 +++++++
>  4 files changed, 87 insertions(+), 12 deletions(-)
> 
> diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c
> index 4e0e12a..eb899e3 100644
> --- a/lib/librte_ipsec/esp_inb.c
> +++ b/lib/librte_ipsec/esp_inb.c
> @@ -16,7 +16,8 @@
>  #include "pad.h"
> 
>  typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
> -	struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num);
> +	struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
> +	uint8_t is_inline);
> 
>  /*
>   * helper function to fill crypto_sym op for cipher+auth algorithms.
> @@ -181,6 +182,15 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
>  	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
>  	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
> 
> +	/*
> +	 * if esn is used then high-order 32 bits are also used in ICV
> +	 * calculation but are not transmitted, update packet length
> +	 * to be consistent with auth data length and offset, this will
> +	 * be subtracted from packet length in post crypto processing
> +	 */
> +	mb->pkt_len += sa->sqh_len;
> +	ml->data_len += sa->sqh_len;
> +
>  	inb_pkt_xprepare(sa, sqn, icv);
>  	return plen;
>  }
> @@ -373,14 +383,20 @@ tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
>   */
>  static inline uint16_t
>  tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
> -	uint32_t sqn[], uint32_t dr[], uint16_t num)
> +	uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t is_inline)
>  {
>  	uint32_t adj, i, k, tl;
>  	uint32_t hl[num];
>  	struct esp_tail espt[num];
>  	struct rte_mbuf *ml[num];
> 
> -	const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
> +	/*
> +	 * remove high-order 32 bits of esn from packet length
> +	 * which was added before crypto processing, this doesn't
> +	 * apply to inline case
> +	 */
> +	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) +
> +				(is_inline ? 0 : sa->sqh_len);
>  	const uint32_t cofs = sa->ctp.cipher.offset;
> 
>  	/*
> @@ -420,7 +436,7 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
>   */
>  static inline uint16_t
>  trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
> -	uint32_t sqn[], uint32_t dr[], uint16_t num)
> +	uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t is_inline)
>  {
>  	char *np;
>  	uint32_t i, k, l2, tl;
> @@ -428,7 +444,13 @@ trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
>  	struct esp_tail espt[num];
>  	struct rte_mbuf *ml[num];
> 
> -	const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
> +	/*
> +	 * remove high-order 32 bits of esn from packet length
> +	 * which was added before crypto processing, this doesn't
> +	 * apply to inline case
> +	 */
> +	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) +
> +				(is_inline ? 0 : sa->sqh_len);
>  	const uint32_t cofs = sa->ctp.cipher.offset;
> 
>  	/*
> @@ -496,8 +518,8 @@ esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
>   * process group of ESP inbound packets.
>   */
>  static inline uint16_t
> -esp_inb_pkt_process(const struct rte_ipsec_session *ss,
> -	struct rte_mbuf *mb[], uint16_t num, esp_inb_process_t process)
> +esp_inb_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> +	uint16_t num, uint8_t is_inline, esp_inb_process_t process)
>  {
>  	uint32_t k, n;
>  	struct rte_ipsec_sa *sa;
> @@ -507,7 +529,7 @@ esp_inb_pkt_process(const struct rte_ipsec_session *ss,
>  	sa = ss->sa;
> 
>  	/* process packets, extract seq numbers */
> -	k = process(sa, mb, sqn, dr, num);
> +	k = process(sa, mb, sqn, dr, num, is_inline);
> 
>  	/* handle unprocessed mbufs */
>  	if (k != num && k != 0)
> @@ -533,7 +555,14 @@ uint16_t
>  esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
>  	struct rte_mbuf *mb[], uint16_t num)
>  {
> -	return esp_inb_pkt_process(ss, mb, num, tun_process);
> +	return esp_inb_pkt_process(ss, mb, num, 0, tun_process);
> +}
> +
> +uint16_t
> +inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
> +	struct rte_mbuf *mb[], uint16_t num)
> +{
> +	return esp_inb_pkt_process(ss, mb, num, 1, tun_process);
>  }
> 
>  /*
> @@ -543,5 +572,12 @@ uint16_t
>  esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
>  	struct rte_mbuf *mb[], uint16_t num)
>  {
> -	return esp_inb_pkt_process(ss, mb, num, trs_process);
> +	return esp_inb_pkt_process(ss, mb, num, 0, trs_process);
> +}
> +
> +uint16_t
> +inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
> +	struct rte_mbuf *mb[], uint16_t num)
> +{
> +	return esp_inb_pkt_process(ss, mb, num, 1, trs_process);
>  }
> diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c
> index c798bc4..71a595e 100644
> --- a/lib/librte_ipsec/esp_outb.c
> +++ b/lib/librte_ipsec/esp_outb.c
> @@ -221,6 +221,7 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
>  	uint32_t i, k, n;
>  	uint64_t sqn;
>  	rte_be64_t sqc;
> +	struct rte_mbuf *ml;
>  	struct rte_ipsec_sa *sa;
>  	struct rte_cryptodev_sym_session *cs;
>  	union sym_op_data icv;
> @@ -246,6 +247,19 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> 
>  		/* success, setup crypto op */
>  		if (rc >= 0) {
> +			/*
> +			 * if esn is used then high-order 32 bits are also
> +			 * used in ICV calculation but are not transmitted,
> +			 * update packet length to be consistent with auth
> +			 * data length and offset, this will be subtracted
> +			 * from packet length in post crypto processing
> +			 */
> +			if (sa->sqh_len) {
> +				mb[i]->pkt_len += sa->sqh_len;
> +				ml = rte_pktmbuf_lastseg(mb[i]);
> +				ml->data_len += sa->sqh_len;
> +			}
> +
>  			outb_pkt_xprepare(sa, sqc, &icv);
>  			lksd_none_cop_prepare(cop[k], cs, mb[i]);
>  			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
> @@ -356,6 +370,7 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
>  	uint32_t i, k, n, l2, l3;
>  	uint64_t sqn;
>  	rte_be64_t sqc;
> +	struct rte_mbuf *ml;
>  	struct rte_ipsec_sa *sa;
>  	struct rte_cryptodev_sym_session *cs;
>  	union sym_op_data icv;
> @@ -384,6 +399,19 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> 
>  		/* success, setup crypto op */
>  		if (rc >= 0) {
> +			/*
> +			 * if esn is used then high-order 32 bits are also
> +			 * used in ICV calculation but are not transmitted,
> +			 * update packet length to be consistent with auth
> +			 * data length and offset, this will be subtracted
> +			 * from packet length in post crypto processing
> +			 */
> +			if (sa->sqh_len) {
> +				mb[i]->pkt_len += sa->sqh_len;
> +				ml = rte_pktmbuf_lastseg(mb[i]);
> +				ml->data_len += sa->sqh_len;
> +			}
> +
>  			outb_pkt_xprepare(sa, sqc, &icv);
>  			lksd_none_cop_prepare(cop[k], cs, mb[i]);
>  			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
> @@ -425,6 +453,9 @@ esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
>  	for (i = 0; i != num; i++) {
>  		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
>  			ml = rte_pktmbuf_lastseg(mb[i]);
> +			/* remove high-order 32 bits of esn from packet len */
> +			mb[i]->pkt_len -= sa->sqh_len;
> +			ml->data_len -= sa->sqh_len;
>  			icv = rte_pktmbuf_mtod_offset(ml, void *,
>  				ml->data_len - icv_len);
>  			remove_sqh(icv, icv_len);
> diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
> index 846e317..ff01358 100644
> --- a/lib/librte_ipsec/sa.c
> +++ b/lib/librte_ipsec/sa.c
> @@ -610,10 +610,10 @@ inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
>  	switch (sa->type & msk) {
>  	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
>  	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
> -		pf->process = esp_inb_tun_pkt_process;
> +		pf->process = inline_inb_tun_pkt_process;
>  		break;
>  	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
> -		pf->process = esp_inb_trs_pkt_process;
> +		pf->process = inline_inb_trs_pkt_process;
>  		break;
>  	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
>  	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
> diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
> index ffb5fb4..20c0a65 100644
> --- a/lib/librte_ipsec/sa.h
> +++ b/lib/librte_ipsec/sa.h
> @@ -143,9 +143,17 @@ esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
>  	struct rte_mbuf *mb[], uint16_t num);
> 
>  uint16_t
> +inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
> +	struct rte_mbuf *mb[], uint16_t num);
> +
> +uint16_t
>  esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
>  	struct rte_mbuf *mb[], uint16_t num);
> 
> +uint16_t
> +inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
> +	struct rte_mbuf *mb[], uint16_t num);
> +
>  /* outbound processing */
> 
>  uint16_t
> --
> 2.7.4
  
Lukas Bartosik [C] May 7, 2019, 2:48 p.m. UTC | #2
On 30.04.2019 17:38, Lukas Bartosik wrote:
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Tuesday, April 30, 2019 5:05 PM
> To: Lukas Bartosik
> Cc: dev@dpdk.org; Anoob Joseph
> Subject: RE: [PATCH] ipsec: include high order bytes of esn in pkt len
> 
> 
> 
>> -----Original Message-----
>> From: Lukasz Bartosik [mailto:lbartosik@marvell.com]
>> Sent: Tuesday, April 30, 2019 3:56 PM
>> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
>> Cc: dev@dpdk.org; anoobj@marvell.com; Lukasz Bartosik <lbartosik@marvell.com>
>> Subject: [PATCH] ipsec: include high order bytes of esn in pkt len
>>
>> When esn is used then high-order 32 bits are included in ICV
>> calculation however are not transmitted. Update packet length
>> to be consistent with auth data offset and length before crypto
>> operation. High-order 32 bits of esn will be removed from packet
>> length in crypto post processing.
> 
> Hi Lukasz,
> Why you want to do it?
> I deliberately didn't include SQH bits into the pkt_len/data_len,
> because it is a temporary data and we are going to drop it anyway.
> Konstantin
> 
> Hi Konstantin,
> Our OcteonTx crypto driver validates pkt_len against the auth data length/offset and it complains
> because it is told to authenticate more data than the packet holds (according to pkt_len).
> I came across this when running IPsec tests which use esn.
> I understand that the sqh 32 bits are temporary and included only for the ICV calculation; however,
> not including them in pkt_len for crypto processing is inconsistent in my opinion.
> Thanks,
> Lukasz
> 

Hi Konstantin,

I should have elaborated more. When the 32 high-order bits of esn are not included
in the packet length, the auth data offset and length point to data which lies
outside the packet (according to the packet length).
This makes the crypto request (auth data length and offset) inconsistent with the
packet which the crypto request points to.

This is my argument for including the 32 high-order bits of esn in the packet
length, even though the inclusion is only temporary.

Thanks,
Lukasz

  
Ananyev, Konstantin May 9, 2019, 11:59 a.m. UTC | #3
> >>
> >> When esn is used then high-order 32 bits are included in ICV
> >> calculation however are not transmitted. Update packet length
> >> to be consistent with auth data offset and length before crypto
> >> operation. High-order 32 bits of esn will be removed from packet
> >> length in crypto post processing.
> >
> > Hi Lukasz,
> > Why you want to do it?
> > I deliberately didn't include SQH bits into the pkt_len/data_len,
> > because it is a temporary data and we are going to drop it anyway.
> > Konstantin
> >
> > Hi Konstantin,
> > Our OcteonTx crypto driver validates pkt_len with auth data length/offset and it complains
> > because it is told to authenticate more data that a packet holds (according to pkt_len).
> > I came across this when running IPSec tests which use esn.
> > I understand that sqh 32 bits are temporary and included only for ICV calculation however
> > not including them in pkt_len for crypto processing is inconsistent in my opinion.
> > Thanks,
> > Lukasz
> >
> 
> Hi Konstantin,
> 
> I should have elaborated more. When 32 high bits of esn are not included in
> packet length then auth offset and data point to data which is outside packet
> (according to packet length).
> This makes crypto request (auth data length and offset) incoherent with a packet
> which the crypto request points to.
> 
> This is my argument for including 32 high bits of esn into packet length even
> though the inclusion is only temporary.

I understand the reasoning.
I will try to have a closer look in the next few days.
Thanks
Konstantin
  
Ananyev, Konstantin May 14, 2019, 1:52 p.m. UTC | #4
Hi Lukasz,

> >>
> >> When esn is used then high-order 32 bits are included in ICV
> >> calculation however are not transmitted. Update packet length
> >> to be consistent with auth data offset and length before crypto
> >> operation. High-order 32 bits of esn will be removed from packet
> >> length in crypto post processing.
> >
> > Hi Lukasz,
> > Why you want to do it?
> > I deliberately didn't include SQH bits into the pkt_len/data_len,
> > because it is a temporary data and we are going to drop it anyway.
> > Konstantin
> >
> > Hi Konstantin,
> > Our OcteonTx crypto driver validates pkt_len with auth data length/offset and it complains
> > because it is told to authenticate more data that a packet holds (according to pkt_len).

Thanks for the explanation, just to confirm the check in your PMD:
are you talking about struct rte_crypto_sym_op auth.data.offset and auth.data.length,
i.e. a check that auth.data.offset + auth.data.length > pkt_len?
Or something else?

find drivers/*/octeon* -type f | xargs grep -l 'auth\.data\.'
returns no results.

Konstantin

> > I came across this when running IPSec tests which use esn.
> > I understand that sqh 32 bits are temporary and included only for ICV calculation however
> > not including them in pkt_len for crypto processing is inconsistent in my opinion.
> > Thanks,
> > Lukasz
> >
> 
> Hi Konstantin,
> 
> I should have elaborated more. When 32 high bits of esn are not included in
> packet length then auth offset and data point to data which is outside packet
> (according to packet length).
> This makes crypto request (auth data length and offset) incoherent with a packet
> which the crypto request points to.
> 
> This is my argument for including 32 high bits of esn into packet length even
> though the inclusion is only temporary.
> 
> Thanks,
> Lukasz
>
  
Lukas Bartosik [C] May 14, 2019, 2:31 p.m. UTC | #5
On 14.05.2019 15:52, Ananyev, Konstantin wrote:
> Hi Lukasz,
> 
>>>>
>>>> When esn is used then high-order 32 bits are included in ICV
>>>> calculation however are not transmitted. Update packet length
>>>> to be consistent with auth data offset and length before crypto
>>>> operation. High-order 32 bits of esn will be removed from packet
>>>> length in crypto post processing.
>>>
>>> Hi Lukasz,
>>> Why you want to do it?
>>> I deliberately didn't include SQH bits into the pkt_len/data_len,
>>> because it is a temporary data and we are going to drop it anyway.
>>> Konstantin
>>>
>>> Hi Konstantin,
>>> Our OcteonTx crypto driver validates pkt_len with auth data length/offset and it complains
>>> because it is told to authenticate more data that a packet holds (according to pkt_len).
> 
> Thanks for explanation, just to confirm about the check in your PMD:
> You are talking about struct rte_crypto_sym_op auth.data.offset and auth.data.length,
> i.e: auth.data.offset + auth.data.length > pkt_len
> Or something else?
> 
> find drivers/*/octeon* -type f | xargs grep -l 'auth\.data\.'
> returns no results.
> 
> Konstantin
> 

Hi Konstantin,

This is exactly auth.data.length and auth.data.offset from rte_crypto_sym_op.
The check takes place in drivers/common/cpt/cpt_ucode.h, in the cpt_dec_hmac_prep
function, although there is no direct check for auth.data.offset + auth.data.length > pkt_len,
as at this point auth.data.offset, auth.data.length and pkt_len are already stored in
internal structures related to how we process crypto requests.

Thanks,
Lukasz
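
As an illustration of the check being discussed, a PMD-side sanity test of the
auth region against the packet length could look roughly like the fragment
below. This is a hypothetical check for illustration only, not the actual
OcteonTx/cpt driver code:

#include <rte_crypto_sym.h>
#include <rte_mbuf.h>

/* hypothetical validation of a symmetric crypto op, illustration only */
static inline int
auth_region_fits(const struct rte_crypto_sym_op *sop)
{
	const struct rte_mbuf *mb = sop->m_src;

	/* without the sqh_len adjustment this fails whenever esn is used,
	 * because the auth region extends 4 bytes past pkt_len */
	return sop->auth.data.offset + sop->auth.data.length <= mb->pkt_len;
}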

>>> I came across this when running IPSec tests which use esn.
>>> I understand that sqh 32 bits are temporary and included only for ICV calculation however
>>> not including them in pkt_len for crypto processing is inconsistent in my opinion.
>>> Thanks,
>>> Lukasz
>>>
>>
>> Hi Konstantin,
>>
>> I should have elaborated more. When 32 high bits of esn are not included in
>> packet length then auth offset and data point to data which is outside packet
>> (according to packet length).
>> This makes crypto request (auth data length and offset) incoherent with a packet
>> which the crypto request points to.
>>
>> This is my argument for including 32 high bits of esn into packet length even
>> though the inclusion is only temporary.
>>
>> Thanks,
>> Lukasz
>>
  
Ananyev, Konstantin May 19, 2019, 2:47 p.m. UTC | #6
Hi Lukasz,
Thanks for the clarifications.
Looks good in general.
A few small comments below.
Konstantin
 
> When esn is used then high-order 32 bits are included in ICV
> calculation however are not transmitted. Update packet length
> to be consistent with auth data offset and length before crypto
> operation. High-order 32 bits of esn will be removed from packet
> length in crypto post processing.
> 
> Change-Id: I5ba50e341059a8d6a5e4ce7c626dfe7b9173740b
> Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
> ---
>  lib/librte_ipsec/esp_inb.c  | 56 +++++++++++++++++++++++++++++++++++++--------
>  lib/librte_ipsec/esp_outb.c | 31 +++++++++++++++++++++++++
>  lib/librte_ipsec/sa.c       |  4 ++--
>  lib/librte_ipsec/sa.h       |  8 +++++++
>  4 files changed, 87 insertions(+), 12 deletions(-)
> 
> diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c
> index 4e0e12a..eb899e3 100644
> --- a/lib/librte_ipsec/esp_inb.c
> +++ b/lib/librte_ipsec/esp_inb.c
> @@ -16,7 +16,8 @@
>  #include "pad.h"
> 
>  typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
> -	struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num);
> +	struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
> +	uint8_t is_inline);
> 
>  /*
>   * helper function to fill crypto_sym op for cipher+auth algorithms.
> @@ -181,6 +182,15 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
>  	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
>  	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
> 
> +	/*
> +	 * if esn is used then high-order 32 bits are also used in ICV
> +	 * calculation but are not transmitted, update packet length
> +	 * to be consistent with auth data length and offset, this will
> +	 * be subtracted from packet length in post crypto processing

Here and in several other comments below you repeat basically the same thing.
Seems a bit excessive. I suppose putting it in one place, or probably even
in the patch description, will be enough.

> +	 */
> +	mb->pkt_len += sa->sqh_len;
> +	ml->data_len += sa->sqh_len;
> +
>  	inb_pkt_xprepare(sa, sqn, icv);
>  	return plen;
>  }
> @@ -373,14 +383,20 @@ tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
>   */
>  static inline uint16_t
>  tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
> -	uint32_t sqn[], uint32_t dr[], uint16_t num)
> +	uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t is_inline)
>  {
>  	uint32_t adj, i, k, tl;
>  	uint32_t hl[num];
>  	struct esp_tail espt[num];
>  	struct rte_mbuf *ml[num];
> 
> -	const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
> +	/*
> +	 * remove high-order 32 bits of esn from packet length
> +	 * which was added before crypto processing, this doesn't
> +	 * apply to inline case
> +	 */

This comment seems a bit misleading, as we remove not only the sqh,
but also the icv, espt and padding.

> +	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) +
> +				(is_inline ? 0 : sa->sqh_len);
>  	const uint32_t cofs = sa->ctp.cipher.offset;
> 
>  	/*
> @@ -420,7 +436,7 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
>   */
>  static inline uint16_t
>  trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
> -	uint32_t sqn[], uint32_t dr[], uint16_t num)
> +	uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t is_inline)
>  {
>  	char *np;
>  	uint32_t i, k, l2, tl;
> @@ -428,7 +444,13 @@ trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
>  	struct esp_tail espt[num];
>  	struct rte_mbuf *ml[num];
> 
> -	const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
> +	/*
> +	 * remove high-order 32 bits of esn from packet length
> +	 * which was added before crypto processing, this doesn't
> +	 * apply to inline case
> +	 */
> +	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) +
> +				(is_inline ? 0 : sa->sqh_len);
>  	const uint32_t cofs = sa->ctp.cipher.offset;
> 
>  	/*
> @@ -496,8 +518,8 @@ esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
>   * process group of ESP inbound packets.
>   */
>  static inline uint16_t
> -esp_inb_pkt_process(const struct rte_ipsec_session *ss,
> -	struct rte_mbuf *mb[], uint16_t num, esp_inb_process_t process)
> +esp_inb_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> +	uint16_t num, uint8_t is_inline, esp_inb_process_t process)
>  {
>  	uint32_t k, n;
>  	struct rte_ipsec_sa *sa;
> @@ -507,7 +529,7 @@ esp_inb_pkt_process(const struct rte_ipsec_session *ss,
>  	sa = ss->sa;
> 
>  	/* process packets, extract seq numbers */
> -	k = process(sa, mb, sqn, dr, num);
> +	k = process(sa, mb, sqn, dr, num, is_inline);
> 
>  	/* handle unprocessed mbufs */
>  	if (k != num && k != 0)
> @@ -533,7 +555,14 @@ uint16_t
>  esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
>  	struct rte_mbuf *mb[], uint16_t num)
>  {
> -	return esp_inb_pkt_process(ss, mb, num, tun_process);
> +	return esp_inb_pkt_process(ss, mb, num, 0, tun_process);

To make things a bit cleaner, can I suggest the following:
1. make esp_inb_pkt_process() take as parameters:
    const struct rte_ipsec_sa *sa (instead of const struct rte_ipsec_session *ss)
    uint32_t sqh_len instead of is_inline

So here it would become:

sa = ss->sa;
return esp_inb_pkt_process(ss, mb, num, sa->sqh_len, tun_process);

For inline it would be:

sa = ss->sa;
return esp_inb_pkt_process(ss, mb, num, 0, tun_process);
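
For reference, a sketch of how the inbound entry points could look with that
change. This is one possible reading of the suggestion above, not code from the
patch, and it assumes the surrounding esp_inb.c/sa.h definitions:

/*
 * sketch: esp_inb_pkt_process() takes an sqh_len value instead of the
 * is_inline flag and forwards it to tun_process()/trs_process(),
 * which fold it into tlen.
 */
uint16_t
esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	/* lookaside crypto: the appended ESN word has to be stripped */
	return esp_inb_pkt_process(ss, mb, num, ss->sa->sqh_len, tun_process);
}

uint16_t
inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	/* inline crypto: nothing extra was appended to the packet */
	return esp_inb_pkt_process(ss, mb, num, 0, tun_process);
}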


> +}
> +
> +uint16_t
> +inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
> +	struct rte_mbuf *mb[], uint16_t num)
> +{
> +	return esp_inb_pkt_process(ss, mb, num, 1, tun_process);
>  }
> 
>  /*
> @@ -543,5 +572,12 @@ uint16_t
>  esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
>  	struct rte_mbuf *mb[], uint16_t num)
>  {
> -	return esp_inb_pkt_process(ss, mb, num, trs_process);
> +	return esp_inb_pkt_process(ss, mb, num, 0, trs_process);
> +}
> +
> +uint16_t
> +inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
> +	struct rte_mbuf *mb[], uint16_t num)
> +{
> +	return esp_inb_pkt_process(ss, mb, num, 1, trs_process);
>  }
> diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c
> index c798bc4..71a595e 100644
> --- a/lib/librte_ipsec/esp_outb.c
> +++ b/lib/librte_ipsec/esp_outb.c
> @@ -221,6 +221,7 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
>  	uint32_t i, k, n;
>  	uint64_t sqn;
>  	rte_be64_t sqc;
> +	struct rte_mbuf *ml;
>  	struct rte_ipsec_sa *sa;
>  	struct rte_cryptodev_sym_session *cs;
>  	union sym_op_data icv;
> @@ -246,6 +247,19 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> 
>  		/* success, setup crypto op */
>  		if (rc >= 0) {
> +			/*
> +			 * if esn is used then high-order 32 bits are also
> +			 * used in ICV calculation but are not transmitted,
> +			 * update packet length to be consistent with auth
> +			 * data length and offset, this will be subtracted
> +			 * from packet length in post crypto processing
> +			 */
> +			if (sa->sqh_len) {
> +				mb[i]->pkt_len += sa->sqh_len;
> +				ml = rte_pktmbuf_lastseg(mb[i]);
> +				ml->data_len += sa->sqh_len;
> +			}

That means we have to go through our 'next' list once again.
Seems suboptimal.
I think it would be better to make outb_tun_pkt_prepare() take sqh_len as an extra parameter.
Then inside it we can just:
tlen = pdlen + sa->icv_len + sqh_len; 
...
if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
                return -ENOSPC; 
...
ml->data_len += tlen;
mb->pkt_len += tlen;
...
pdofs += pdlen  + sqh_len;

Same for transport mode.
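
Put together, the prepare-side accounting inside outb_tun_pkt_prepare() could
then look roughly like the fragment below; this is a sketch built from the
snippets above, reusing the locals of the existing function (pdlen, pdofs, ml,
mb), not a complete implementation:

/* inside outb_tun_pkt_prepare(..., uint32_t sqh_len): */

/* bytes appended to the last segment, including the temporary ESN word */
tlen = pdlen + sa->icv_len + sqh_len;
if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
	return -ENOSPC;

/* pkt_len/data_len now cover everything the auth region references */
ml->data_len += tlen;
mb->pkt_len += tlen;

/* place the ICV after the appended ESN word */
pdofs += pdlen + sqh_len;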


> +
>  			outb_pkt_xprepare(sa, sqc, &icv);
>  			lksd_none_cop_prepare(cop[k], cs, mb[i]);
>  			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
> @@ -356,6 +370,7 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
>  	uint32_t i, k, n, l2, l3;
>  	uint64_t sqn;
>  	rte_be64_t sqc;
> +	struct rte_mbuf *ml;
>  	struct rte_ipsec_sa *sa;
>  	struct rte_cryptodev_sym_session *cs;
>  	union sym_op_data icv;
> @@ -384,6 +399,19 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> 
>  		/* success, setup crypto op */
>  		if (rc >= 0) {
> +			/*
> +			 * if esn is used then high-order 32 bits are also
> +			 * used in ICV calculation but are not transmitted,
> +			 * update packet length to be consistent with auth
> +			 * data length and offset, this will be subtracted
> +			 * from packet length in post crypto processing
> +			 */
> +			if (sa->sqh_len) {
> +				mb[i]->pkt_len += sa->sqh_len;
> +				ml = rte_pktmbuf_lastseg(mb[i]);
> +				ml->data_len += sa->sqh_len;
> +			}
> +
>  			outb_pkt_xprepare(sa, sqc, &icv);
>  			lksd_none_cop_prepare(cop[k], cs, mb[i]);
>  			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
> @@ -425,6 +453,9 @@ esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
>  	for (i = 0; i != num; i++) {
>  		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
>  			ml = rte_pktmbuf_lastseg(mb[i]);
> +			/* remove high-order 32 bits of esn from packet len */
> +			mb[i]->pkt_len -= sa->sqh_len;
> +			ml->data_len -= sa->sqh_len;
>  			icv = rte_pktmbuf_mtod_offset(ml, void *,
>  				ml->data_len - icv_len);
>  			remove_sqh(icv, icv_len);
> diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
> index 846e317..ff01358 100644
> --- a/lib/librte_ipsec/sa.c
> +++ b/lib/librte_ipsec/sa.c
> @@ -610,10 +610,10 @@ inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
>  	switch (sa->type & msk) {
>  	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
>  	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
> -		pf->process = esp_inb_tun_pkt_process;
> +		pf->process = inline_inb_tun_pkt_process;
>  		break;
>  	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
> -		pf->process = esp_inb_trs_pkt_process;
> +		pf->process = inline_inb_trs_pkt_process;
>  		break;
>  	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
>  	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
> diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
> index ffb5fb4..20c0a65 100644
> --- a/lib/librte_ipsec/sa.h
> +++ b/lib/librte_ipsec/sa.h
> @@ -143,9 +143,17 @@ esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
>  	struct rte_mbuf *mb[], uint16_t num);
> 
>  uint16_t
> +inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
> +	struct rte_mbuf *mb[], uint16_t num);
> +
> +uint16_t
>  esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
>  	struct rte_mbuf *mb[], uint16_t num);
> 
> +uint16_t
> +inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
> +	struct rte_mbuf *mb[], uint16_t num);
> +
>  /* outbound processing */
> 
>  uint16_t
> --
> 2.7.4
  
Lukas Bartosik [C] May 20, 2019, 11:13 a.m. UTC | #7
Hi Konstantin,

Thank you for the review.
I will send a revised patch which addresses your comments.

Thanks,
Lukasz

  

Patch

diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c
index 4e0e12a..eb899e3 100644
--- a/lib/librte_ipsec/esp_inb.c
+++ b/lib/librte_ipsec/esp_inb.c
@@ -16,7 +16,8 @@ 
 #include "pad.h"
 
 typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
-	struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num);
+	struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
+	uint8_t is_inline);
 
 /*
  * helper function to fill crypto_sym op for cipher+auth algorithms.
@@ -181,6 +182,15 @@  inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
 	icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
 	icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
 
+	/*
+	 * if esn is used then high-order 32 bits are also used in ICV
+	 * calculation but are not transmitted, update packet length
+	 * to be consistent with auth data length and offset, this will
+	 * be subtracted from packet length in post crypto processing
+	 */
+	mb->pkt_len += sa->sqh_len;
+	ml->data_len += sa->sqh_len;
+
 	inb_pkt_xprepare(sa, sqn, icv);
 	return plen;
 }
@@ -373,14 +383,20 @@  tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
  */
 static inline uint16_t
 tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
-	uint32_t sqn[], uint32_t dr[], uint16_t num)
+	uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t is_inline)
 {
 	uint32_t adj, i, k, tl;
 	uint32_t hl[num];
 	struct esp_tail espt[num];
 	struct rte_mbuf *ml[num];
 
-	const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
+	/*
+	 * remove high-order 32 bits of esn from packet length
+	 * which was added before crypto processing, this doesn't
+	 * apply to inline case
+	 */
+	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) +
+				(is_inline ? 0 : sa->sqh_len);
 	const uint32_t cofs = sa->ctp.cipher.offset;
 
 	/*
@@ -420,7 +436,7 @@  tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
  */
 static inline uint16_t
 trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
-	uint32_t sqn[], uint32_t dr[], uint16_t num)
+	uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t is_inline)
 {
 	char *np;
 	uint32_t i, k, l2, tl;
@@ -428,7 +444,13 @@  trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
 	struct esp_tail espt[num];
 	struct rte_mbuf *ml[num];
 
-	const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
+	/*
+	 * remove high-order 32 bits of esn from packet length
+	 * which was added before crypto processing, this doesn't
+	 * apply to inline case
+	 */
+	const uint32_t tlen = sa->icv_len + sizeof(espt[0]) +
+				(is_inline ? 0 : sa->sqh_len);
 	const uint32_t cofs = sa->ctp.cipher.offset;
 
 	/*
@@ -496,8 +518,8 @@  esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
  * process group of ESP inbound packets.
  */
 static inline uint16_t
-esp_inb_pkt_process(const struct rte_ipsec_session *ss,
-	struct rte_mbuf *mb[], uint16_t num, esp_inb_process_t process)
+esp_inb_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+	uint16_t num, uint8_t is_inline, esp_inb_process_t process)
 {
 	uint32_t k, n;
 	struct rte_ipsec_sa *sa;
@@ -507,7 +529,7 @@  esp_inb_pkt_process(const struct rte_ipsec_session *ss,
 	sa = ss->sa;
 
 	/* process packets, extract seq numbers */
-	k = process(sa, mb, sqn, dr, num);
+	k = process(sa, mb, sqn, dr, num, is_inline);
 
 	/* handle unprocessed mbufs */
 	if (k != num && k != 0)
@@ -533,7 +555,14 @@  uint16_t
 esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num)
 {
-	return esp_inb_pkt_process(ss, mb, num, tun_process);
+	return esp_inb_pkt_process(ss, mb, num, 0, tun_process);
+}
+
+uint16_t
+inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num)
+{
+	return esp_inb_pkt_process(ss, mb, num, 1, tun_process);
 }
 
 /*
@@ -543,5 +572,12 @@  uint16_t
 esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num)
 {
-	return esp_inb_pkt_process(ss, mb, num, trs_process);
+	return esp_inb_pkt_process(ss, mb, num, 0, trs_process);
+}
+
+uint16_t
+inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num)
+{
+	return esp_inb_pkt_process(ss, mb, num, 1, trs_process);
 }
diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c
index c798bc4..71a595e 100644
--- a/lib/librte_ipsec/esp_outb.c
+++ b/lib/librte_ipsec/esp_outb.c
@@ -221,6 +221,7 @@  esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	uint32_t i, k, n;
 	uint64_t sqn;
 	rte_be64_t sqc;
+	struct rte_mbuf *ml;
 	struct rte_ipsec_sa *sa;
 	struct rte_cryptodev_sym_session *cs;
 	union sym_op_data icv;
@@ -246,6 +247,19 @@  esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 
 		/* success, setup crypto op */
 		if (rc >= 0) {
+			/*
+			 * if esn is used then high-order 32 bits are also
+			 * used in ICV calculation but are not transmitted,
+			 * update packet length to be consistent with auth
+			 * data length and offset, this will be subtracted
+			 * from packet length in post crypto processing
+			 */
+			if (sa->sqh_len) {
+				mb[i]->pkt_len += sa->sqh_len;
+				ml = rte_pktmbuf_lastseg(mb[i]);
+				ml->data_len += sa->sqh_len;
+			}
+
 			outb_pkt_xprepare(sa, sqc, &icv);
 			lksd_none_cop_prepare(cop[k], cs, mb[i]);
 			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
@@ -356,6 +370,7 @@  esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	uint32_t i, k, n, l2, l3;
 	uint64_t sqn;
 	rte_be64_t sqc;
+	struct rte_mbuf *ml;
 	struct rte_ipsec_sa *sa;
 	struct rte_cryptodev_sym_session *cs;
 	union sym_op_data icv;
@@ -384,6 +399,19 @@  esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 
 		/* success, setup crypto op */
 		if (rc >= 0) {
+			/*
+			 * if esn is used then high-order 32 bits are also
+			 * used in ICV calculation but are not transmitted,
+			 * update packet length to be consistent with auth
+			 * data length and offset, this will be subtracted
+			 * from packet length in post crypto processing
+			 */
+			if (sa->sqh_len) {
+				mb[i]->pkt_len += sa->sqh_len;
+				ml = rte_pktmbuf_lastseg(mb[i]);
+				ml->data_len += sa->sqh_len;
+			}
+
 			outb_pkt_xprepare(sa, sqc, &icv);
 			lksd_none_cop_prepare(cop[k], cs, mb[i]);
 			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
@@ -425,6 +453,9 @@  esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	for (i = 0; i != num; i++) {
 		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
 			ml = rte_pktmbuf_lastseg(mb[i]);
+			/* remove high-order 32 bits of esn from packet len */
+			mb[i]->pkt_len -= sa->sqh_len;
+			ml->data_len -= sa->sqh_len;
 			icv = rte_pktmbuf_mtod_offset(ml, void *,
 				ml->data_len - icv_len);
 			remove_sqh(icv, icv_len);
diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
index 846e317..ff01358 100644
--- a/lib/librte_ipsec/sa.c
+++ b/lib/librte_ipsec/sa.c
@@ -610,10 +610,10 @@  inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
 	switch (sa->type & msk) {
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
-		pf->process = esp_inb_tun_pkt_process;
+		pf->process = inline_inb_tun_pkt_process;
 		break;
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
-		pf->process = esp_inb_trs_pkt_process;
+		pf->process = inline_inb_trs_pkt_process;
 		break;
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
index ffb5fb4..20c0a65 100644
--- a/lib/librte_ipsec/sa.h
+++ b/lib/librte_ipsec/sa.h
@@ -143,9 +143,17 @@  esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num);
 
 uint16_t
+inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
 esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num);
 
+uint16_t
+inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
 /* outbound processing */
 
 uint16_t