Hi Lukasz,
> diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c
> index c798bc4..ed5974b 100644
> --- a/lib/librte_ipsec/esp_outb.c
> +++ b/lib/librte_ipsec/esp_outb.c
> @@ -126,11 +126,11 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
>
> /* pad length + esp tail */
> pdlen = clen - plen;
> - tlen = pdlen + sa->icv_len;
> + tlen = pdlen + sa->icv_len + sa->sqh_len;
We probably don't want to increase pkt_len by sa->sqh_len for the inline case.
That's why I suggested passing sqh_len as a parameter to that function.
Then for the inline case we can just pass 0.
Do you see any obstacles with that approach?
Same thought for transport mode.
Konstantin
>
> /* do append and prepend */
> ml = rte_pktmbuf_lastseg(mb);
> - if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
> + if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
> return -ENOSPC;
>
> /* prepend header */
> @@ -152,8 +152,8 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
> rte_memcpy(ph, sa->hdr, sa->hdr_len);
>
> /* update original and new ip header fields */
> - update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
> - sqn_low16(sqc));
> + update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len - sa->sqh_len,
> + sa->hdr_l3_off, sqn_low16(sqc));
>
> /* update spi, seqn and iv */
> esph = (struct esp_hdr *)(ph + sa->hdr_len);
> @@ -292,11 +292,11 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
>
> /* pad length + esp tail */
> pdlen = clen - plen;
> - tlen = pdlen + sa->icv_len;
> + tlen = pdlen + sa->icv_len + sa->sqh_len;
>
> /* do append and insert */
> ml = rte_pktmbuf_lastseg(mb);
> - if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
> + if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
> return -ENOSPC;
>
> /* prepend space for ESP header */
> @@ -314,8 +314,8 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
> insert_esph(ph, ph + hlen, uhlen);
>
> /* update ip header fields */
> - np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
> - IPPROTO_ESP);
> + np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sa->sqh_len, l2len,
> + l3len, IPPROTO_ESP);
>
> /* update spi, seqn and iv */
> esph = (struct esp_hdr *)(ph + uhlen);
> @@ -425,6 +425,9 @@ esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> for (i = 0; i != num; i++) {
> if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
> ml = rte_pktmbuf_lastseg(mb[i]);
> + /* remove high-order 32 bits of esn from packet len */
> + mb[i]->pkt_len -= sa->sqh_len;
> + ml->data_len -= sa->sqh_len;
> icv = rte_pktmbuf_mtod_offset(ml, void *,
> ml->data_len - icv_len);
> remove_sqh(icv, icv_len);
Hi Konstantin
On 30.05.2019 18:51, Ananyev, Konstantin wrote:
> Hi Lukasz,
>
>> diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c
>> index c798bc4..ed5974b 100644
>> --- a/lib/librte_ipsec/esp_outb.c
>> +++ b/lib/librte_ipsec/esp_outb.c
>> @@ -126,11 +126,11 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
>>
>> /* pad length + esp tail */
>> pdlen = clen - plen;
>> - tlen = pdlen + sa->icv_len;
>> + tlen = pdlen + sa->icv_len + sa->sqh_len;
>
> We probably don't want to increase pkt_len by sa->sqh_len for the inline case.
> That's why I suggested passing sqh_len as a parameter to that function.
> Then for the inline case we can just pass 0.
> Do you see any obstacles with that approach?
> Same thought for transport mode.
> Konstantin
>
I agree this is incorrect. I missed the inline case.
I will send a revised patch.
Thanks,
Lukasz
>>
>> /* do append and prepend */
>> ml = rte_pktmbuf_lastseg(mb);
>> - if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
>> + if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
>> return -ENOSPC;
>>
>> /* prepend header */
>> @@ -152,8 +152,8 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
>> rte_memcpy(ph, sa->hdr, sa->hdr_len);
>>
>> /* update original and new ip header fields */
>> - update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
>> - sqn_low16(sqc));
>> + update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len - sa->sqh_len,
>> + sa->hdr_l3_off, sqn_low16(sqc));
>>
>> /* update spi, seqn and iv */
>> esph = (struct esp_hdr *)(ph + sa->hdr_len);
>> @@ -292,11 +292,11 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
>>
>> /* pad length + esp tail */
>> pdlen = clen - plen;
>> - tlen = pdlen + sa->icv_len;
>> + tlen = pdlen + sa->icv_len + sa->sqh_len;
>>
>> /* do append and insert */
>> ml = rte_pktmbuf_lastseg(mb);
>> - if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
>> + if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
>> return -ENOSPC;
>>
>> /* prepend space for ESP header */
>> @@ -314,8 +314,8 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
>> insert_esph(ph, ph + hlen, uhlen);
>>
>> /* update ip header fields */
>> - np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
>> - IPPROTO_ESP);
>> + np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sa->sqh_len, l2len,
>> + l3len, IPPROTO_ESP);
>>
>> /* update spi, seqn and iv */
>> esph = (struct esp_hdr *)(ph + uhlen);
>> @@ -425,6 +425,9 @@ esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
>> for (i = 0; i != num; i++) {
>> if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
>> ml = rte_pktmbuf_lastseg(mb[i]);
>> + /* remove high-order 32 bits of esn from packet len */
>> + mb[i]->pkt_len -= sa->sqh_len;
>> + ml->data_len -= sa->sqh_len;
>> icv = rte_pktmbuf_mtod_offset(ml, void *,
>> ml->data_len - icv_len);
>> remove_sqh(icv, icv_len);
@@ -16,7 +16,8 @@
#include "pad.h"
typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa,
- struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num);
+ struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num,
+ uint8_t sqh_len);
/*
* helper function to fill crypto_sym op for cipher+auth algorithms.
@@ -181,6 +182,15 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
+ /*
+ * if esn is used then high-order 32 bits are also used in ICV
+ * calculation but are not transmitted, update packet length
+ * to be consistent with auth data length and offset, this will
+ * be subtracted from packet length in post crypto processing
+ */
+ mb->pkt_len += sa->sqh_len;
+ ml->data_len += sa->sqh_len;
+
inb_pkt_xprepare(sa, sqn, icv);
return plen;
}
@@ -373,14 +383,18 @@ tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
*/
static inline uint16_t
tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
- uint32_t sqn[], uint32_t dr[], uint16_t num)
+ uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
{
uint32_t adj, i, k, tl;
uint32_t hl[num];
struct esp_tail espt[num];
struct rte_mbuf *ml[num];
- const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
+ /*
+ * remove icv, esp trailer and high-order
+ * 32 bits of esn from packet length
+ */
+ const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
const uint32_t cofs = sa->ctp.cipher.offset;
/*
@@ -420,7 +434,7 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
*/
static inline uint16_t
trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
- uint32_t sqn[], uint32_t dr[], uint16_t num)
+ uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len)
{
char *np;
uint32_t i, k, l2, tl;
@@ -428,7 +442,11 @@ trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
struct esp_tail espt[num];
struct rte_mbuf *ml[num];
- const uint32_t tlen = sa->icv_len + sizeof(espt[0]);
+ /*
+ * remove icv, esp trailer and high-order
+ * 32 bits of esn from packet length
+ */
+ const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len;
const uint32_t cofs = sa->ctp.cipher.offset;
/*
@@ -496,18 +514,15 @@ esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],
* process group of ESP inbound packets.
*/
static inline uint16_t
-esp_inb_pkt_process(const struct rte_ipsec_session *ss,
- struct rte_mbuf *mb[], uint16_t num, esp_inb_process_t process)
+esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
+ uint16_t num, uint8_t sqh_len, esp_inb_process_t process)
{
uint32_t k, n;
- struct rte_ipsec_sa *sa;
uint32_t sqn[num];
uint32_t dr[num];
- sa = ss->sa;
-
/* process packets, extract seq numbers */
- k = process(sa, mb, sqn, dr, num);
+ k = process(sa, mb, sqn, dr, num, sqh_len);
/* handle unprocessed mbufs */
if (k != num && k != 0)
@@ -533,7 +548,16 @@ uint16_t
esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
- return esp_inb_pkt_process(ss, mb, num, tun_process);
+ struct rte_ipsec_sa *sa = ss->sa;
+
+ return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, tun_process);
+}
+
+uint16_t
+inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_inb_pkt_process(ss->sa, mb, num, 0, tun_process);
}
/*
@@ -543,5 +567,14 @@ uint16_t
esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
- return esp_inb_pkt_process(ss, mb, num, trs_process);
+ struct rte_ipsec_sa *sa = ss->sa;
+
+ return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, trs_process);
+}
+
+uint16_t
+inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_inb_pkt_process(ss->sa, mb, num, 0, trs_process);
}
@@ -126,11 +126,11 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
/* pad length + esp tail */
pdlen = clen - plen;
- tlen = pdlen + sa->icv_len;
+ tlen = pdlen + sa->icv_len + sa->sqh_len;
/* do append and prepend */
ml = rte_pktmbuf_lastseg(mb);
- if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
+ if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
return -ENOSPC;
/* prepend header */
@@ -152,8 +152,8 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
rte_memcpy(ph, sa->hdr, sa->hdr_len);
/* update original and new ip header fields */
- update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,
- sqn_low16(sqc));
+ update_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len - sa->sqh_len,
+ sa->hdr_l3_off, sqn_low16(sqc));
/* update spi, seqn and iv */
esph = (struct esp_hdr *)(ph + sa->hdr_len);
@@ -292,11 +292,11 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
/* pad length + esp tail */
pdlen = clen - plen;
- tlen = pdlen + sa->icv_len;
+ tlen = pdlen + sa->icv_len + sa->sqh_len;
/* do append and insert */
ml = rte_pktmbuf_lastseg(mb);
- if (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))
+ if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
return -ENOSPC;
/* prepend space for ESP header */
@@ -314,8 +314,8 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
insert_esph(ph, ph + hlen, uhlen);
/* update ip header fields */
- np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,
- IPPROTO_ESP);
+ np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sa->sqh_len, l2len,
+ l3len, IPPROTO_ESP);
/* update spi, seqn and iv */
esph = (struct esp_hdr *)(ph + uhlen);
@@ -425,6 +425,9 @@ esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
for (i = 0; i != num; i++) {
if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
ml = rte_pktmbuf_lastseg(mb[i]);
+ /* remove high-order 32 bits of esn from packet len */
+ mb[i]->pkt_len -= sa->sqh_len;
+ ml->data_len -= sa->sqh_len;
icv = rte_pktmbuf_mtod_offset(ml, void *,
ml->data_len - icv_len);
remove_sqh(icv, icv_len);
@@ -610,10 +610,10 @@ inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
switch (sa->type & msk) {
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
- pf->process = esp_inb_tun_pkt_process;
+ pf->process = inline_inb_tun_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
- pf->process = esp_inb_trs_pkt_process;
+ pf->process = inline_inb_trs_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
@@ -143,9 +143,17 @@ esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
uint16_t
+inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
/* outbound processing */
uint16_t