--- a/lib/librte_ipsec/esp_inb.c
+++ b/lib/librte_ipsec/esp_inb.c
@@ -105,6 +105,73 @@ inb_cop_prepare(struct rte_crypto_op *cop,
}
}
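+
+/*
+ * Prepare the sync (CPU) crypto operation for an inbound packet:
+ * fill IV, AAD and digest pointers for the given SA and build an
+ * iovec array covering the data to be processed.
+ * Returns the number of iovec entries used, or -1 on failure
+ * (e.g. packet spans more segments than can be described).
+ */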
+static inline int
+inb_sync_crypto_proc_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ const union sym_op_data *icv, uint32_t pofs, uint32_t plen,
+ struct rte_security_vec *buf, struct iovec *cur_vec,
+ void *iv, void **aad, void **digest)
+{
+ struct rte_mbuf *ms;
+ struct iovec *vec = cur_vec;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint64_t *ivp;
+ uint32_t algo, left, off = 0, n_seg = 0;
+
+ ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+ pofs + sizeof(struct rte_esp_hdr));
+ algo = sa->algo_type;
+
+ switch (algo) {
+ case ALGO_TYPE_AES_GCM:
+ gcm = (struct aead_gcm_iv *)iv;
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ *aad = icv->va + sa->icv_len;
+ off = sa->ctp.cipher.offset + pofs;
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ off = sa->ctp.auth.offset + pofs;
+ break;
+ case ALGO_TYPE_AES_CTR:
+ off = sa->ctp.auth.offset + pofs;
+ ctr = (struct aesctr_cnt_blk *)iv;
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_NULL:
+ break;
+ }
+
+ *digest = icv->va;
+
+ left = plen - sa->ctp.cipher.length;
+
+ ms = mbuf_get_seg_ofs(mb, &off);
+ if (!ms)
+ return -1;
+
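+ /* fill an iovec entry for each mbuf segment covering the data */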
+ while (n_seg < RTE_LIBRTE_IP_FRAG_MAX_FRAG && left && ms) {
+ uint32_t len = RTE_MIN(left, ms->data_len - off);
+
+ vec->iov_base = rte_pktmbuf_mtod_offset(ms, void *, off);
+ vec->iov_len = len;
+
+ left -= len;
+ vec++;
+ n_seg++;
+ ms = ms->next;
+ off = 0;
+ }
+
+ if (left)
+ return -1;
+
+ buf->vec = cur_vec;
+ buf->num = n_seg;
+
+ return n_seg;
+}
+
/*
* Helper function for prepare() to deal with situation when
* ICV is spread by two segments. Tries to move ICV completely into the
@@ -512,7 +579,6 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
return k;
}
-
/*
* *process* function for tunnel packets
*/
@@ -625,6 +691,112 @@ esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
return n;
}
+
+/*
+ * process packets using sync crypto engine
+ */
+static uint16_t
+esp_inb_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num, uint8_t sqh_len,
+ esp_inb_process_t process)
+{
+ int32_t rc;
+ uint32_t i, k, hl, n, p;
+ struct rte_ipsec_sa *sa;
+ struct replay_sqn *rsn;
+ union sym_op_data icv;
+ uint32_t sqn[num];
+ uint32_t dr[num];
+ struct rte_security_vec buf[num];
+ struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+ uint32_t vec_idx = 0;
+ uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
+ void *iv[num];
+ void *aad[num];
+ void *digest[num];
+ int status[num];
+
+ sa = ss->sa;
+ rsn = rsn_acquire(sa);
+
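+ /*
+ * prepare each packet: check its SQN against the replay window,
+ * then fill per-packet IV, AAD, digest and iovec data
+ */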
+ k = 0;
+ for (i = 0; i != num; i++) {
+ hl = mb[i]->l2_len + mb[i]->l3_len;
+ rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
+ if (rc >= 0) {
+ iv[k] = (void *)ivs[k];
+ rc = inb_sync_crypto_proc_prepare(sa, mb[i], &icv, hl,
+ rc, buf + k, vec + vec_idx, iv[k],
+ &aad[k], &digest[k]);
+ if (rc < 0) {
+ dr[i - k] = i;
+ continue;
+ }
+
+ vec_idx += rc;
+ k++;
+ } else
+ dr[i - k] = i;
+ }
+
+ rsn_release(sa, rsn);
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != num) {
+ rte_errno = EBADMSG;
+
+ if (unlikely(k == 0))
+ return 0;
+
+ move_bad_mbufs(mb, dr, num, num - k);
+ }
+
+ /* process the packets */
+ n = 0;
+ rte_security_process_cpu_crypto_bulk(ss->security.ctx,
+ ss->security.ses, buf, iv, aad, digest, status,
+ k);
+ /* move failed process packets to dr */
+ for (i = 0; i < k; i++) {
+ if (status[i]) {
+ dr[n++] = i;
+ rte_errno = EBADMSG;
+ }
+ }
+
+ /* move bad packets to the back */
+ if (n)
+ move_bad_mbufs(mb, dr, k, n);
+
+ /* process packets, extract seq numbers */
+ p = process(sa, mb, sqn, dr, k - n, sqh_len);
+
+ if (p != k - n && p != 0)
+ move_bad_mbufs(mb, dr, k - n, k - n - p);
+
+ if (p != num)
+ rte_errno = EBADMSG;
+
+ return p;
+}
+
+uint16_t
+esp_inb_tun_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ struct rte_ipsec_sa *sa = ss->sa;
+
+ return esp_inb_sync_crypto_pkt_process(ss, mb, num, sa->sqh_len,
+ tun_process);
+}
+
+uint16_t
+esp_inb_trs_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ struct rte_ipsec_sa *sa = ss->sa;
+
+ return esp_inb_sync_crypto_pkt_process(ss, mb, num, sa->sqh_len,
+ trs_process);
+}
+
/*
* process group of ESP inbound tunnel packets.
*/
--- a/lib/librte_ipsec/esp_outb.c
+++ b/lib/librte_ipsec/esp_outb.c
@@ -403,6 +403,292 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
return k;
}
+
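+/*
+ * Prepare the sync (CPU) crypto operation for an outbound packet:
+ * same as inb_sync_crypto_proc_prepare(), but offsets and lengths
+ * are computed for the outbound direction.
+ * Returns the number of iovec entries used, or -1 on failure.
+ */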
+static inline int
+outb_sync_crypto_proc_prepare(struct rte_mbuf *m, const struct rte_ipsec_sa *sa,
+ const uint64_t ivp[IPSEC_MAX_IV_QWORD],
+ const union sym_op_data *icv, uint32_t hlen, uint32_t plen,
+ struct rte_security_vec *buf, struct iovec *cur_vec, void *iv,
+ void **aad, void **digest)
+{
+ struct rte_mbuf *ms;
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ struct iovec *vec = cur_vec;
+ uint32_t left, off = 0, n_seg = 0;
+ uint32_t algo;
+
+ algo = sa->algo_type;
+
+ switch (algo) {
+ case ALGO_TYPE_AES_GCM:
+ gcm = iv;
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ *aad = (void *)(icv->va + sa->icv_len);
+ off = sa->ctp.cipher.offset + hlen;
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ off = sa->ctp.auth.offset + hlen;
+ break;
+ case ALGO_TYPE_AES_CTR:
+ off = sa->ctp.auth.offset + hlen;
+ ctr = iv;
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_NULL:
+ break;
+ }
+
+ *digest = (void *)icv->va;
+
+ left = sa->ctp.cipher.length + plen;
+
+ ms = mbuf_get_seg_ofs(m, &off);
+ if (!ms)
+ return -1;
+
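+ /* fill an iovec entry for each mbuf segment covering the data */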
+ while (n_seg < RTE_LIBRTE_IP_FRAG_MAX_FRAG && left && ms) {
+ uint32_t len = RTE_MIN(left, ms->data_len - off);
+
+ vec->iov_base = rte_pktmbuf_mtod_offset(ms, void *, off);
+ vec->iov_len = len;
+
+ left -= len;
+ vec++;
+ n_seg++;
+ ms = ms->next;
+ off = 0;
+ }
+
+ if (left)
+ return -1;
+
+ buf->vec = cur_vec;
+ buf->num = n_seg;
+
+ return n_seg;
+}
+
+/**
+ * Local post-process function prototype, the same as the process()
+ * member of struct rte_ipsec_sa_pkt_func.
+ */
+typedef uint16_t (*sync_crypto_post_process)(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[],
+ uint16_t num);
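+
+/*
+ * process outbound tunnel packets using sync crypto engine
+ */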
+static uint16_t
+esp_outb_tun_sync_crypto_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num,
+ sync_crypto_post_process post_process)
+{
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ struct rte_security_ctx *ctx;
+ struct rte_security_session *rss;
+ union sym_op_data icv;
+ struct rte_security_vec buf[num];
+ struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+ uint32_t vec_idx = 0;
+ void *aad[num];
+ void *digest[num];
+ void *iv[num];
+ uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
+ uint64_t ivp[IPSEC_MAX_IV_QWORD];
+ int status[num];
+ uint32_t dr[num];
+ uint32_t i, n, k;
+ int32_t rc;
+
+ sa = ss->sa;
+ ctx = ss->security.ctx;
+ rss = ss->security.ses;
+
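+ /* reserve SQNs for the burst; n is reduced on SQN overflow */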
+ k = 0;
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ for (i = 0; i != n; i++) {
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(ivp, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_tun_pkt_prepare(sa, sqc, ivp, mb[i], &icv,
+ sa->sqh_len);
+
+ /* success, setup crypto op */
+ if (rc >= 0) {
+ outb_pkt_xprepare(sa, sqc, &icv);
+
+ iv[k] = (void *)ivs[k];
+ rc = outb_sync_crypto_proc_prepare(mb[i], sa, ivp, &icv,
+ 0, rc, buf + k, vec + vec_idx, iv[k],
+ &aad[k], &digest[k]);
+ if (rc < 0) {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ continue;
+ }
+
+ vec_idx += rc;
+ k++;
+ /* failure, put packet into the death-row */
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ if (unlikely(k == 0)) {
+ rte_errno = EBADMSG;
+ return 0;
+ }
+
+ /* process the packets */
+ n = 0;
+ rte_security_process_cpu_crypto_bulk(ctx, rss, buf, (void **)iv,
+ (void **)aad, (void **)digest, status, k);
+ /* move failed process packets to dr */
+ for (i = 0; i < k; i++) {
+ if (status[i])
+ dr[n++] = i;
+ }
+
+ if (n)
+ move_bad_mbufs(mb, dr, k, n);
+
+ return post_process(ss, mb, k - n);
+}
+
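+/*
+ * process outbound transport packets using sync crypto engine
+ */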
+static uint16_t
+esp_outb_trs_sync_crypto_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num,
+ sync_crypto_post_process post_process)
+{
+ uint64_t sqn;
+ rte_be64_t sqc;
+ struct rte_ipsec_sa *sa;
+ struct rte_security_ctx *ctx;
+ struct rte_security_session *rss;
+ union sym_op_data icv;
+ struct rte_security_vec buf[num];
+ struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+ uint32_t vec_idx = 0;
+ void *aad[num];
+ void *digest[num];
+ uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
+ void *iv[num];
+ int status[num];
+ uint64_t ivp[IPSEC_MAX_IV_QWORD];
+ uint32_t dr[num];
+ uint32_t i, n, k;
+ uint32_t l2, l3;
+ int32_t rc;
+
+ sa = ss->sa;
+ ctx = ss->security.ctx;
+ rss = ss->security.ses;
+
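+ /* reserve SQNs for the burst; n is reduced on SQN overflow */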
+ k = 0;
+ n = num;
+ sqn = esn_outb_update_sqn(sa, &n);
+ if (n != num)
+ rte_errno = EOVERFLOW;
+
+ for (i = 0; i != n; i++) {
+ l2 = mb[i]->l2_len;
+ l3 = mb[i]->l3_len;
+
+ sqc = rte_cpu_to_be_64(sqn + i);
+ gen_iv(ivp, sqc);
+
+ /* try to update the packet itself */
+ rc = outb_trs_pkt_prepare(sa, sqc, ivp, mb[i], l2, l3, &icv,
+ sa->sqh_len);
+
+ /* success, setup crypto op */
+ if (rc >= 0) {
+ outb_pkt_xprepare(sa, sqc, &icv);
+
+ iv[k] = (void *)ivs[k];
+
+ rc = outb_sync_crypto_proc_prepare(mb[i], sa, ivp, &icv,
+ l2 + l3, rc, buf + k, vec + vec_idx,
+ iv[k], &aad[k], &digest[k]);
+ if (rc < 0) {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ continue;
+ }
+
+ vec_idx += rc;
+ k++;
+ /* failure, put packet into the death-row */
+ } else {
+ dr[i - k] = i;
+ rte_errno = -rc;
+ }
+ }
+
+ /* copy not prepared mbufs beyond good ones */
+ if (k != n && k != 0)
+ move_bad_mbufs(mb, dr, n, n - k);
+
+ if (unlikely(k == 0)) {
+ rte_errno = EBADMSG;
+ return 0;
+ }
+
+ /* process the packets */
+ n = 0;
+ rte_security_process_cpu_crypto_bulk(ctx, rss, buf, (void **)iv,
+ (void **)aad, (void **)digest, status, k);
+ /* move failed process packets to dr */
+ for (i = 0; i < k; i++) {
+ if (status[i])
+ dr[n++] = i;
+ }
+
+ if (n)
+ move_bad_mbufs(mb, dr, k, n);
+
+ return post_process(ss, mb, k - n);
+}
+
+uint16_t
+esp_outb_tun_sync_crypto_sqh_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_outb_tun_sync_crypto_process(ss, mb, num,
+ esp_outb_sqh_process);
+}
+
+uint16_t
+esp_outb_tun_sync_crypto_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_outb_tun_sync_crypto_process(ss, mb, num,
+ esp_outb_pkt_flag_process);
+}
+
+uint16_t
+esp_outb_trs_sync_crypto_sqh_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_outb_trs_sync_crypto_process(ss, mb, num,
+ esp_outb_sqh_process);
+}
+
+uint16_t
+esp_outb_trs_sync_crypto_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
+{
+ return esp_outb_trs_sync_crypto_process(ss, mb, num,
+ esp_outb_pkt_flag_process);
+}
+
/*
* process outbound packets for SA with ESN support,
* for algorithms that require SQN.hibits to be implicitly included
@@ -410,8 +696,8 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
* In that case we have to move ICV bytes back to their proper place.
*/
uint16_t
-esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
- uint16_t num)
+esp_outb_sqh_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
{
uint32_t i, k, icv_len, *icv;
struct rte_mbuf *ml;
--- a/lib/librte_ipsec/sa.c
+++ b/lib/librte_ipsec/sa.c
@@ -544,9 +544,9 @@ lksd_proto_prepare(const struct rte_ipsec_session *ss,
* - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
* - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
*/
-static uint16_t
-pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
- uint16_t num)
+uint16_t
+esp_outb_pkt_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num)
{
uint32_t i, k;
uint32_t dr[num];
@@ -599,12 +599,48 @@ lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
pf->prepare = esp_outb_tun_prepare;
pf->process = (sa->sqh_len != 0) ?
- esp_outb_sqh_process : pkt_flag_process;
+ esp_outb_sqh_process : esp_outb_pkt_flag_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
pf->prepare = esp_outb_trs_prepare;
pf->process = (sa->sqh_len != 0) ?
- esp_outb_sqh_process : pkt_flag_process;
+ esp_outb_sqh_process : esp_outb_pkt_flag_process;
+ break;
+ default:
+ rc = -ENOTSUP;
+ }
+
+ return rc;
+}
+
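+/*
+ * Select packet processing function for session on
+ * RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO action type.
+ */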
+static int
+lksd_sync_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
+ struct rte_ipsec_sa_pkt_func *pf)
+{
+ int32_t rc;
+
+ static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+ RTE_IPSEC_SATP_MODE_MASK;
+
+ rc = 0;
+ switch (sa->type & msk) {
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->process = esp_inb_tun_sync_crypto_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->process = esp_inb_trs_sync_crypto_pkt_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_tun_sync_crypto_sqh_process :
+ esp_outb_tun_sync_crypto_flag_process;
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+ pf->process = (sa->sqh_len != 0) ?
+ esp_outb_trs_sync_crypto_sqh_process :
+ esp_outb_trs_sync_crypto_flag_process;
break;
default:
rc = -ENOTSUP;
@@ -672,13 +708,16 @@ ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
RTE_IPSEC_SATP_DIR_IB)
- pf->process = pkt_flag_process;
+ pf->process = esp_outb_pkt_flag_process;
else
pf->process = inline_proto_outb_pkt_process;
break;
case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
pf->prepare = lksd_proto_prepare;
- pf->process = pkt_flag_process;
+ pf->process = esp_outb_pkt_flag_process;
+ break;
+ case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+ rc = lksd_sync_crypto_pkt_func_select(sa, pf);
break;
default:
rc = -ENOTSUP;
--- a/lib/librte_ipsec/sa.h
+++ b/lib/librte_ipsec/sa.h
@@ -156,6 +156,14 @@ uint16_t
inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_inb_tun_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_inb_trs_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
/* outbound processing */
uint16_t
@@ -170,6 +178,10 @@ uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
uint16_t num);
+
+uint16_t
+esp_outb_pkt_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
@@ -182,4 +194,21 @@ uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_tun_sync_crypto_sqh_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_tun_sync_crypto_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_trs_sync_crypto_sqh_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_trs_sync_crypto_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
#endif /* _SA_H_ */
--- a/lib/librte_ipsec/ses.c
+++ b/lib/librte_ipsec/ses.c
@@ -19,7 +19,9 @@ session_check(struct rte_ipsec_session *ss)
return -EINVAL;
if ((ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
ss->type ==
- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) &&
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ ss->type ==
+ RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) &&
ss->security.ctx == NULL)
return -EINVAL;
}