@@ -8,9 +8,8 @@
#include "roc_api.h"
-static void
-ipsec_hmac_opad_ipad_gen(struct rte_crypto_sym_xform *auth_xform,
- uint8_t *hmac_opad_ipad)
+void
+cnxk_sec_opad_ipad_gen(struct rte_crypto_sym_xform *auth_xform, uint8_t *hmac_opad_ipad)
{
const uint8_t *key = auth_xform->auth.key.data;
uint32_t length = auth_xform->auth.key.length;
@@ -192,7 +191,7 @@ ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2,
const uint8_t *auth_key = auth_xfrm->auth.key.data;
roc_aes_xcbc_key_derive(auth_key, hmac_opad_ipad);
} else {
- ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);
+ cnxk_sec_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);
}
tmp_key = (uint64_t *)hmac_opad_ipad;
@@ -741,7 +740,7 @@ onf_ipsec_sa_common_param_fill(struct roc_ie_onf_sa_ctl *ctl, uint8_t *salt,
key = cipher_xfrm->cipher.key.data;
length = cipher_xfrm->cipher.key.length;
- ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);
+ cnxk_sec_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);
}
switch (length) {
@@ -1374,7 +1373,7 @@ cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
roc_aes_xcbc_key_derive(auth_key, hmac_opad_ipad);
} else if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_NULL) {
- ipsec_hmac_opad_ipad_gen(auth_xform, hmac_opad_ipad);
+ cnxk_sec_opad_ipad_gen(auth_xform, hmac_opad_ipad);
}
}
@@ -1441,7 +1440,7 @@ cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
roc_aes_xcbc_key_derive(auth_key, hmac_opad_ipad);
} else if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_NULL) {
- ipsec_hmac_opad_ipad_gen(auth_xform, hmac_opad_ipad);
+ cnxk_sec_opad_ipad_gen(auth_xform, hmac_opad_ipad);
}
}
@@ -61,14 +61,15 @@ bool __roc_api cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa);
bool __roc_api cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa);
/* [CN9K] */
-int __roc_api
-cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
- struct rte_crypto_sym_xform *crypto_xform,
- struct roc_ie_on_inb_sa *in_sa);
+int __roc_api cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
+ struct rte_crypto_sym_xform *crypto_xform,
+ struct roc_ie_on_inb_sa *in_sa);
-int __roc_api
-cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
- struct rte_crypto_sym_xform *crypto_xform,
- struct roc_ie_on_outb_sa *out_sa);
+int __roc_api cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
+ struct rte_crypto_sym_xform *crypto_xform,
+ struct roc_ie_on_outb_sa *out_sa);
+
+__rte_internal
+void cnxk_sec_opad_ipad_gen(struct rte_crypto_sym_xform *auth_xform, uint8_t *hmac_opad_ipad);
#endif /* _CNXK_SECURITY_H__ */
@@ -1,6 +1,7 @@
INTERNAL {
global:
	cnxk_ipsec_icvlen_get;
	cnxk_ipsec_ivlen_get;
	cnxk_ipsec_outb_rlens_get;
+	cnxk_sec_opad_ipad_gen;
@@ -80,8 +80,9 @@ cn10k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
}
static __rte_always_inline int __rte_hot
-cpt_sec_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op, struct cn10k_sec_session *sess,
- struct cpt_inst_s *inst, struct cpt_inflight_req *infl_req, const bool is_sg_ver2)
+cpt_sec_ipsec_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
+ struct cn10k_sec_session *sess, struct cpt_inst_s *inst,
+ struct cpt_inflight_req *infl_req, const bool is_sg_ver2)
{
struct rte_crypto_sym_op *sym_op = op->sym;
int ret;
@@ -91,7 +92,7 @@ cpt_sec_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op, struct cn10k
return -ENOTSUP;
}
- if (sess->is_outbound)
+ if (sess->ipsec.is_outbound)
ret = process_outb_sa(&qp->lf, op, sess, &qp->meta_info, infl_req, inst,
is_sg_ver2);
else
@@ -100,6 +101,16 @@ cpt_sec_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op, struct cn10k
	return ret;
}
+static __rte_always_inline int __rte_hot
+cpt_sec_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op, struct cn10k_sec_session *sess,
+		  struct cpt_inst_s *inst, struct cpt_inflight_req *infl_req, const bool is_sg_ver2)
+{
+ if (sess->proto == RTE_SECURITY_PROTOCOL_IPSEC)
+ return cpt_sec_ipsec_inst_fill(qp, op, sess, &inst[0], infl_req, is_sg_ver2);
+
+ return 0;
+}
+
static inline int
cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[], struct cpt_inst_s inst[],
struct cpt_inflight_req *infl_req, const bool is_sg_ver2)
@@ -20,7 +20,7 @@
#include "roc_api.h"
static uint64_t
-ipsec_cpt_inst_w7_get(struct roc_cpt *roc_cpt, void *sa)
+cpt_inst_w7_get(struct roc_cpt *roc_cpt, void *sa)
{
union cpt_inst_w7 w7;
@@ -64,7 +64,7 @@ cn10k_ipsec_outb_sa_create(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf,
goto sa_dptr_free;
}
- sec_sess->inst.w7 = ipsec_cpt_inst_w7_get(roc_cpt, out_sa);
+ sec_sess->inst.w7 = cpt_inst_w7_get(roc_cpt, out_sa);
#ifdef LA_IPSEC_DEBUG
/* Use IV from application in debug mode */
@@ -89,7 +89,7 @@ cn10k_ipsec_outb_sa_create(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf,
}
#endif
- sec_sess->is_outbound = true;
+ sec_sess->ipsec.is_outbound = true;
/* Get Rlen calculation data */
ret = cnxk_ipsec_outb_rlens_get(&rlens, ipsec_xfrm, crypto_xfrm);
@@ -150,6 +150,7 @@ cn10k_ipsec_outb_sa_create(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf,
/* Trigger CTX flush so that data is written back to DRAM */
roc_cpt_lf_ctx_flush(lf, out_sa, false);
+ sec_sess->proto = RTE_SECURITY_PROTOCOL_IPSEC;
plt_atomic_thread_fence(__ATOMIC_SEQ_CST);
sa_dptr_free:
@@ -189,8 +190,8 @@ cn10k_ipsec_inb_sa_create(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf,
goto sa_dptr_free;
}
- sec_sess->is_outbound = false;
- sec_sess->inst.w7 = ipsec_cpt_inst_w7_get(roc_cpt, in_sa);
+ sec_sess->ipsec.is_outbound = false;
+ sec_sess->inst.w7 = cpt_inst_w7_get(roc_cpt, in_sa);
/* Save index/SPI in cookie, specific required for Rx Inject */
sa_dptr->w1.s.cookie = 0xFFFFFFFF;
@@ -209,7 +210,7 @@ cn10k_ipsec_inb_sa_create(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf,
*/
if (ipsec_xfrm->options.ip_csum_enable) {
param1.s.ip_csum_disable = ROC_IE_OT_SA_INNER_PKT_IP_CSUM_ENABLE;
- sec_sess->ip_csum = RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ sec_sess->ipsec.ip_csum = RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
/* Disable L4 checksum verification by default */
@@ -250,6 +251,7 @@ cn10k_ipsec_inb_sa_create(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf,
/* Trigger CTX flush so that data is written back to DRAM */
roc_cpt_lf_ctx_flush(lf, in_sa, true);
+ sec_sess->proto = RTE_SECURITY_PROTOCOL_IPSEC;
plt_atomic_thread_fence(__ATOMIC_SEQ_CST);
sa_dptr_free:
@@ -298,16 +300,15 @@ cn10k_sec_session_create(void *device, struct rte_security_session_conf *conf,
if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
return -EINVAL;
- if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
- return -ENOTSUP;
-
- ((struct cn10k_sec_session *)sess)->userdata = conf->userdata;
- return cn10k_ipsec_session_create(device, &conf->ipsec,
- conf->crypto_xform, sess);
+ if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC) {
+ ((struct cn10k_sec_session *)sess)->userdata = conf->userdata;
+ return cn10k_ipsec_session_create(device, &conf->ipsec, conf->crypto_xform, sess);
+ }
+ return -ENOTSUP;
}
static int
-cn10k_sec_session_destroy(void *dev, struct rte_security_session *sec_sess)
+cn10k_sec_ipsec_session_destroy(void *dev, struct rte_security_session *sec_sess)
{
struct rte_cryptodev *crypto_dev = dev;
union roc_ot_ipsec_sa_word2 *w2;
@@ -318,9 +319,6 @@ cn10k_sec_session_destroy(void *dev, struct rte_security_session *sec_sess)
void *sa_dptr = NULL;
int ret;
- if (unlikely(sec_sess == NULL))
- return -EINVAL;
-
sess = (struct cn10k_sec_session *)sec_sess;
qp = crypto_dev->data->queue_pairs[0];
@@ -336,7 +334,7 @@ cn10k_sec_session_destroy(void *dev, struct rte_security_session *sec_sess)
ret = -1;
- if (sess->is_outbound) {
+ if (sess->ipsec.is_outbound) {
sa_dptr = plt_zmalloc(sizeof(struct roc_ot_ipsec_outb_sa), 8);
if (sa_dptr != NULL) {
roc_ot_ipsec_outb_sa_init(sa_dptr);
@@ -376,6 +374,18 @@ cn10k_sec_session_destroy(void *dev, struct rte_security_session *sec_sess)
return 0;
}
+static int
+cn10k_sec_session_destroy(void *dev, struct rte_security_session *sec_sess)
+{
+ if (unlikely(sec_sess == NULL))
+ return -EINVAL;
+
+ if (((struct cn10k_sec_session *)sec_sess)->proto == RTE_SECURITY_PROTOCOL_IPSEC)
+ return cn10k_sec_ipsec_session_destroy(dev, sec_sess);
+
+ return -EINVAL;
+}
+
static unsigned int
cn10k_sec_session_get_size(void *device __rte_unused)
{
@@ -405,7 +415,7 @@ cn10k_sec_session_stats_get(void *device, struct rte_security_session *sess,
stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
sa = &priv->sa;
- if (priv->is_outbound) {
+ if (priv->ipsec.is_outbound) {
out_sa = &sa->out_sa;
roc_cpt_lf_ctx_flush(&qp->lf, out_sa, false);
rte_delay_ms(1);
@@ -29,13 +29,18 @@ struct cn10k_sec_session {
/** PMD private space */
+ enum rte_security_session_protocol proto;
/** Pre-populated CPT inst words */
struct cnxk_cpt_inst_tmpl inst;
uint16_t max_extended_len;
uint16_t iv_offset;
uint8_t iv_length;
- uint8_t ip_csum;
- bool is_outbound;
+ union {
+ struct {
+ uint8_t ip_csum;
+ bool is_outbound;
+ } ipsec;
+ };
/** Queue pair */
struct cnxk_cpt_qp *qp;
/** Userdata to be set for Rx inject */
@@ -121,7 +121,7 @@ process_outb_sa(struct roc_cpt_lf *lf, struct rte_crypto_op *cop, struct cn10k_s
i = 0;
gather_comp = (struct roc_sglist_comp *)((uint8_t *)m_data + 8);
- i = fill_ipsec_sg_comp_from_pkt(gather_comp, i, m_src);
+ i = fill_sg_comp_from_pkt(gather_comp, i, m_src);
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_sglist_comp);
@@ -132,7 +132,7 @@ process_outb_sa(struct roc_cpt_lf *lf, struct rte_crypto_op *cop, struct cn10k_s
i = 0;
scatter_comp = (struct roc_sglist_comp *)((uint8_t *)gather_comp + g_size_bytes);
- i = fill_ipsec_sg_comp_from_pkt(scatter_comp, i, m_src);
+ i = fill_sg_comp_from_pkt(scatter_comp, i, m_src);
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_sglist_comp);
@@ -170,7 +170,7 @@ process_outb_sa(struct roc_cpt_lf *lf, struct rte_crypto_op *cop, struct cn10k_s
i = 0;
gather_comp = (struct roc_sg2list_comp *)((uint8_t *)m_data);
- i = fill_ipsec_sg2_comp_from_pkt(gather_comp, i, m_src);
+ i = fill_sg2_comp_from_pkt(gather_comp, i, m_src);
cpt_inst_w5.s.gather_sz = ((i + 2) / 3);
g_size_bytes = ((i + 2) / 3) * sizeof(struct roc_sg2list_comp);
@@ -181,7 +181,7 @@ process_outb_sa(struct roc_cpt_lf *lf, struct rte_crypto_op *cop, struct cn10k_s
i = 0;
scatter_comp = (struct roc_sg2list_comp *)((uint8_t *)gather_comp + g_size_bytes);
- i = fill_ipsec_sg2_comp_from_pkt(scatter_comp, i, m_src);
+ i = fill_sg2_comp_from_pkt(scatter_comp, i, m_src);
cpt_inst_w6.s.scatter_sz = ((i + 2) / 3);
@@ -211,7 +211,7 @@ process_inb_sa(struct rte_crypto_op *cop, struct cn10k_sec_session *sess, struct
inst->w4.u64 = sess->inst.w4 | rte_pktmbuf_pkt_len(m_src);
dptr = rte_pktmbuf_mtod(m_src, uint64_t);
inst->dptr = dptr;
- m_src->ol_flags |= (uint64_t)sess->ip_csum;
+ m_src->ol_flags |= (uint64_t)sess->ipsec.ip_csum;
} else if (is_sg_ver2 == false) {
struct roc_sglist_comp *scatter_comp, *gather_comp;
uint32_t g_size_bytes, s_size_bytes;
@@ -234,7 +234,7 @@ process_inb_sa(struct rte_crypto_op *cop, struct cn10k_sec_session *sess, struct
/* Input Gather List */
i = 0;
gather_comp = (struct roc_sglist_comp *)((uint8_t *)m_data + 8);
- i = fill_ipsec_sg_comp_from_pkt(gather_comp, i, m_src);
+ i = fill_sg_comp_from_pkt(gather_comp, i, m_src);
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_sglist_comp);
@@ -242,7 +242,7 @@ process_inb_sa(struct rte_crypto_op *cop, struct cn10k_sec_session *sess, struct
/* Output Scatter List */
i = 0;
scatter_comp = (struct roc_sglist_comp *)((uint8_t *)gather_comp + g_size_bytes);
- i = fill_ipsec_sg_comp_from_pkt(scatter_comp, i, m_src);
+ i = fill_sg_comp_from_pkt(scatter_comp, i, m_src);
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_sglist_comp);
@@ -270,7 +270,7 @@ process_inb_sa(struct rte_crypto_op *cop, struct cn10k_sec_session *sess, struct
i = 0;
gather_comp = (struct roc_sg2list_comp *)((uint8_t *)m_data);
- i = fill_ipsec_sg2_comp_from_pkt(gather_comp, i, m_src);
+ i = fill_sg2_comp_from_pkt(gather_comp, i, m_src);
cpt_inst_w5.s.gather_sz = ((i + 2) / 3);
g_size_bytes = ((i + 2) / 3) * sizeof(struct roc_sg2list_comp);
@@ -278,7 +278,7 @@ process_inb_sa(struct rte_crypto_op *cop, struct cn10k_sec_session *sess, struct
/* Output Scatter List */
i = 0;
scatter_comp = (struct roc_sg2list_comp *)((uint8_t *)gather_comp + g_size_bytes);
- i = fill_ipsec_sg2_comp_from_pkt(scatter_comp, i, m_src);
+ i = fill_sg2_comp_from_pkt(scatter_comp, i, m_src);
cpt_inst_w6.s.scatter_sz = ((i + 2) / 3);
@@ -132,7 +132,7 @@ process_outb_sa(struct cpt_qp_meta_info *m_info, struct rte_crypto_op *cop,
gather_comp = (struct roc_sglist_comp *)((uint8_t *)m_data + 8);
i = fill_sg_comp(gather_comp, i, (uint64_t)hdr, hdr_len);
- i = fill_ipsec_sg_comp_from_pkt(gather_comp, i, m_src);
+ i = fill_sg_comp_from_pkt(gather_comp, i, m_src);
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_sglist_comp);
@@ -146,7 +146,7 @@ process_outb_sa(struct cpt_qp_meta_info *m_info, struct rte_crypto_op *cop,
scatter_comp = (struct roc_sglist_comp *)((uint8_t *)gather_comp + g_size_bytes);
i = fill_sg_comp(scatter_comp, i, (uint64_t)hdr, hdr_len);
- i = fill_ipsec_sg_comp_from_pkt(scatter_comp, i, m_src);
+ i = fill_sg_comp_from_pkt(scatter_comp, i, m_src);
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_sglist_comp);
@@ -228,7 +228,7 @@ process_inb_sa(struct cpt_qp_meta_info *m_info, struct rte_crypto_op *cop,
*/
i = 0;
gather_comp = (struct roc_sglist_comp *)((uint8_t *)m_data + 8);
- i = fill_ipsec_sg_comp_from_pkt(gather_comp, i, m_src);
+ i = fill_sg_comp_from_pkt(gather_comp, i, m_src);
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_sglist_comp);
@@ -239,7 +239,7 @@ process_inb_sa(struct cpt_qp_meta_info *m_info, struct rte_crypto_op *cop,
i = 0;
scatter_comp = (struct roc_sglist_comp *)((uint8_t *)gather_comp + g_size_bytes);
i = fill_sg_comp(scatter_comp, i, (uint64_t)hdr, hdr_len);
- i = fill_ipsec_sg_comp_from_pkt(scatter_comp, i, m_src);
+ i = fill_sg_comp_from_pkt(scatter_comp, i, m_src);
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_sglist_comp);
@@ -11,9 +11,10 @@
#include "roc_ae.h"
#include "roc_cpt.h"
-#define CNXK_CPT_MAX_CAPS 55
-#define CNXK_SEC_CRYPTO_MAX_CAPS 16
-#define CNXK_SEC_MAX_CAPS 9
+#define CNXK_CPT_MAX_CAPS 55
+#define CNXK_SEC_IPSEC_CRYPTO_MAX_CAPS 16
+#define CNXK_SEC_MAX_CAPS 9
+
/**
* Device private data
*/
@@ -23,8 +24,7 @@ struct cnxk_cpt_vf {
uint16_t *rx_chan_base;
struct roc_cpt cpt;
struct rte_cryptodev_capabilities crypto_caps[CNXK_CPT_MAX_CAPS];
- struct rte_cryptodev_capabilities
- sec_crypto_caps[CNXK_SEC_CRYPTO_MAX_CAPS];
+ struct rte_cryptodev_capabilities sec_ipsec_crypto_caps[CNXK_SEC_IPSEC_CRYPTO_MAX_CAPS];
struct rte_security_capability sec_caps[CNXK_SEC_MAX_CAPS];
uint64_t cnxk_fpm_iova[ROC_AE_EC_ID_PMAX];
struct roc_ae_ec_group *ec_grp[ROC_AE_EC_ID_PMAX];
@@ -20,13 +20,14 @@
RTE_DIM(caps_##name)); \
} while (0)
-#define SEC_CAPS_ADD(cnxk_caps, cur_pos, hw_caps, name) \
+#define SEC_IPSEC_CAPS_ADD(cnxk_caps, cur_pos, hw_caps, name) \
do { \
if ((hw_caps[CPT_ENG_TYPE_SE].name) || \
(hw_caps[CPT_ENG_TYPE_IE].name) || \
(hw_caps[CPT_ENG_TYPE_AE].name)) \
- sec_caps_add(cnxk_caps, cur_pos, sec_caps_##name, \
- RTE_DIM(sec_caps_##name)); \
+ sec_ipsec_caps_add(cnxk_caps, cur_pos, \
+ sec_ipsec_caps_##name, \
+ RTE_DIM(sec_ipsec_caps_##name)); \
} while (0)
static const struct rte_cryptodev_capabilities caps_mul[] = {
@@ -1184,7 +1185,7 @@ static const struct rte_cryptodev_capabilities caps_end[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-static const struct rte_cryptodev_capabilities sec_caps_aes[] = {
+static const struct rte_cryptodev_capabilities sec_ipsec_caps_aes[] = {
{ /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -1332,7 +1333,7 @@ static const struct rte_cryptodev_capabilities sec_caps_aes[] = {
},
};
-static const struct rte_cryptodev_capabilities sec_caps_des[] = {
+static const struct rte_cryptodev_capabilities sec_ipsec_caps_des[] = {
{ /* DES */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -1375,7 +1376,7 @@ static const struct rte_cryptodev_capabilities sec_caps_des[] = {
},
};
-static const struct rte_cryptodev_capabilities sec_caps_sha1_sha2[] = {
+static const struct rte_cryptodev_capabilities sec_ipsec_caps_sha1_sha2[] = {
{ /* SHA1 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -1478,7 +1479,7 @@ static const struct rte_cryptodev_capabilities sec_caps_sha1_sha2[] = {
},
};
-static const struct rte_cryptodev_capabilities sec_caps_null[] = {
+static const struct rte_cryptodev_capabilities sec_ipsec_caps_null[] = {
{ /* NULL (CIPHER) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -1691,29 +1692,28 @@ cnxk_crypto_capabilities_get(struct cnxk_cpt_vf *vf)
}
static void
-sec_caps_limit_check(int *cur_pos, int nb_caps)
+sec_ipsec_caps_limit_check(int *cur_pos, int nb_caps)
{
- PLT_VERIFY(*cur_pos + nb_caps <= CNXK_SEC_CRYPTO_MAX_CAPS);
+ PLT_VERIFY(*cur_pos + nb_caps <= CNXK_SEC_IPSEC_CRYPTO_MAX_CAPS);
}
static void
-sec_caps_add(struct rte_cryptodev_capabilities cnxk_caps[], int *cur_pos,
- const struct rte_cryptodev_capabilities *caps, int nb_caps)
+sec_ipsec_caps_add(struct rte_cryptodev_capabilities cnxk_caps[], int *cur_pos,
+ const struct rte_cryptodev_capabilities *caps, int nb_caps)
{
- sec_caps_limit_check(cur_pos, nb_caps);
+ sec_ipsec_caps_limit_check(cur_pos, nb_caps);
memcpy(&cnxk_caps[*cur_pos], caps, nb_caps * sizeof(caps[0]));
*cur_pos += nb_caps;
}
static void
-cn10k_sec_crypto_caps_update(struct rte_cryptodev_capabilities cnxk_caps[],
- int *cur_pos)
+cn10k_sec_ipsec_crypto_caps_update(struct rte_cryptodev_capabilities cnxk_caps[], int *cur_pos)
{
const struct rte_cryptodev_capabilities *cap;
unsigned int i;
- sec_caps_limit_check(cur_pos, 1);
+ sec_ipsec_caps_limit_check(cur_pos, 1);
/* NULL auth */
for (i = 0; i < RTE_DIM(caps_null); i++) {
@@ -1727,7 +1727,7 @@ cn10k_sec_crypto_caps_update(struct rte_cryptodev_capabilities cnxk_caps[],
}
static void
-cn9k_sec_crypto_caps_update(struct rte_cryptodev_capabilities cnxk_caps[])
+cn9k_sec_ipsec_crypto_caps_update(struct rte_cryptodev_capabilities cnxk_caps[])
{
struct rte_cryptodev_capabilities *caps;
@@ -1747,27 +1747,26 @@ cn9k_sec_crypto_caps_update(struct rte_cryptodev_capabilities cnxk_caps[])
}
static void
-sec_crypto_caps_populate(struct rte_cryptodev_capabilities cnxk_caps[],
- union cpt_eng_caps *hw_caps)
+sec_ipsec_crypto_caps_populate(struct rte_cryptodev_capabilities cnxk_caps[],
+ union cpt_eng_caps *hw_caps)
{
int cur_pos = 0;
- SEC_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, aes);
- SEC_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, des);
- SEC_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, sha1_sha2);
+ SEC_IPSEC_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, aes);
+ SEC_IPSEC_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, des);
+ SEC_IPSEC_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, sha1_sha2);
if (roc_model_is_cn10k())
- cn10k_sec_crypto_caps_update(cnxk_caps, &cur_pos);
+ cn10k_sec_ipsec_crypto_caps_update(cnxk_caps, &cur_pos);
else
- cn9k_sec_crypto_caps_update(cnxk_caps);
+ cn9k_sec_ipsec_crypto_caps_update(cnxk_caps);
- sec_caps_add(cnxk_caps, &cur_pos, sec_caps_null,
- RTE_DIM(sec_caps_null));
- sec_caps_add(cnxk_caps, &cur_pos, caps_end, RTE_DIM(caps_end));
+ sec_ipsec_caps_add(cnxk_caps, &cur_pos, sec_ipsec_caps_null, RTE_DIM(sec_ipsec_caps_null));
+ sec_ipsec_caps_add(cnxk_caps, &cur_pos, caps_end, RTE_DIM(caps_end));
}
static void
-cnxk_sec_caps_update(struct rte_security_capability *sec_cap)
+cnxk_sec_ipsec_caps_update(struct rte_security_capability *sec_cap)
{
sec_cap->ipsec.options.udp_encap = 1;
sec_cap->ipsec.options.copy_df = 1;
@@ -1775,7 +1774,7 @@ cnxk_sec_caps_update(struct rte_security_capability *sec_cap)
}
static void
-cn10k_sec_caps_update(struct rte_security_capability *sec_cap)
+cn10k_sec_ipsec_caps_update(struct rte_security_capability *sec_cap)
{
if (sec_cap->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
#ifdef LA_IPSEC_DEBUG
@@ -1797,7 +1796,7 @@ cn10k_sec_caps_update(struct rte_security_capability *sec_cap)
}
static void
-cn9k_sec_caps_update(struct rte_security_capability *sec_cap)
+cn9k_sec_ipsec_caps_update(struct rte_security_capability *sec_cap)
{
if (sec_cap->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
#ifdef LA_IPSEC_DEBUG
@@ -1814,22 +1813,24 @@ cnxk_cpt_caps_populate(struct cnxk_cpt_vf *vf)
unsigned long i;
crypto_caps_populate(vf->crypto_caps, vf->cpt.hw_caps);
- sec_crypto_caps_populate(vf->sec_crypto_caps, vf->cpt.hw_caps);
+ sec_ipsec_crypto_caps_populate(vf->sec_ipsec_crypto_caps, vf->cpt.hw_caps);
PLT_STATIC_ASSERT(RTE_DIM(sec_caps_templ) <= RTE_DIM(vf->sec_caps));
memcpy(vf->sec_caps, sec_caps_templ, sizeof(sec_caps_templ));
for (i = 0; i < RTE_DIM(sec_caps_templ) - 1; i++) {
- vf->sec_caps[i].crypto_capabilities = vf->sec_crypto_caps;
- cnxk_sec_caps_update(&vf->sec_caps[i]);
+ if (vf->sec_caps[i].protocol == RTE_SECURITY_PROTOCOL_IPSEC) {
+ vf->sec_caps[i].crypto_capabilities = vf->sec_ipsec_crypto_caps;
- if (roc_model_is_cn10k())
- cn10k_sec_caps_update(&vf->sec_caps[i]);
+ cnxk_sec_ipsec_caps_update(&vf->sec_caps[i]);
- if (roc_model_is_cn9k())
- cn9k_sec_caps_update(&vf->sec_caps[i]);
+ if (roc_model_is_cn10k())
+ cn10k_sec_ipsec_caps_update(&vf->sec_caps[i]);
+ if (roc_model_is_cn9k())
+ cn9k_sec_ipsec_caps_update(&vf->sec_caps[i]);
+ }
}
}
@@ -129,7 +129,7 @@ fill_sg_comp_from_iov(struct roc_sglist_comp *list, uint32_t i, struct roc_se_io
}
static __rte_always_inline uint32_t
-fill_ipsec_sg_comp_from_pkt(struct roc_sglist_comp *list, uint32_t i, struct rte_mbuf *pkt)
+fill_sg_comp_from_pkt(struct roc_sglist_comp *list, uint32_t i, struct rte_mbuf *pkt)
{
uint32_t buf_sz;
void *vaddr;
@@ -150,7 +150,7 @@ fill_ipsec_sg_comp_from_pkt(struct roc_sglist_comp *list, uint32_t i, struct rte
}
static __rte_always_inline uint32_t
-fill_ipsec_sg2_comp_from_pkt(struct roc_sg2list_comp *list, uint32_t i, struct rte_mbuf *pkt)
+fill_sg2_comp_from_pkt(struct roc_sg2list_comp *list, uint32_t i, struct rte_mbuf *pkt)
{
uint32_t buf_sz;
void *vaddr;