@@ -147,6 +147,8 @@ New Features
* **Updated ipsec_mb crypto driver.**
* Bump minimum IPSec Multi-buffer version to 1.4 for SW PMDs.
+ * KASUMI and ChaCha20-Poly1305 PMDs now share the job API code path
+ with the AESNI_MB PMD.
* **Updated Marvell cnxk crypto driver.**
@@ -8,6 +8,8 @@
RTE_DEFINE_PER_LCORE(pid_t, pid);
+uint8_t pmd_driver_id_aesni_mb;
+
struct aesni_mb_op_buf_data {
struct rte_mbuf *m;
uint32_t offset;
@@ -692,7 +694,7 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
}
/** Configure a aesni multi-buffer session from a crypto xform chain */
-static int
+int
aesni_mb_session_configure(IMB_MGR *mb_mgr,
void *priv_sess,
const struct rte_crypto_sym_xform *xform)
@@ -2039,7 +2041,7 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
return job;
}
-static uint16_t
+uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
@@ -2171,6 +2173,7 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return processed_jobs;
}
+
static inline int
check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
{
@@ -2226,7 +2229,7 @@ verify_sync_dgst(struct rte_crypto_sym_vec *vec,
return k;
}
-static uint32_t
+uint32_t
aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
struct rte_crypto_sym_vec *vec)
@@ -19,6 +19,19 @@
#define MAX_NUM_SEGS 16
+int
+aesni_mb_session_configure(IMB_MGR * m __rte_unused, void *priv_sess,
+ const struct rte_crypto_sym_xform *xform);
+
+uint16_t
+aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+uint32_t
+aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+ struct rte_crypto_sym_vec *vec);
+
static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
{ /* MD5 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -715,8 +728,6 @@ static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-uint8_t pmd_driver_id_aesni_mb;
-
struct aesni_mb_qp_data {
uint8_t temp_digests[IMB_MAX_JOBS][DIGEST_LENGTH_MAX];
/* *< Buffers used to store the digest generated
@@ -3,334 +3,7 @@
*/
#include "pmd_chacha_poly_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-chacha20_poly1305_session_configure(IMB_MGR * mb_mgr __rte_unused,
- void *priv_sess, const struct rte_crypto_sym_xform *xform)
-{
- struct chacha20_poly1305_session *sess = priv_sess;
- const struct rte_crypto_sym_xform *auth_xform;
- const struct rte_crypto_sym_xform *cipher_xform;
- const struct rte_crypto_sym_xform *aead_xform;
-
- uint8_t key_length;
- const uint8_t *key;
- enum ipsec_mb_operation mode;
- int ret = 0;
-
- ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, &aead_xform);
- if (ret)
- return ret;
-
- sess->op = mode;
-
- switch (sess->op) {
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
- case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
- if (aead_xform->aead.algo !=
- RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
- IPSEC_MB_LOG(ERR,
- "The only combined operation supported is CHACHA20 POLY1305");
- ret = -ENOTSUP;
- goto error_exit;
- }
- /* Set IV parameters */
- sess->iv.offset = aead_xform->aead.iv.offset;
- sess->iv.length = aead_xform->aead.iv.length;
- key_length = aead_xform->aead.key.length;
- key = aead_xform->aead.key.data;
- sess->aad_length = aead_xform->aead.aad_length;
- sess->req_digest_length = aead_xform->aead.digest_length;
- break;
- default:
- IPSEC_MB_LOG(
- ERR, "Wrong xform type, has to be AEAD or authentication");
- ret = -ENOTSUP;
- goto error_exit;
- }
-
- /* IV check */
- if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
- sess->iv.length != 0) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- ret = -EINVAL;
- goto error_exit;
- }
-
- /* Check key length */
- if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
- IPSEC_MB_LOG(ERR, "Invalid key length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
- }
-
- /* Digest check */
- if (sess->req_digest_length != CHACHA20_POLY1305_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Invalid digest length");
- ret = -EINVAL;
- goto error_exit;
- } else {
- sess->gen_digest_length = CHACHA20_POLY1305_DIGEST_LENGTH;
- }
-
-error_exit:
- return ret;
-}
-
-/**
- * Process a crypto operation, calling
- * the direct chacha poly API from the multi buffer library.
- *
- * @param qp queue pair
- * @param op symmetric crypto operation
- * @param session chacha poly session
- *
- * @return
- * - Return 0 if success
- */
-static int
-chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- struct rte_crypto_sym_op *sym_op = op->sym;
- struct rte_mbuf *m_src = sym_op->m_src;
- uint32_t offset, data_offset, data_length;
- uint32_t part_len, data_len;
- int total_len;
- uint8_t *tag;
- unsigned int oop = 0;
-
- offset = sym_op->aead.data.offset;
- data_offset = offset;
- data_length = sym_op->aead.data.length;
- RTE_ASSERT(m_src != NULL);
-
- while (offset >= m_src->data_len && data_length != 0) {
- offset -= m_src->data_len;
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
- }
-
- src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
- data_len = m_src->data_len - offset;
- part_len = (data_len < data_length) ? data_len :
- data_length;
-
- /* In-place */
- if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
- dst = src;
- /* Out-of-place */
- else {
- oop = 1;
- /* Segmented destination buffer is not supported
- * if operation is Out-of-place
- */
- RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
- dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
- data_offset);
- }
-
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->iv.offset);
-
- IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- iv_ptr, sym_op->aead.aad.data,
- (uint64_t)session->aad_length);
-
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = sym_op->aead.digest.data;
- IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
-
- } else {
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
-
- total_len = data_length - part_len;
-
- while (total_len) {
- m_src = m_src->next;
-
- RTE_ASSERT(m_src != NULL);
-
- src = rte_pktmbuf_mtod(m_src, uint8_t *);
- if (oop)
- dst += part_len;
- else
- dst = src;
- part_len = (m_src->data_len < total_len) ?
- m_src->data_len : total_len;
-
- if (dst == NULL || src == NULL) {
- IPSEC_MB_LOG(ERR, "Invalid src or dst input");
- return -EINVAL;
- }
- IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
- session->key,
- &qp_data->chacha20_poly1305_ctx_data,
- dst, src, (uint64_t)part_len);
- total_len -= part_len;
- if (total_len < 0) {
- IPSEC_MB_LOG(ERR, "Invalid part len");
- return -EINVAL;
- }
- }
-
- tag = qp_data->temp_digest;
- IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
- &qp_data->chacha20_poly1305_ctx_data,
- tag, session->gen_digest_length);
- }
-
- return 0;
-}
-
-/**
- * Process a completed chacha poly op
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *session)
-{
- struct chacha20_poly1305_qp_data *qp_data =
- ipsec_mb_get_qp_private_data(qp);
-
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Verify digest if required */
- if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
- session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
- uint8_t *digest = op->sym->aead.digest.data;
- uint8_t *tag = qp_data->temp_digest;
-
-#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
- rte_hexdump(stdout, "auth tag (orig):",
- digest, session->req_digest_length);
- rte_hexdump(stdout, "auth tag (calc):",
- tag, session->req_digest_length);
-#endif
- if (memcmp(tag, digest, session->req_digest_length) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- }
-
-}
-
-/**
- * Process a completed Chacha20_poly1305 request
- *
- * @param qp Queue Pair to process
- * @param op Crypto operation
- * @param sess Crypto session
- *
- * @return
- * - void
- */
-static void
-handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
- struct rte_crypto_op *op,
- struct chacha20_poly1305_session *sess)
-{
- post_process_chacha20_poly1305_crypto_op(qp, op, sess);
-
- /* Free session if a session-less crypto op */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(sess, 0, sizeof(struct chacha20_poly1305_session));
- rte_mempool_put(qp->sess_mp, op->sym->session);
- op->sym->session = NULL;
- }
-}
-
-static uint16_t
-chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
- struct rte_crypto_op **ops, uint16_t nb_ops)
-{
- struct chacha20_poly1305_session *sess;
- struct ipsec_mb_qp *qp = queue_pair;
-
- int retval = 0;
- unsigned int i = 0, nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
-
- for (i = 0; i < nb_dequeued; i++) {
-
- sess = ipsec_mb_get_session_private(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
- if (retval < 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->stats.dequeue_err_count++;
- break;
- }
-
- handle_completed_chacha20_poly1305_crypto_op(qp, ops[i], sess);
- }
-
- qp->stats.dequeued_count += i;
-
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -384,7 +57,7 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
chacha_poly_data->caps = chacha20_poly1305_capabilities;
- chacha_poly_data->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
+ chacha_poly_data->dequeue_burst = aesni_mb_dequeue_burst;
chacha_poly_data->feature_flags =
RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -395,10 +68,9 @@ RTE_INIT(ipsec_mb_register_chacha20_poly1305)
RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
chacha_poly_data->internals_priv_size = 0;
chacha_poly_data->ops = &chacha20_poly1305_pmd_ops;
- chacha_poly_data->qp_priv_size =
- sizeof(struct chacha20_poly1305_qp_data);
+ chacha_poly_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
chacha_poly_data->session_configure =
- chacha20_poly1305_session_configure;
+ aesni_mb_session_configure;
chacha_poly_data->session_priv_size =
- sizeof(struct chacha20_poly1305_session);
+ sizeof(struct aesni_mb_session);
}
@@ -7,9 +7,7 @@
#include "ipsec_mb_private.h"
-#define CHACHA20_POLY1305_IV_LENGTH 12
#define CHACHA20_POLY1305_DIGEST_LENGTH 16
-#define CHACHA20_POLY1305_KEY_SIZE 32
static const
struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
@@ -45,30 +43,4 @@ struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
uint8_t pmd_driver_id_chacha20_poly1305;
-/** CHACHA20 POLY1305 private session structure */
-struct chacha20_poly1305_session {
- struct {
- uint16_t length;
- uint16_t offset;
- } iv;
- /**< IV parameters */
- uint16_t aad_length;
- /**< AAD length */
- uint16_t req_digest_length;
- /**< Requested digest length */
- uint16_t gen_digest_length;
- /**< Generated digest length */
- uint8_t key[CHACHA20_POLY1305_KEY_SIZE];
- enum ipsec_mb_operation op;
-} __rte_cache_aligned;
-
-struct chacha20_poly1305_qp_data {
- struct chacha20_poly1305_context_data chacha20_poly1305_ctx_data;
- uint8_t temp_digest[CHACHA20_POLY1305_DIGEST_LENGTH];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_CHACHA_POLY_PRIV_H_ */
@@ -10,406 +10,7 @@
#include <rte_malloc.h>
#include "pmd_kasumi_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
- const struct rte_crypto_sym_xform *xform)
-{
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
- enum ipsec_mb_operation mode;
- struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
- &cipher_xform, NULL);
-
- if (ret)
- return ret;
-
- if (cipher_xform) {
- /* Only KASUMI F8 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
- IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
- return -ENOTSUP;
- }
-
- sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
- if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong IV length");
- return -EINVAL;
- }
-
- /* Initialize key */
- IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
- cipher_xform->cipher.key.data,
- &sess->pKeySched_cipher);
- }
-
- if (auth_xform) {
- /* Only KASUMI F9 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
- IPSEC_MB_LOG(ERR, "Unsupported authentication");
- return -ENOTSUP;
- }
-
- if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
- IPSEC_MB_LOG(ERR, "Wrong digest length");
- return -EINVAL;
- }
-
- sess->auth_op = auth_xform->auth.op;
-
- /* Initialize key */
- IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
- &sess->pKeySched_hash);
- }
-
- sess->op = mode;
- return ret;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- const void *src[num_ops];
- void *dst[num_ops];
- uint8_t *iv_ptr;
- uint64_t iv[num_ops];
- uint32_t num_bytes[num_ops];
-
- for (i = 0; i < num_ops; i++) {
- src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- dst[i] = ops[i]->sym->m_dst
- ? rte_pktmbuf_mtod_offset(ops[i]->sym->m_dst,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3))
- : rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
- uint8_t *,
- (ops[i]->sym->cipher.data.offset >> 3));
- iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
- iv[i] = *((uint64_t *)(iv_ptr));
- num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
- processed_ops++;
- }
-
- if (processed_ops != 0)
- IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
- iv, src, dst, num_bytes,
- processed_ops);
-
- return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
- struct kasumi_session *session)
-{
- uint8_t *src, *dst;
- uint8_t *iv_ptr;
- uint64_t iv;
- uint32_t length_in_bits, offset_in_bits;
-
- offset_in_bits = op->sym->cipher.data.offset;
- src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
- if (op->sym->m_dst == NULL)
- dst = src;
- else
- dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
- iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->cipher_iv_offset);
- iv = *((uint64_t *)(iv_ptr));
- length_in_bits = op->sym->cipher.data.length;
-
- IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
- src, dst, length_in_bits, offset_in_bits);
-
- return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
- struct kasumi_session *session, uint8_t num_ops)
-{
- unsigned int i;
- uint8_t processed_ops = 0;
- uint8_t *src, *dst;
- uint32_t length_in_bits;
- uint32_t num_bytes;
- struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
- for (i = 0; i < num_ops; i++) {
- /* Data must be byte aligned */
- if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- IPSEC_MB_LOG(ERR, "Invalid Offset");
- break;
- }
-
- length_in_bits = ops[i]->sym->auth.data.length;
-
- src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
- (ops[i]->sym->auth.data.offset >> 3));
- /* Direction from next bit after end of message */
- num_bytes = length_in_bits >> 3;
-
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = qp_data->temp_digest;
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
-
- /* Verify digest. */
- if (memcmp(dst, ops[i]->sym->auth.digest.data,
- KASUMI_DIGEST_LENGTH)
- != 0)
- ops[i]->status
- = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- dst = ops[i]->sym->auth.digest.data;
-
- IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
- &session->pKeySched_hash, src,
- num_bytes, dst);
- }
- processed_ops++;
- }
-
- return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
- struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
- unsigned int i;
- unsigned int processed_ops;
-
- switch (session->op) {
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- break;
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
- processed_ops
- = process_kasumi_cipher_op(qp, ops, session, num_ops);
- process_kasumi_hash_op(qp, ops, session, processed_ops);
- break;
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
- processed_ops
- = process_kasumi_hash_op(qp, ops, session, num_ops);
- process_kasumi_cipher_op(qp, ops, session, processed_ops);
- break;
- default:
- /* Operation not supported. */
- processed_ops = 0;
- }
-
- for (i = 0; i < num_ops; i++) {
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- /* Free session if a session-less crypto op. */
- if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
- ops[i]->sym->session = NULL;
- }
- }
- return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
- struct ipsec_mb_qp *qp)
-{
- unsigned int processed_op;
-
- switch (session->op) {
- /* case KASUMI_OP_ONLY_CIPHER: */
- case IPSEC_MB_OP_ENCRYPT_ONLY:
- case IPSEC_MB_OP_DECRYPT_ONLY:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- break;
- /* case KASUMI_OP_ONLY_AUTH: */
- case IPSEC_MB_OP_HASH_GEN_ONLY:
- case IPSEC_MB_OP_HASH_VERIFY_ONLY:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_CIPHER_AUTH: */
- case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
- processed_op = process_kasumi_cipher_op_bit(qp, op, session);
- if (processed_op == 1)
- process_kasumi_hash_op(qp, &op, session, 1);
- break;
- /* case KASUMI_OP_AUTH_CIPHER: */
- case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
- processed_op = process_kasumi_hash_op(qp, &op, session, 1);
- if (processed_op == 1)
- process_kasumi_cipher_op_bit(qp, op, session);
- break;
- default:
- /* Operation not supported. */
- processed_op = 0;
- }
-
- /*
- * If there was no error/authentication failure,
- * change status to successful.
- */
- if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* Free session if a session-less crypto op. */
- if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
- sizeof(struct kasumi_session));
- rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
- op->sym->session = NULL;
- }
- return processed_op;
-}
-
-static uint16_t
-kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct rte_crypto_op *c_ops[nb_ops];
- struct rte_crypto_op *curr_c_op = NULL;
-
- struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
- struct ipsec_mb_qp *qp = queue_pair;
- unsigned int i;
- uint8_t burst_size = 0;
- uint8_t processed_ops;
- unsigned int nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
- (void **)ops, nb_ops, NULL);
- for (i = 0; i < nb_dequeued; i++) {
- curr_c_op = ops[i];
-
-#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
- if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
- || (curr_c_op->sym->m_dst != NULL
- && !rte_pktmbuf_is_contiguous(
- curr_c_op->sym->m_dst))) {
- IPSEC_MB_LOG(ERR,
- "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
- curr_c_op);
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- break;
- }
-#endif
-
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- curr_sess = (struct kasumi_session *)
- ipsec_mb_get_session_private(qp, curr_c_op);
- if (unlikely(curr_sess == NULL
- || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
- curr_c_op->status
- = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- break;
- }
-
- /* If length/offset is at bit-level, process this buffer alone.
- */
- if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
- /* Process the ops of the previous session. */
- if (prev_sess != NULL) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
-
- processed_ops = process_op_bit(curr_c_op,
- curr_sess, qp);
- if (processed_ops != 1)
- break;
-
- continue;
- }
-
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
- /*
- * When there are enough ops to process in a batch,
- * process them, and start a new batch.
- */
- if (burst_size == KASUMI_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = NULL;
- }
- } else {
- /*
- * Different session, process the ops
- * of the previous session.
- */
- processed_ops = process_ops(c_ops, prev_sess, qp,
- burst_size);
- if (processed_ops < burst_size) {
- burst_size = 0;
- break;
- }
-
- burst_size = 0;
- prev_sess = curr_sess;
-
- c_ops[burst_size++] = curr_c_op;
- }
- }
-
- if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess, qp, burst_size);
- }
-
- qp->stats.dequeued_count += i;
- return i;
-}
+#include "pmd_aesni_mb_priv.h"
struct rte_cryptodev_ops kasumi_pmd_ops = {
.dev_configure = ipsec_mb_config,
@@ -460,7 +61,7 @@ RTE_INIT(ipsec_mb_register_kasumi)
= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
kasumi_data->caps = kasumi_capabilities;
- kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
+ kasumi_data->dequeue_burst = aesni_mb_dequeue_burst;
kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -469,7 +70,8 @@ RTE_INIT(ipsec_mb_register_kasumi)
| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
kasumi_data->internals_priv_size = 0;
kasumi_data->ops = &kasumi_pmd_ops;
- kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
- kasumi_data->session_configure = kasumi_session_configure;
- kasumi_data->session_priv_size = sizeof(struct kasumi_session);
+ kasumi_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+ kasumi_data->session_configure = aesni_mb_session_configure;
+ kasumi_data->session_priv_size =
+ sizeof(struct aesni_mb_session);
}
@@ -9,8 +9,6 @@
#define KASUMI_KEY_LENGTH 16
#define KASUMI_IV_LENGTH 8
-#define KASUMI_MAX_BURST 4
-#define BYTE_LEN 8
#define KASUMI_DIGEST_LENGTH 4
uint8_t pmd_driver_id_kasumi;
@@ -60,22 +58,4 @@ static const struct rte_cryptodev_capabilities kasumi_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-/** KASUMI private session structure */
-struct kasumi_session {
- /* Keys have to be 16-byte aligned */
- kasumi_key_sched_t pKeySched_cipher;
- kasumi_key_sched_t pKeySched_hash;
- enum ipsec_mb_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint16_t cipher_iv_offset;
-} __rte_cache_aligned;
-
-struct kasumi_qp_data {
- uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
- /* *< Buffers used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-};
-
#endif /* _PMD_KASUMI_PRIV_H_ */