[v2,5/8] crypto/cnxk: add cn9k crypto adapter fast path ops

Message ID f77448479b1b7b094fa13fb81fdfeaff4a98664e.1630584303.git.sthotton@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Akhil Goyal
Headers
Series Crypto adapter support for Marvell CNXK driver

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Shijith Thotton Sept. 2, 2021, 12:17 p.m. UTC
  Added crypto adapter enqueue and dequeue operations for CN9K.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
Acked-by: Anoob Joseph <anoobj@marvell.com>
---
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 235 ++++++++++++++++-------
 drivers/crypto/cnxk/cn9k_cryptodev_ops.h |   6 +
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h |  28 +++
 drivers/crypto/cnxk/meson.build          |   2 +-
 drivers/crypto/cnxk/version.map          |   5 +
 5 files changed, 205 insertions(+), 71 deletions(-)
  

Patch

diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 724965be5b..08f08c8339 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -4,6 +4,7 @@ 
 
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_event_crypto_adapter.h>
 
 #include "cn9k_cryptodev.h"
 #include "cn9k_cryptodev_ops.h"
@@ -62,27 +63,94 @@  cn9k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
 	return NULL;
 }
 
+static inline int
+cn9k_cpt_prepare_instruction(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
+			     struct cpt_inflight_req *infl_req,
+			     struct cpt_inst_s *inst)
+{
+	int ret;
+
+	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+		struct rte_crypto_sym_op *sym_op;
+		struct cnxk_se_sess *sess;
+
+		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			sym_op = op->sym;
+			sess = get_sym_session_private_data(
+				sym_op->session, cn9k_cryptodev_driver_id);
+			ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
+						     inst);
+		} else {
+			sess = cn9k_cpt_sym_temp_sess_create(qp, op);
+			if (unlikely(sess == NULL)) {
+				plt_dp_err("Could not create temp session");
+				return -1;
+			}
+
+			ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
+						     inst);
+			if (unlikely(ret)) {
+				sym_session_clear(cn9k_cryptodev_driver_id,
+						  op->sym->session);
+				rte_mempool_put(qp->sess_mp, op->sym->session);
+			}
+		}
+		inst->w7.u64 = sess->cpt_inst_w7;
+	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+		struct rte_crypto_asym_op *asym_op;
+		struct cnxk_ae_sess *sess;
+
+		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			asym_op = op->asym;
+			sess = get_asym_session_private_data(
+				asym_op->session, cn9k_cryptodev_driver_id);
+			ret = cnxk_ae_enqueue(qp, op, infl_req, inst, sess);
+			inst->w7.u64 = sess->cpt_inst_w7;
+		} else {
+			ret = -EINVAL;
+		}
+	} else {
+		ret = -EINVAL;
+		plt_dp_err("Unsupported op type");
+	}
+
+	return ret;
+}
+
+static inline void
+cn9k_cpt_submit_instruction(struct cpt_inst_s *inst, uint64_t lmtline,
+			    uint64_t io_addr)
+{
+	uint64_t lmt_status;
+
+	do {
+		/* Copy CPT command to LMTLINE */
+		roc_lmt_mov((void *)lmtline, inst, 2);
+
+		/*
+		 * Make sure compiler does not reorder memcpy and ldeor.
+		 * LMTST transactions are always flushed from the write
+		 * buffer immediately, a DMB is not required to push out
+		 * LMTSTs.
+		 */
+		rte_io_wmb();
+		lmt_status = roc_lmt_submit_ldeor(io_addr);
+	} while (lmt_status == 0);
+}
+
 static uint16_t
 cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct cpt_inflight_req *infl_req;
-	struct rte_crypto_asym_op *asym_op;
-	struct rte_crypto_sym_op *sym_op;
 	uint16_t nb_allowed, count = 0;
 	struct cnxk_cpt_qp *qp = qptr;
 	struct pending_queue *pend_q;
 	struct rte_crypto_op *op;
 	struct cpt_inst_s inst;
-	uint64_t lmt_status;
-	uint64_t lmtline;
-	uint64_t io_addr;
 	int ret;
 
 	pend_q = &qp->pend_q;
 
-	lmtline = qp->lmtline.lmt_base;
-	io_addr = qp->lmtline.io_addr;
-
 	inst.w0.u64 = 0;
 	inst.w2.u64 = 0;
 	inst.w3.u64 = 0;
@@ -95,77 +163,18 @@  cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 		infl_req = &pend_q->req_queue[pend_q->enq_tail];
 		infl_req->op_flags = 0;
 
-		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
-			struct cnxk_se_sess *sess;
-
-			if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-				sym_op = op->sym;
-				sess = get_sym_session_private_data(
-					sym_op->session,
-					cn9k_cryptodev_driver_id);
-				ret = cn9k_cpt_sym_inst_fill(qp, op, sess,
-							     infl_req, &inst);
-			} else {
-				sess = cn9k_cpt_sym_temp_sess_create(qp, op);
-				if (unlikely(sess == NULL)) {
-					plt_dp_err(
-						"Could not create temp session");
-					break;
-				}
-
-				ret = cn9k_cpt_sym_inst_fill(qp, op, sess,
-							     infl_req, &inst);
-				if (unlikely(ret)) {
-					sym_session_clear(
-						cn9k_cryptodev_driver_id,
-						op->sym->session);
-					rte_mempool_put(qp->sess_mp,
-							op->sym->session);
-				}
-			}
-			inst.w7.u64 = sess->cpt_inst_w7;
-		} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
-			struct cnxk_ae_sess *sess;
-
-			ret = -EINVAL;
-			if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-				asym_op = op->asym;
-				sess = get_asym_session_private_data(
-					asym_op->session,
-					cn9k_cryptodev_driver_id);
-				ret = cnxk_ae_enqueue(qp, op, infl_req, &inst,
-						      sess);
-				inst.w7.u64 = sess->cpt_inst_w7;
-			}
-		} else {
-			plt_dp_err("Unsupported op type");
-			break;
-		}
-
+		ret = cn9k_cpt_prepare_instruction(qp, op, infl_req, &inst);
 		if (unlikely(ret)) {
 			plt_dp_err("Could not process op: %p", op);
 			break;
 		}
 
 		infl_req->cop = op;
-
 		infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
 		inst.res_addr = (uint64_t)&infl_req->res;
 
-		do {
-			/* Copy CPT command to LMTLINE */
-			memcpy((void *)lmtline, &inst, sizeof(inst));
-
-			/*
-			 * Make sure compiler does not reorder memcpy and ldeor.
-			 * LMTST transactions are always flushed from the write
-			 * buffer immediately, a DMB is not required to push out
-			 * LMTSTs.
-			 */
-			rte_io_wmb();
-			lmt_status = roc_lmt_submit_ldeor(io_addr);
-		} while (lmt_status == 0);
-
+		cn9k_cpt_submit_instruction(&inst, qp->lmtline.lmt_base,
+					    qp->lmtline.io_addr);
 		MOD_INC(pend_q->enq_tail, qp->lf.nb_desc);
 	}
 
@@ -176,6 +185,72 @@  cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 	return count;
 }
 
+uint16_t
+cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
+{
+	union rte_event_crypto_metadata *ec_mdata;
+	struct cpt_inflight_req *infl_req;
+	struct rte_event *rsp_info;
+	struct cnxk_cpt_qp *qp;
+	struct cpt_inst_s inst;
+	uint8_t cdev_id;
+	uint16_t qp_id;
+	int ret;
+
+	ec_mdata = cnxk_event_crypto_mdata_get(op);
+	if (!ec_mdata) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	cdev_id = ec_mdata->request_info.cdev_id;
+	qp_id = ec_mdata->request_info.queue_pair_id;
+	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
+	rsp_info = &ec_mdata->response_info;
+
+	if (unlikely(!qp->ca.enabled)) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
+		rte_errno = ENOMEM;
+		return 0;
+	}
+	infl_req->op_flags = 0;
+
+	ret = cn9k_cpt_prepare_instruction(qp, op, infl_req, &inst);
+	if (unlikely(ret)) {
+		plt_dp_err("Could not process op: %p", op);
+		rte_mempool_put(qp->ca.req_mp, infl_req);
+		return 0;
+	}
+
+	infl_req->cop = op;
+	infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
+	infl_req->qp = qp;
+	inst.w0.u64 = 0;
+	inst.res_addr = (uint64_t)&infl_req->res;
+	inst.w2.u64 = CNXK_CPT_INST_W2(
+		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
+		rsp_info->sched_type, rsp_info->queue_id, 0);
+	inst.w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
+
+	if (roc_cpt_is_iq_full(&qp->lf)) {
+		rte_mempool_put(qp->ca.req_mp, infl_req);
+		rte_errno = EAGAIN;
+		return 0;
+	}
+
+	if (!rsp_info->sched_type)
+		roc_sso_hws_head_wait(tag_op);
+
+	cn9k_cpt_submit_instruction(&inst, qp->lmtline.lmt_base,
+				    qp->lmtline.io_addr);
+
+	return 1;
+}
+
 static inline void
 cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
 			      struct cpt_inflight_req *infl_req)
@@ -249,6 +324,26 @@  cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
 	}
 }
 
+uintptr_t
+cn9k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
+{
+	struct cpt_inflight_req *infl_req;
+	struct rte_crypto_op *cop;
+	struct cnxk_cpt_qp *qp;
+
+	infl_req = (struct cpt_inflight_req *)(get_work1);
+	cop = infl_req->cop;
+	qp = infl_req->qp;
+
+	cn9k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req);
+
+	if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+		rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
+
+	rte_mempool_put(qp->ca.req_mp, infl_req);
+	return (uintptr_t)cop;
+}
+
 static uint16_t
 cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.h b/drivers/crypto/cnxk/cn9k_cryptodev_ops.h
index 2277f6bcfb..1255de33ae 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.h
@@ -11,4 +11,10 @@  extern struct rte_cryptodev_ops cn9k_cpt_ops;
 
 void cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev);
 
+__rte_internal
+uint16_t cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op,
+					 struct rte_crypto_op *op);
+__rte_internal
+uintptr_t cn9k_cpt_crypto_adapter_dequeue(uintptr_t get_work1);
+
 #endif /* _CN9K_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index 22dc2ab78d..0d02d44799 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -6,6 +6,7 @@ 
 #define _CNXK_CRYPTODEV_OPS_H_
 
 #include <rte_cryptodev.h>
+#include <rte_event_crypto_adapter.h>
 
 #include "roc_api.h"
 
@@ -16,6 +17,13 @@ 
 
 #define MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)
 
+/* Macros to form words in CPT instruction */
+#define CNXK_CPT_INST_W2(tag, tt, grp, rvu_pf_func)                            \
+	((tag) | ((uint64_t)(tt) << 32) | ((uint64_t)(grp) << 34) |            \
+	 ((uint64_t)(rvu_pf_func) << 48))
+#define CNXK_CPT_INST_W3(qord, wqe_ptr)                                        \
+	(qord | ((uintptr_t)(wqe_ptr) >> 3) << 3)
+
 struct cpt_qp_meta_info {
 	struct rte_mempool *pool;
 	int mlen;
@@ -40,6 +48,7 @@  struct cpt_inflight_req {
 	struct rte_crypto_op *cop;
 	void *mdata;
 	uint8_t op_flags;
+	void *qp;
 } __rte_aligned(16);
 
 struct pending_queue {
@@ -122,4 +131,23 @@  int cnxk_ae_session_cfg(struct rte_cryptodev *dev,
 			struct rte_crypto_asym_xform *xform,
 			struct rte_cryptodev_asym_session *sess,
 			struct rte_mempool *pool);
+
+static inline union rte_event_crypto_metadata *
+cnxk_event_crypto_mdata_get(struct rte_crypto_op *op)
+{
+	union rte_event_crypto_metadata *ec_mdata;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		ec_mdata = rte_cryptodev_sym_session_get_user_data(
+			op->sym->session);
+	else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+		 op->private_data_offset)
+		ec_mdata = (union rte_event_crypto_metadata
+				    *)((uint8_t *)op + op->private_data_offset);
+	else
+		return NULL;
+
+	return ec_mdata;
+}
+
 #endif /* _CNXK_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/meson.build b/drivers/crypto/cnxk/meson.build
index c56d6cf35d..e076783629 100644
--- a/drivers/crypto/cnxk/meson.build
+++ b/drivers/crypto/cnxk/meson.build
@@ -20,6 +20,6 @@  sources = files(
         'cnxk_cryptodev_sec.c',
 )
 
-deps += ['bus_pci', 'common_cnxk', 'security']
+deps += ['bus_pci', 'common_cnxk', 'security', 'eventdev']
 
 includes += include_directories('../../../lib/net')
diff --git a/drivers/crypto/cnxk/version.map b/drivers/crypto/cnxk/version.map
index ee80c51721..0817743947 100644
--- a/drivers/crypto/cnxk/version.map
+++ b/drivers/crypto/cnxk/version.map
@@ -1,3 +1,8 @@ 
 INTERNAL {
+	global:
+
+	cn9k_cpt_crypto_adapter_enqueue;
+	cn9k_cpt_crypto_adapter_dequeue;
+
 	local: *;
 };