diff mbox series

[v4,2/7] crypto/cnxk: add event metadata set operation

Message ID 20220501192457.3670278-3-gakhil@marvell.com (mailing list archive)
State Superseded
Delegated to: akhil goyal
Headers show
Series Add new cryptodev op for event metadata | expand

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Akhil Goyal May 1, 2022, 7:24 p.m. UTC
From: Volodymyr Fialko <vfialko@marvell.com>

Added cryptodev operation for setting event crypto
metadata for all supported sessions - sym/asym/security.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
Signed-off-by: Akhil Goyal <gakhil@marvell.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Abhinandan Gujjar <Abhinandan.gujjar@intel.com>
---
 drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 144 +++++++++++++++++++---
 drivers/crypto/cnxk/cn10k_ipsec.h         |   2 +
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c  | 138 ++++++++++++++++++---
 drivers/crypto/cnxk/cn9k_ipsec.h          |   2 +
 drivers/crypto/cnxk/cnxk_ae.h             |   2 +
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |  18 ---
 drivers/crypto/cnxk/cnxk_se.h             |   2 +
 7 files changed, 255 insertions(+), 53 deletions(-)

Comments

Gujjar, Abhinandan S May 2, 2022, 11:07 a.m. UTC | #1
Acked-by: Abhinandan Gujjar <Abhinandan.gujjar@intel.com>

> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Monday, May 2, 2022 12:55 AM
> To: dev@dpdk.org
> Cc: anoobj@marvell.com; jerinj@marvell.com; Gujjar, Abhinandan S
> <abhinandan.gujjar@intel.com>; Jayatheerthan, Jay
> <jay.jayatheerthan@intel.com>; Vangati, Narender
> <narender.vangati@intel.com>; vfialko@marvell.com; Akhil Goyal
> <gakhil@marvell.com>; Zhang, Roy Fan <roy.fan.zhang@intel.com>; Gujjar,
> Abhinandan S <abhinandan.gujjar@intel.com>
> Subject: [PATCH v4 2/7] crypto/cnxk: add event metadata set operation
> 
> From: Volodymyr Fialko <vfialko@marvell.com>
> 
> Added cryptodev operation for setting event crypto metadata for all supported
> sessions - sym/asym/security.
> 
> Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> Signed-off-by: Akhil Goyal <gakhil@marvell.com>
> Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
> Acked-by: Abhinandan Gujjar <Abhinandan.gujjar@intel.com>
> ---
>  drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 144 +++++++++++++++++++---
>  drivers/crypto/cnxk/cn10k_ipsec.h         |   2 +
>  drivers/crypto/cnxk/cn9k_cryptodev_ops.c  | 138 ++++++++++++++++++---
>  drivers/crypto/cnxk/cn9k_ipsec.h          |   2 +
>  drivers/crypto/cnxk/cnxk_ae.h             |   2 +
>  drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |  18 ---
>  drivers/crypto/cnxk/cnxk_se.h             |   2 +
>  7 files changed, 255 insertions(+), 53 deletions(-)
> 
> diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
> b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
> index c4d5d039ec..01aa0d6870 100644
> --- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
> +++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
> @@ -264,30 +264,136 @@ cn10k_cpt_enqueue_burst(void *qptr, struct
> rte_crypto_op **ops, uint16_t nb_ops)
>  	return count + i;
>  }
> 
> -uint16_t
> -cn10k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
> +static int
> +cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev
> __rte_unused,
> +				      void *sess,
> +				      enum rte_crypto_op_type op_type,
> +				      enum rte_crypto_op_sess_type sess_type,
> +				      void *mdata)
>  {
> -	union rte_event_crypto_metadata *ec_mdata;
> -	struct cpt_inflight_req *infl_req;
> +	union rte_event_crypto_metadata *ec_mdata = mdata;
>  	struct rte_event *rsp_info;
> -	uint64_t lmt_base, lmt_arg;
> -	struct cpt_inst_s *inst;
>  	struct cnxk_cpt_qp *qp;
>  	uint8_t cdev_id;
> -	uint16_t lmt_id;
> -	uint16_t qp_id;
> -	int ret;
> -
> -	ec_mdata = cnxk_event_crypto_mdata_get(op);
> -	if (!ec_mdata) {
> -		rte_errno = EINVAL;
> -		return 0;
> -	}
> +	int16_t qp_id;
> +	uint64_t w2;
> 
> +	/* Get queue pair */
>  	cdev_id = ec_mdata->request_info.cdev_id;
>  	qp_id = ec_mdata->request_info.queue_pair_id;
>  	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
> +
> +	/* Prepare w2 */
>  	rsp_info = &ec_mdata->response_info;
> +	w2 = CNXK_CPT_INST_W2(
> +		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
> +		rsp_info->sched_type, rsp_info->queue_id, 0);
> +
> +	/* Set meta according to session type */
> +	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
> +		if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
> +			struct cn10k_sec_session *priv;
> +			struct cn10k_ipsec_sa *sa;
> +
> +			priv = get_sec_session_private_data(sess);
> +			sa = &priv->sa;
> +			sa->qp = qp;
> +			sa->inst.w2 = w2;
> +		} else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +			struct cnxk_se_sess *priv;
> +
> +			priv = get_sym_session_private_data(
> +				sess, cn10k_cryptodev_driver_id);
> +			priv->qp = qp;
> +			priv->cpt_inst_w2 = w2;
> +		} else
> +			return -EINVAL;
> +	} else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
> +		if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +			struct rte_cryptodev_asym_session *asym_sess = sess;
> +			struct cnxk_ae_sess *priv;
> +
> +			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
> +			priv->qp = qp;
> +			priv->cpt_inst_w2 = w2;
> +		} else
> +			return -EINVAL;
> +	} else
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +static inline int
> +cn10k_ca_meta_info_extract(struct rte_crypto_op *op,
> +			 struct cnxk_cpt_qp **qp, uint64_t *w2)
> +{
> +	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
> +		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
> +			struct cn10k_sec_session *priv;
> +			struct cn10k_ipsec_sa *sa;
> +
> +			priv = get_sec_session_private_data(op->sym->sec_session);
> +			sa = &priv->sa;
> +			*qp = sa->qp;
> +			*w2 = sa->inst.w2;
> +		} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +			struct cnxk_se_sess *priv;
> +
> +			priv = get_sym_session_private_data(
> +				op->sym->session,
> cn10k_cryptodev_driver_id);
> +			*qp = priv->qp;
> +			*w2 = priv->cpt_inst_w2;
> +		} else {
> +			union rte_event_crypto_metadata *ec_mdata;
> +			struct rte_event *rsp_info;
> +			uint8_t cdev_id;
> +			uint16_t qp_id;
> +
> +			ec_mdata = (union rte_event_crypto_metadata *)
> +				((uint8_t *)op + op->private_data_offset);
> +			if (!ec_mdata)
> +				return -EINVAL;
> +			rsp_info = &ec_mdata->response_info;
> +			cdev_id = ec_mdata->request_info.cdev_id;
> +			qp_id = ec_mdata->request_info.queue_pair_id;
> +			*qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
> +			*w2 = CNXK_CPT_INST_W2(
> +				(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
> +				rsp_info->sched_type, rsp_info->queue_id, 0);
> +		}
> +	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
> +		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +			struct rte_cryptodev_asym_session *asym_sess;
> +			struct cnxk_ae_sess *priv;
> +
> +			asym_sess = op->asym->session;
> +			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
> +			*qp = priv->qp;
> +			*w2 = priv->cpt_inst_w2;
> +		} else
> +			return -EINVAL;
> +	} else
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +uint16_t
> +cn10k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
> +{
> +	struct cpt_inflight_req *infl_req;
> +	uint64_t lmt_base, lmt_arg, w2;
> +	struct cpt_inst_s *inst;
> +	struct cnxk_cpt_qp *qp;
> +	uint16_t lmt_id;
> +	int ret;
> +
> +	ret = cn10k_ca_meta_info_extract(op, &qp, &w2);
> +	if (unlikely(ret)) {
> +		rte_errno = EINVAL;
> +		return 0;
> +	}
> 
>  	if (unlikely(!qp->ca.enabled)) {
>  		rte_errno = EINVAL;
> @@ -316,9 +422,7 @@ cn10k_cpt_crypto_adapter_enqueue(uintptr_t tag_op,
> struct rte_crypto_op *op)
>  	infl_req->qp = qp;
>  	inst->w0.u64 = 0;
>  	inst->res_addr = (uint64_t)&infl_req->res;
> -	inst->w2.u64 = CNXK_CPT_INST_W2(
> -		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
> -		rsp_info->sched_type, rsp_info->queue_id, 0);
> +	inst->w2.u64 = w2;
>  	inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
> 
>  	if (roc_cpt_is_iq_full(&qp->lf)) {
> @@ -327,7 +431,7 @@ cn10k_cpt_crypto_adapter_enqueue(uintptr_t tag_op,
> struct rte_crypto_op *op)
>  		return 0;
>  	}
> 
> -	if (!rsp_info->sched_type)
> +	if (inst->w2.s.tt == RTE_SCHED_TYPE_ORDERED)
>  		roc_sso_hws_head_wait(tag_op);
> 
> 	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
> @@ -592,4 +696,6 @@ struct rte_cryptodev_ops cn10k_cpt_ops = {
>  	.asym_session_configure = cnxk_ae_session_cfg,
>  	.asym_session_clear = cnxk_ae_session_clear,
> 
> +	/* Event crypto ops */
> +	.session_ev_mdata_set = cn10k_cpt_crypto_adapter_ev_mdata_set,
>  };
> diff --git a/drivers/crypto/cnxk/cn10k_ipsec.h
> b/drivers/crypto/cnxk/cn10k_ipsec.h
> index 647a71cdd5..1c1d904799 100644
> --- a/drivers/crypto/cnxk/cn10k_ipsec.h
> +++ b/drivers/crypto/cnxk/cn10k_ipsec.h
> @@ -20,6 +20,8 @@ struct cn10k_ipsec_sa {
>  	uint16_t iv_offset;
>  	uint8_t iv_length;
>  	bool is_outbound;
> +	/** Queue pair */
> +	struct cnxk_cpt_qp *qp;
> 
>  	/**
>  	 * End of SW mutable area
> diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
> b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
> index d3d441cb24..98fa97ef01 100644
> --- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
> +++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
> @@ -316,28 +316,134 @@ cn9k_cpt_enqueue_burst(void *qptr, struct
> rte_crypto_op **ops, uint16_t nb_ops)
>  	return count;
>  }
> 
> -uint16_t
> -cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
> +static int
> +cn9k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev
> __rte_unused,
> +				     void *sess,
> +				     enum rte_crypto_op_type op_type,
> +				     enum rte_crypto_op_sess_type sess_type,
> +				     void *mdata)
>  {
> -	union rte_event_crypto_metadata *ec_mdata;
> -	struct cpt_inflight_req *infl_req;
> +	union rte_event_crypto_metadata *ec_mdata = mdata;
>  	struct rte_event *rsp_info;
>  	struct cnxk_cpt_qp *qp;
> -	struct cpt_inst_s inst;
>  	uint8_t cdev_id;
>  	uint16_t qp_id;
> -	int ret;
> -
> -	ec_mdata = cnxk_event_crypto_mdata_get(op);
> -	if (!ec_mdata) {
> -		rte_errno = EINVAL;
> -		return 0;
> -	}
> +	uint64_t w2;
> 
> +	/* Get queue pair */
>  	cdev_id = ec_mdata->request_info.cdev_id;
>  	qp_id = ec_mdata->request_info.queue_pair_id;
>  	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
> +
> +	/* Prepare w2 */
>  	rsp_info = &ec_mdata->response_info;
> +	w2 = CNXK_CPT_INST_W2(
> +		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
> +		rsp_info->sched_type, rsp_info->queue_id, 0);
> +
> +	/* Set meta according to session type */
> +	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
> +		if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
> +			struct cn9k_sec_session *priv;
> +			struct cn9k_ipsec_sa *sa;
> +
> +			priv = get_sec_session_private_data(sess);
> +			sa = &priv->sa;
> +			sa->qp = qp;
> +			sa->inst.w2 = w2;
> +		} else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +			struct cnxk_se_sess *priv;
> +
> +			priv = get_sym_session_private_data(
> +				sess, cn9k_cryptodev_driver_id);
> +			priv->qp = qp;
> +			priv->cpt_inst_w2 = w2;
> +		} else
> +			return -EINVAL;
> +	} else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
> +		if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +			struct rte_cryptodev_asym_session *asym_sess = sess;
> +			struct cnxk_ae_sess *priv;
> +
> +			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
> +			priv->qp = qp;
> +			priv->cpt_inst_w2 = w2;
> +		} else
> +			return -EINVAL;
> +	} else
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +static inline int
> +cn9k_ca_meta_info_extract(struct rte_crypto_op *op,
> +			struct cnxk_cpt_qp **qp, struct cpt_inst_s *inst)
> +{
> +	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
> +		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
> +			struct cn9k_sec_session *priv;
> +			struct cn9k_ipsec_sa *sa;
> +
> +			priv = get_sec_session_private_data(op->sym->sec_session);
> +			sa = &priv->sa;
> +			*qp = sa->qp;
> +			inst->w2.u64 = sa->inst.w2;
> +		} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +			struct cnxk_se_sess *priv;
> +
> +			priv = get_sym_session_private_data(
> +				op->sym->session, cn9k_cryptodev_driver_id);
> +			*qp = priv->qp;
> +			inst->w2.u64 = priv->cpt_inst_w2;
> +		} else {
> +			union rte_event_crypto_metadata *ec_mdata;
> +			struct rte_event *rsp_info;
> +			uint8_t cdev_id;
> +			uint16_t qp_id;
> +
> +			ec_mdata = (union rte_event_crypto_metadata *)
> +				((uint8_t *)op + op->private_data_offset);
> +			if (!ec_mdata)
> +				return -EINVAL;
> +			rsp_info = &ec_mdata->response_info;
> +			cdev_id = ec_mdata->request_info.cdev_id;
> +			qp_id = ec_mdata->request_info.queue_pair_id;
> +			*qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
> +			inst->w2.u64 = CNXK_CPT_INST_W2(
> +				(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
> +				rsp_info->sched_type, rsp_info->queue_id, 0);
> +		}
> +	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
> +		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +			struct rte_cryptodev_asym_session *asym_sess;
> +			struct cnxk_ae_sess *priv;
> +
> +			asym_sess = op->asym->session;
> +			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
> +			*qp = priv->qp;
> +			inst->w2.u64 = priv->cpt_inst_w2;
> +		} else
> +			return -EINVAL;
> +	} else
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +uint16_t
> +cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
> +{
> +	struct cpt_inflight_req *infl_req;
> +	struct cnxk_cpt_qp *qp;
> +	struct cpt_inst_s inst;
> +	int ret;
> +
> +	ret = cn9k_ca_meta_info_extract(op, &qp, &inst);
> +	if (unlikely(ret)) {
> +		rte_errno = EINVAL;
> +		return 0;
> +	}
> 
>  	if (unlikely(!qp->ca.enabled)) {
>  		rte_errno = EINVAL;
> @@ -362,9 +468,6 @@ cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op,
> struct rte_crypto_op *op)
>  	infl_req->qp = qp;
>  	inst.w0.u64 = 0;
>  	inst.res_addr = (uint64_t)&infl_req->res;
> -	inst.w2.u64 = CNXK_CPT_INST_W2(
> -		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
> -		rsp_info->sched_type, rsp_info->queue_id, 0);
>  	inst.w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
> 
>  	if (roc_cpt_is_iq_full(&qp->lf)) {
> @@ -373,7 +476,7 @@ cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op,
> struct rte_crypto_op *op)
>  		return 0;
>  	}
> 
> -	if (!rsp_info->sched_type)
> +	if (inst.w2.s.tt == RTE_SCHED_TYPE_ORDERED)
>  		roc_sso_hws_head_wait(tag_op);
> 
> 	cn9k_cpt_inst_submit(&inst, qp->lmtline.lmt_base, qp->lmtline.io_addr);
> @@ -613,4 +716,7 @@ struct rte_cryptodev_ops cn9k_cpt_ops = {
>  	.asym_session_configure = cnxk_ae_session_cfg,
>  	.asym_session_clear = cnxk_ae_session_clear,
> 
> +	/* Event crypto ops */
> +	.session_ev_mdata_set = cn9k_cpt_crypto_adapter_ev_mdata_set,
> +
>  };
> diff --git a/drivers/crypto/cnxk/cn9k_ipsec.h
> b/drivers/crypto/cnxk/cn9k_ipsec.h
> index f3acad561b..499dbc2782 100644
> --- a/drivers/crypto/cnxk/cn9k_ipsec.h
> +++ b/drivers/crypto/cnxk/cn9k_ipsec.h
> @@ -42,6 +42,8 @@ struct cn9k_ipsec_sa {
>  	struct cnxk_on_ipsec_ar ar;
>  	/** Anti replay window size */
>  	uint32_t replay_win_sz;
> +	/** Queue pair */
> +	struct cnxk_cpt_qp *qp;
>  };
> 
>  struct cn9k_sec_session {
> diff --git a/drivers/crypto/cnxk/cnxk_ae.h b/drivers/crypto/cnxk/cnxk_ae.h
> index 01ccfcd334..10854c79c8 100644
> --- a/drivers/crypto/cnxk/cnxk_ae.h
> +++ b/drivers/crypto/cnxk/cnxk_ae.h
> @@ -22,6 +22,8 @@ struct cnxk_ae_sess {
>  	uint64_t *cnxk_fpm_iova;
>  	struct roc_ae_ec_group **ec_grp;
>  	uint64_t cpt_inst_w7;
> +	uint64_t cpt_inst_w2;
> +	struct cnxk_cpt_qp *qp;
>  };
> 
>  static __rte_always_inline void
> diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
> b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
> index ab0f00ee7c..7ece0214dc 100644
> --- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
> +++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
> @@ -125,24 +125,6 @@ int cnxk_ae_session_cfg(struct rte_cryptodev *dev,
> 			struct rte_cryptodev_asym_session *sess);
>  void cnxk_cpt_dump_on_err(struct cnxk_cpt_qp *qp);
> 
> -static inline union rte_event_crypto_metadata *
> -cnxk_event_crypto_mdata_get(struct rte_crypto_op *op)
> -{
> -	union rte_event_crypto_metadata *ec_mdata;
> -
> -	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> -		ec_mdata = rte_cryptodev_sym_session_get_user_data(
> -			op->sym->session);
> -	else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
> -		 op->private_data_offset)
> -		ec_mdata = (union rte_event_crypto_metadata
> -				    *)((uint8_t *)op + op->private_data_offset);
> -	else
> -		return NULL;
> -
> -	return ec_mdata;
> -}
> -
>  static __rte_always_inline void
>  pending_queue_advance(uint64_t *index, const uint64_t mask)
>  {
> diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
> index ce7ca2eda9..a339b80a87 100644
> --- a/drivers/crypto/cnxk/cnxk_se.h
> +++ b/drivers/crypto/cnxk/cnxk_se.h
> @@ -33,6 +33,8 @@ struct cnxk_se_sess {
>  	uint16_t auth_iv_offset;
>  	uint32_t salt;
>  	uint64_t cpt_inst_w7;
> +	uint64_t cpt_inst_w2;
> +	struct cnxk_cpt_qp *qp;
>  	struct roc_se_ctx roc_se_ctx;
>  } __rte_cache_aligned;
> 
> --
> 2.25.1
diff mbox series

Patch

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index c4d5d039ec..01aa0d6870 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -264,30 +264,136 @@  cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 	return count + i;
 }
 
-uint16_t
-cn10k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
+static int
+cn10k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
+				      void *sess,
+				      enum rte_crypto_op_type op_type,
+				      enum rte_crypto_op_sess_type sess_type,
+				      void *mdata)
 {
-	union rte_event_crypto_metadata *ec_mdata;
-	struct cpt_inflight_req *infl_req;
+	union rte_event_crypto_metadata *ec_mdata = mdata;
 	struct rte_event *rsp_info;
-	uint64_t lmt_base, lmt_arg;
-	struct cpt_inst_s *inst;
 	struct cnxk_cpt_qp *qp;
 	uint8_t cdev_id;
-	uint16_t lmt_id;
-	uint16_t qp_id;
-	int ret;
-
-	ec_mdata = cnxk_event_crypto_mdata_get(op);
-	if (!ec_mdata) {
-		rte_errno = EINVAL;
-		return 0;
-	}
+	int16_t qp_id;
+	uint64_t w2;
 
+	/* Get queue pair */
 	cdev_id = ec_mdata->request_info.cdev_id;
 	qp_id = ec_mdata->request_info.queue_pair_id;
 	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
+
+	/* Prepare w2 */
 	rsp_info = &ec_mdata->response_info;
+	w2 = CNXK_CPT_INST_W2(
+		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
+		rsp_info->sched_type, rsp_info->queue_id, 0);
+
+	/* Set meta according to session type */
+	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+		if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+			struct cn10k_sec_session *priv;
+			struct cn10k_ipsec_sa *sa;
+
+			priv = get_sec_session_private_data(sess);
+			sa = &priv->sa;
+			sa->qp = qp;
+			sa->inst.w2 = w2;
+		} else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			struct cnxk_se_sess *priv;
+
+			priv = get_sym_session_private_data(
+				sess, cn10k_cryptodev_driver_id);
+			priv->qp = qp;
+			priv->cpt_inst_w2 = w2;
+		} else
+			return -EINVAL;
+	} else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+		if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			struct rte_cryptodev_asym_session *asym_sess = sess;
+			struct cnxk_ae_sess *priv;
+
+			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
+			priv->qp = qp;
+			priv->cpt_inst_w2 = w2;
+		} else
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+static inline int
+cn10k_ca_meta_info_extract(struct rte_crypto_op *op,
+			 struct cnxk_cpt_qp **qp, uint64_t *w2)
+{
+	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+			struct cn10k_sec_session *priv;
+			struct cn10k_ipsec_sa *sa;
+
+			priv = get_sec_session_private_data(op->sym->sec_session);
+			sa = &priv->sa;
+			*qp = sa->qp;
+			*w2 = sa->inst.w2;
+		} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			struct cnxk_se_sess *priv;
+
+			priv = get_sym_session_private_data(
+				op->sym->session, cn10k_cryptodev_driver_id);
+			*qp = priv->qp;
+			*w2 = priv->cpt_inst_w2;
+		} else {
+			union rte_event_crypto_metadata *ec_mdata;
+			struct rte_event *rsp_info;
+			uint8_t cdev_id;
+			uint16_t qp_id;
+
+			ec_mdata = (union rte_event_crypto_metadata *)
+				((uint8_t *)op + op->private_data_offset);
+			if (!ec_mdata)
+				return -EINVAL;
+			rsp_info = &ec_mdata->response_info;
+			cdev_id = ec_mdata->request_info.cdev_id;
+			qp_id = ec_mdata->request_info.queue_pair_id;
+			*qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
+			*w2 = CNXK_CPT_INST_W2(
+				(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
+				rsp_info->sched_type, rsp_info->queue_id, 0);
+		}
+	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			struct rte_cryptodev_asym_session *asym_sess;
+			struct cnxk_ae_sess *priv;
+
+			asym_sess = op->asym->session;
+			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
+			*qp = priv->qp;
+			*w2 = priv->cpt_inst_w2;
+		} else
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+uint16_t
+cn10k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
+{
+	struct cpt_inflight_req *infl_req;
+	uint64_t lmt_base, lmt_arg, w2;
+	struct cpt_inst_s *inst;
+	struct cnxk_cpt_qp *qp;
+	uint16_t lmt_id;
+	int ret;
+
+	ret = cn10k_ca_meta_info_extract(op, &qp, &w2);
+	if (unlikely(ret)) {
+		rte_errno = EINVAL;
+		return 0;
+	}
 
 	if (unlikely(!qp->ca.enabled)) {
 		rte_errno = EINVAL;
@@ -316,9 +422,7 @@  cn10k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
 	infl_req->qp = qp;
 	inst->w0.u64 = 0;
 	inst->res_addr = (uint64_t)&infl_req->res;
-	inst->w2.u64 = CNXK_CPT_INST_W2(
-		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
-		rsp_info->sched_type, rsp_info->queue_id, 0);
+	inst->w2.u64 = w2;
 	inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
 
 	if (roc_cpt_is_iq_full(&qp->lf)) {
@@ -327,7 +431,7 @@  cn10k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
 		return 0;
 	}
 
-	if (!rsp_info->sched_type)
+	if (inst->w2.s.tt == RTE_SCHED_TYPE_ORDERED)
 		roc_sso_hws_head_wait(tag_op);
 
 	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
@@ -592,4 +696,6 @@  struct rte_cryptodev_ops cn10k_cpt_ops = {
 	.asym_session_configure = cnxk_ae_session_cfg,
 	.asym_session_clear = cnxk_ae_session_clear,
 
+	/* Event crypto ops */
+	.session_ev_mdata_set = cn10k_cpt_crypto_adapter_ev_mdata_set,
 };
diff --git a/drivers/crypto/cnxk/cn10k_ipsec.h b/drivers/crypto/cnxk/cn10k_ipsec.h
index 647a71cdd5..1c1d904799 100644
--- a/drivers/crypto/cnxk/cn10k_ipsec.h
+++ b/drivers/crypto/cnxk/cn10k_ipsec.h
@@ -20,6 +20,8 @@  struct cn10k_ipsec_sa {
 	uint16_t iv_offset;
 	uint8_t iv_length;
 	bool is_outbound;
+	/** Queue pair */
+	struct cnxk_cpt_qp *qp;
 
 	/**
 	 * End of SW mutable area
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index d3d441cb24..98fa97ef01 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -316,28 +316,134 @@  cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 	return count;
 }
 
-uint16_t
-cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
+static int
+cn9k_cpt_crypto_adapter_ev_mdata_set(struct rte_cryptodev *dev __rte_unused,
+				     void *sess,
+				     enum rte_crypto_op_type op_type,
+				     enum rte_crypto_op_sess_type sess_type,
+				     void *mdata)
 {
-	union rte_event_crypto_metadata *ec_mdata;
-	struct cpt_inflight_req *infl_req;
+	union rte_event_crypto_metadata *ec_mdata = mdata;
 	struct rte_event *rsp_info;
 	struct cnxk_cpt_qp *qp;
-	struct cpt_inst_s inst;
 	uint8_t cdev_id;
 	uint16_t qp_id;
-	int ret;
-
-	ec_mdata = cnxk_event_crypto_mdata_get(op);
-	if (!ec_mdata) {
-		rte_errno = EINVAL;
-		return 0;
-	}
+	uint64_t w2;
 
+	/* Get queue pair */
 	cdev_id = ec_mdata->request_info.cdev_id;
 	qp_id = ec_mdata->request_info.queue_pair_id;
 	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
+
+	/* Prepare w2 */
 	rsp_info = &ec_mdata->response_info;
+	w2 = CNXK_CPT_INST_W2(
+		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
+		rsp_info->sched_type, rsp_info->queue_id, 0);
+
+	/* Set meta according to session type */
+	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+		if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+			struct cn9k_sec_session *priv;
+			struct cn9k_ipsec_sa *sa;
+
+			priv = get_sec_session_private_data(sess);
+			sa = &priv->sa;
+			sa->qp = qp;
+			sa->inst.w2 = w2;
+		} else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			struct cnxk_se_sess *priv;
+
+			priv = get_sym_session_private_data(
+				sess, cn9k_cryptodev_driver_id);
+			priv->qp = qp;
+			priv->cpt_inst_w2 = w2;
+		} else
+			return -EINVAL;
+	} else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+		if (sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			struct rte_cryptodev_asym_session *asym_sess = sess;
+			struct cnxk_ae_sess *priv;
+
+			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
+			priv->qp = qp;
+			priv->cpt_inst_w2 = w2;
+		} else
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+static inline int
+cn9k_ca_meta_info_extract(struct rte_crypto_op *op,
+			struct cnxk_cpt_qp **qp, struct cpt_inst_s *inst)
+{
+	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+			struct cn9k_sec_session *priv;
+			struct cn9k_ipsec_sa *sa;
+
+			priv = get_sec_session_private_data(op->sym->sec_session);
+			sa = &priv->sa;
+			*qp = sa->qp;
+			inst->w2.u64 = sa->inst.w2;
+		} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			struct cnxk_se_sess *priv;
+
+			priv = get_sym_session_private_data(
+				op->sym->session, cn9k_cryptodev_driver_id);
+			*qp = priv->qp;
+			inst->w2.u64 = priv->cpt_inst_w2;
+		} else {
+			union rte_event_crypto_metadata *ec_mdata;
+			struct rte_event *rsp_info;
+			uint8_t cdev_id;
+			uint16_t qp_id;
+
+			ec_mdata = (union rte_event_crypto_metadata *)
+				((uint8_t *)op + op->private_data_offset);
+			if (!ec_mdata)
+				return -EINVAL;
+			rsp_info = &ec_mdata->response_info;
+			cdev_id = ec_mdata->request_info.cdev_id;
+			qp_id = ec_mdata->request_info.queue_pair_id;
+			*qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
+			inst->w2.u64 = CNXK_CPT_INST_W2(
+				(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
+				rsp_info->sched_type, rsp_info->queue_id, 0);
+		}
+	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			struct rte_cryptodev_asym_session *asym_sess;
+			struct cnxk_ae_sess *priv;
+
+			asym_sess = op->asym->session;
+			priv = (struct cnxk_ae_sess *)asym_sess->sess_private_data;
+			*qp = priv->qp;
+			inst->w2.u64 = priv->cpt_inst_w2;
+		} else
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+uint16_t
+cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
+{
+	struct cpt_inflight_req *infl_req;
+	struct cnxk_cpt_qp *qp;
+	struct cpt_inst_s inst;
+	int ret;
+
+	ret = cn9k_ca_meta_info_extract(op, &qp, &inst);
+	if (unlikely(ret)) {
+		rte_errno = EINVAL;
+		return 0;
+	}
 
 	if (unlikely(!qp->ca.enabled)) {
 		rte_errno = EINVAL;
@@ -362,9 +468,6 @@  cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
 	infl_req->qp = qp;
 	inst.w0.u64 = 0;
 	inst.res_addr = (uint64_t)&infl_req->res;
-	inst.w2.u64 = CNXK_CPT_INST_W2(
-		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
-		rsp_info->sched_type, rsp_info->queue_id, 0);
 	inst.w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
 
 	if (roc_cpt_is_iq_full(&qp->lf)) {
@@ -373,7 +476,7 @@  cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
 		return 0;
 	}
 
-	if (!rsp_info->sched_type)
+	if (inst.w2.s.tt == RTE_SCHED_TYPE_ORDERED)
 		roc_sso_hws_head_wait(tag_op);
 
 	cn9k_cpt_inst_submit(&inst, qp->lmtline.lmt_base, qp->lmtline.io_addr);
@@ -613,4 +716,7 @@  struct rte_cryptodev_ops cn9k_cpt_ops = {
 	.asym_session_configure = cnxk_ae_session_cfg,
 	.asym_session_clear = cnxk_ae_session_clear,
 
+	/* Event crypto ops */
+	.session_ev_mdata_set = cn9k_cpt_crypto_adapter_ev_mdata_set,
+
 };
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.h b/drivers/crypto/cnxk/cn9k_ipsec.h
index f3acad561b..499dbc2782 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.h
+++ b/drivers/crypto/cnxk/cn9k_ipsec.h
@@ -42,6 +42,8 @@  struct cn9k_ipsec_sa {
 	struct cnxk_on_ipsec_ar ar;
 	/** Anti replay window size */
 	uint32_t replay_win_sz;
+	/** Queue pair */
+	struct cnxk_cpt_qp *qp;
 };
 
 struct cn9k_sec_session {
diff --git a/drivers/crypto/cnxk/cnxk_ae.h b/drivers/crypto/cnxk/cnxk_ae.h
index 01ccfcd334..10854c79c8 100644
--- a/drivers/crypto/cnxk/cnxk_ae.h
+++ b/drivers/crypto/cnxk/cnxk_ae.h
@@ -22,6 +22,8 @@  struct cnxk_ae_sess {
 	uint64_t *cnxk_fpm_iova;
 	struct roc_ae_ec_group **ec_grp;
 	uint64_t cpt_inst_w7;
+	uint64_t cpt_inst_w2;
+	struct cnxk_cpt_qp *qp;
 };
 
 static __rte_always_inline void
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index ab0f00ee7c..7ece0214dc 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -125,24 +125,6 @@  int cnxk_ae_session_cfg(struct rte_cryptodev *dev,
 			struct rte_cryptodev_asym_session *sess);
 void cnxk_cpt_dump_on_err(struct cnxk_cpt_qp *qp);
 
-static inline union rte_event_crypto_metadata *
-cnxk_event_crypto_mdata_get(struct rte_crypto_op *op)
-{
-	union rte_event_crypto_metadata *ec_mdata;
-
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-		ec_mdata = rte_cryptodev_sym_session_get_user_data(
-			op->sym->session);
-	else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
-		 op->private_data_offset)
-		ec_mdata = (union rte_event_crypto_metadata
-				    *)((uint8_t *)op + op->private_data_offset);
-	else
-		return NULL;
-
-	return ec_mdata;
-}
-
 static __rte_always_inline void
 pending_queue_advance(uint64_t *index, const uint64_t mask)
 {
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index ce7ca2eda9..a339b80a87 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -33,6 +33,8 @@  struct cnxk_se_sess {
 	uint16_t auth_iv_offset;
 	uint32_t salt;
 	uint64_t cpt_inst_w7;
+	uint64_t cpt_inst_w2;
+	struct cnxk_cpt_qp *qp;
 	struct roc_se_ctx roc_se_ctx;
 } __rte_cache_aligned;