[v2,3/5] crypto/ipsec_mb: add remaining SGL support

Message ID 20220825142901.898007-4-ciara.power@intel.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series add remaining SGL support to AESNI_MB

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Power, Ciara Aug. 25, 2022, 2:28 p.m. UTC
  The intel-ipsec-mb library supports SGL for the GCM and ChaChaPoly
algorithms using the JOB API.
This support was previously added to the AESNI_MB PMD, but the SGL feature
flags could not be advertised, as other algorithms lacked SGL support.

This patch adds a workaround SGL approach for other algorithms
using the JOB API. The segmented input buffers are copied into a
linear buffer, which is passed as a single job to intel-ipsec-mb.
The job is processed, and on return, the linear buffer is split into the
original destination segments.
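
A minimal sketch of this copy-then-split flow, assuming hypothetical
helper names linearize_sgl()/split_to_sgl() (the patch's actual
implementation is handle_sgl_linear()/post_process_sgl_linear() in
pmd_aesni_mb.c below):

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>

/* Gather every segment of an mbuf chain into one newly allocated
 * linear buffer of total_len bytes. Returns NULL on allocation failure. */
static uint8_t *
linearize_sgl(struct rte_mbuf *m, uint32_t total_len)
{
	uint8_t *buf = rte_zmalloc(NULL, total_len, 0);
	uint32_t off = 0;

	if (buf == NULL)
		return NULL;
	for (; m != NULL && off < total_len; m = m->next) {
		uint32_t len = RTE_MIN((uint32_t)m->data_len, total_len - off);

		rte_memcpy(buf + off, rte_pktmbuf_mtod(m, uint8_t *), len);
		off += len;
	}
	return buf;
}

/* Scatter the processed linear buffer back into the destination chain,
 * segment by segment, mirroring the gather step above. */
static void
split_to_sgl(struct rte_mbuf *m, const uint8_t *buf, uint32_t total_len)
{
	uint32_t off = 0;

	for (; m != NULL && off < total_len; m = m->next) {
		uint32_t len = RTE_MIN((uint32_t)m->data_len, total_len - off);

		rte_memcpy(rte_pktmbuf_mtod(m, uint8_t *), buf + off, len);
		off += len;
	}
}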

Existing AESNI_MB testcases are passing with these feature flags added.

Signed-off-by: Ciara Power <ciara.power@intel.com>

---
v2:
  - Small improvements when copying segments to linear buffer.
  - Added documentation changes.
---
 doc/guides/cryptodevs/aesni_mb.rst          |   1 -
 doc/guides/cryptodevs/features/aesni_mb.ini |   4 +
 doc/guides/rel_notes/release_22_11.rst      |   4 +
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c      | 191 ++++++++++++++++----
 4 files changed, 166 insertions(+), 34 deletions(-)

Comments

De Lara Guarch, Pablo Sept. 15, 2022, 11:47 a.m. UTC | #1
Hi Ciara,

> -----Original Message-----
> From: Power, Ciara <ciara.power@intel.com>
> Sent: Thursday, August 25, 2022 3:29 PM
> To: Zhang, Roy Fan <roy.fan.zhang@intel.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>
> Cc: dev@dpdk.org; Ji, Kai <kai.ji@intel.com>; Power, Ciara
> <ciara.power@intel.com>
> Subject: [PATCH v2 3/5] crypto/ipsec_mb: add remaining SGL support
> 
> The intel-ipsec-mb library supports SGL for the GCM and ChaChaPoly
> algorithms using the JOB API.
> This support was previously added to the AESNI_MB PMD, but the SGL feature
> flags could not be advertised, as other algorithms lacked SGL support.
> 
> This patch adds a workaround SGL approach for other algorithms using the
> JOB API. The segmented input buffers are copied into a linear buffer, which is
> passed as a single job to intel-ipsec-mb.
> The job is processed, and on return, the linear buffer is split into the original
> destination segments.
> 
> Existing AESNI_MB testcases are passing with these feature flags added.
> 
> Signed-off-by: Ciara Power <ciara.power@intel.com>
> 
> ---
> v2:
>   - Small improvements when copying segments to linear buffer.
>   - Added documentation changes.
> ---
>  doc/guides/cryptodevs/aesni_mb.rst          |   1 -
>  doc/guides/cryptodevs/features/aesni_mb.ini |   4 +
>  doc/guides/rel_notes/release_22_11.rst      |   4 +
>  drivers/crypto/ipsec_mb/pmd_aesni_mb.c      | 191 ++++++++++++++++----
>  4 files changed, 166 insertions(+), 34 deletions(-)
> 

...

> +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c

...

> 
> +static int
> +handle_sgl_linear(IMB_JOB *job, struct rte_crypto_op *op, uint32_t dst_offset,
> +		struct aesni_mb_session *session)
> +{
> +	uint64_t cipher_len, auth_len;
> +	uint8_t *src, *linear_buf = NULL;
> +	int total_len;
> +	int lb_offset = 0;

Suggest using unsigned here (probably uint64_t for total_len, as it gets the value from max between cipher_len and auth_len).
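
I.e., something like this (just a sketch of the suggested declarations):

	/* total_len = RTE_MAX(auth_len, cipher_len), and both operands are
	 * uint64_t, so keep the result (and the running offset) unsigned. */
	uint64_t cipher_len, auth_len;
	uint64_t total_len;
	uint64_t lb_offset = 0;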

> +	struct rte_mbuf *src_seg;
> +	uint16_t src_len;
> +
> +	if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN ||
> +			job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN)
> +		cipher_len = (job->msg_len_to_cipher_in_bits >> 3) +
> +				(job->cipher_start_src_offset_in_bits >> 3);
> +	else
> +		cipher_len = job->msg_len_to_cipher_in_bytes +
> +				job->cipher_start_src_offset_in_bytes;
> +
> +	if (job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN ||
> +			job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN)
> +		auth_len = (job->msg_len_to_hash_in_bits >> 3) +
> +				job->hash_start_src_offset_in_bytes;
> +	else if (job->hash_alg == IMB_AUTH_AES_GMAC)
> +		auth_len = job->u.GCM.aad_len_in_bytes;
> +	else
> +		auth_len = job->msg_len_to_hash_in_bytes +
> +				job->hash_start_src_offset_in_bytes;
> +
> +	total_len = RTE_MAX(auth_len, cipher_len);
> +	linear_buf = rte_zmalloc(NULL, total_len + job->auth_tag_output_len_in_bytes, 0);
> +	if (linear_buf == NULL) {
> +		IPSEC_MB_LOG(ERR, "Error allocating memory for SGL Linear Buffer\n");
> +		return -1;
> +	}
> +

..

> +static void
> +post_process_sgl_linear(struct rte_crypto_op *op, IMB_JOB *job,
> +		struct aesni_mb_session *sess, uint8_t *linear_buf)
> +{
> +
> +	int lb_offset = 0;
> +	struct rte_mbuf *m_dst = op->sym->m_dst == NULL ?
> +			op->sym->m_src : op->sym->m_dst;
> +	uint16_t total_len, dst_len;
> +	uint64_t cipher_len, auth_len;
> +	uint8_t *dst;
> +
> +	if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN ||
> +			job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN)
> +		cipher_len = (job->msg_len_to_cipher_in_bits >> 3) +
> +				(job->cipher_start_src_offset_in_bits >> 3);
> +	else
> +		cipher_len = job->msg_len_to_cipher_in_bytes +
> +				job->cipher_start_src_offset_in_bytes;
> +
> +	if (job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN ||
> +			job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN)
> +		auth_len = (job->msg_len_to_hash_in_bits >> 3) +
> +				job->hash_start_src_offset_in_bytes;
> +	else if (job->hash_alg == IMB_AUTH_AES_GMAC)
> +		auth_len = job->u.GCM.aad_len_in_bytes;
> +	else
> +		auth_len = job->msg_len_to_hash_in_bytes +
> +				job->hash_start_src_offset_in_bytes;
> +

This code above is the same as the code in handle_sgl_linear.
Maybe you can have a separate function and remove duplication.
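
A possible shape for that shared helper (name and signature are
illustrative, just sketching the suggestion):

static void
sgl_linear_cipher_auth_len(IMB_JOB *job, uint64_t *cipher_len,
		uint64_t *auth_len)
{
	/* Bit-length ciphers express offset/length in bits; convert. */
	if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN ||
			job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN)
		*cipher_len = (job->msg_len_to_cipher_in_bits >> 3) +
				(job->cipher_start_src_offset_in_bits >> 3);
	else
		*cipher_len = job->msg_len_to_cipher_in_bytes +
				job->cipher_start_src_offset_in_bytes;

	/* Same for bit-length hashes; GMAC hashes the AAD only. */
	if (job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN ||
			job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN)
		*auth_len = (job->msg_len_to_hash_in_bits >> 3) +
				job->hash_start_src_offset_in_bytes;
	else if (job->hash_alg == IMB_AUTH_AES_GMAC)
		*auth_len = job->u.GCM.aad_len_in_bytes;
	else
		*auth_len = job->msg_len_to_hash_in_bytes +
				job->hash_start_src_offset_in_bytes;
}

Both call sites would then reduce to:

	sgl_linear_cipher_auth_len(job, &cipher_len, &auth_len);
	total_len = RTE_MAX(auth_len, cipher_len);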

Patch

diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index 07222ee117..59c134556f 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -72,7 +72,6 @@  Protocol offloads:
 Limitations
 -----------
 
-* Chained mbufs are not supported.
 * Out-of-place is not supported for combined Crypto-CRC DOCSIS security
   protocol.
 * RTE_CRYPTO_CIPHER_DES_DOCSISBPI is not supported for combined Crypto-CRC
diff --git a/doc/guides/cryptodevs/features/aesni_mb.ini b/doc/guides/cryptodevs/features/aesni_mb.ini
index 3c648a391e..e4e965c35a 100644
--- a/doc/guides/cryptodevs/features/aesni_mb.ini
+++ b/doc/guides/cryptodevs/features/aesni_mb.ini
@@ -12,6 +12,10 @@  CPU AVX                = Y
 CPU AVX2               = Y
 CPU AVX512             = Y
 CPU AESNI              = Y
+In Place SGL           = Y
+OOP SGL In SGL Out     = Y
+OOP SGL In LB  Out     = Y
+OOP LB  In SGL Out     = Y
 OOP LB  In LB  Out     = Y
 CPU crypto             = Y
 Symmetric sessionless  = Y
diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 8c021cf050..6416f0a4e1 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -55,6 +55,10 @@  New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Added SGL support to AESNI_MB PMD.**
+
+  Added support for SGL to AESNI_MB PMD. Support for in-place,
+  OOP SGL in SGL out, OOP LB in SGL out, and OOP SGL in LB out added.
 
 Removed Items
 -------------
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 944fce0261..800a9ae72c 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -937,7 +937,7 @@  static inline uint64_t
 auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
 		uint32_t oop, const uint32_t auth_offset,
 		const uint32_t cipher_offset, const uint32_t auth_length,
-		const uint32_t cipher_length)
+		const uint32_t cipher_length, uint8_t lb_sgl)
 {
 	struct rte_mbuf *m_src, *m_dst;
 	uint8_t *p_src, *p_dst;
@@ -945,7 +945,7 @@  auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
 	uint32_t cipher_end, auth_end;
 
 	/* Only cipher then hash needs special calculation. */
-	if (!oop || session->chain_order != IMB_ORDER_CIPHER_HASH)
+	if (!oop || session->chain_order != IMB_ORDER_CIPHER_HASH || lb_sgl)
 		return auth_offset;
 
 	m_src = op->sym->m_src;
@@ -1159,6 +1159,74 @@  handle_aead_sgl_job(IMB_JOB *job, IMB_MGR *mb_mgr,
 	return 0;
 }
 
+static int
+handle_sgl_linear(IMB_JOB *job, struct rte_crypto_op *op, uint32_t dst_offset,
+		struct aesni_mb_session *session)
+{
+	uint64_t cipher_len, auth_len;
+	uint8_t *src, *linear_buf = NULL;
+	int total_len;
+	int lb_offset = 0;
+	struct rte_mbuf *src_seg;
+	uint16_t src_len;
+
+	if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN ||
+			job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN)
+		cipher_len = (job->msg_len_to_cipher_in_bits >> 3) +
+				(job->cipher_start_src_offset_in_bits >> 3);
+	else
+		cipher_len = job->msg_len_to_cipher_in_bytes +
+				job->cipher_start_src_offset_in_bytes;
+
+	if (job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN ||
+			job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN)
+		auth_len = (job->msg_len_to_hash_in_bits >> 3) +
+				job->hash_start_src_offset_in_bytes;
+	else if (job->hash_alg == IMB_AUTH_AES_GMAC)
+		auth_len = job->u.GCM.aad_len_in_bytes;
+	else
+		auth_len = job->msg_len_to_hash_in_bytes +
+				job->hash_start_src_offset_in_bytes;
+
+	total_len = RTE_MAX(auth_len, cipher_len);
+	linear_buf = rte_zmalloc(NULL, total_len + job->auth_tag_output_len_in_bytes, 0);
+	if (linear_buf == NULL) {
+		IPSEC_MB_LOG(ERR, "Error allocating memory for SGL Linear Buffer\n");
+		return -1;
+	}
+
+	for (src_seg = op->sym->m_src; (src_seg != NULL) &&
+			(total_len - lb_offset > 0);
+			src_seg = src_seg->next) {
+		src = rte_pktmbuf_mtod(src_seg, uint8_t *);
+		src_len =  RTE_MIN(src_seg->data_len, total_len - lb_offset);
+		rte_memcpy(linear_buf + lb_offset, src, src_len);
+		lb_offset += src_len;
+	}
+
+	job->src = linear_buf;
+	job->dst = linear_buf + dst_offset;
+	job->user_data2 = linear_buf;
+
+	if (job->hash_alg == IMB_AUTH_AES_GMAC)
+		job->u.GCM.aad = linear_buf;
+
+	if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
+		job->auth_tag_output = linear_buf + lb_offset;
+	else
+		job->auth_tag_output = linear_buf + auth_len;
+
+	return 0;
+}
+
+static inline int
+imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg)
+{
+	if (alg == IMB_CIPHER_CHACHA20_POLY1305
+			|| alg == IMB_CIPHER_GCM)
+		return 1;
+	return 0;
+}
 
 /**
  * Process a crypto operation and complete a IMB_JOB job structure for
@@ -1171,7 +1239,8 @@  handle_aead_sgl_job(IMB_JOB *job, IMB_MGR *mb_mgr,
  *
  * @return
  * - 0 on success, the IMB_JOB will be filled
- * - -1 if invalid session, IMB_JOB will not be filled
+ * - -1 if invalid session or errors allocating SGL linear buffer,
+ *   IMB_JOB will not be filled
  */
 static inline int
 set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
@@ -1191,6 +1260,7 @@  set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 	uint32_t total_len;
 	IMB_JOB base_job;
 	uint8_t sgl = 0;
+	uint8_t lb_sgl = 0;
 	int ret;
 
 	session = ipsec_mb_get_session_private(qp, op);
@@ -1199,18 +1269,6 @@  set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 		return -1;
 	}
 
-	if (op->sym->m_src->nb_segs > 1) {
-		if (session->cipher.mode != IMB_CIPHER_GCM
-				&& session->cipher.mode !=
-				IMB_CIPHER_CHACHA20_POLY1305) {
-			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			IPSEC_MB_LOG(ERR, "Device only supports SGL for AES-GCM"
-					" or CHACHA20_POLY1305 algorithms.");
-			return -1;
-		}
-		sgl = 1;
-	}
-
 	/* Set crypto operation */
 	job->chain_order = session->chain_order;
 
@@ -1233,6 +1291,26 @@  set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 		job->dec_keys = session->cipher.expanded_aes_keys.decode;
 	}
 
+	if (!op->sym->m_dst) {
+		/* in-place operation */
+		m_dst = m_src;
+		oop = 0;
+	} else if (op->sym->m_dst == op->sym->m_src) {
+		/* in-place operation */
+		m_dst = m_src;
+		oop = 0;
+	} else {
+		/* out-of-place operation */
+		m_dst = op->sym->m_dst;
+		oop = 1;
+	}
+
+	if (m_src->nb_segs > 1 || m_dst->nb_segs > 1) {
+		sgl = 1;
+		if (!imb_lib_support_sgl_algo(session->cipher.mode))
+			lb_sgl = 1;
+	}
+
 	switch (job->hash_alg) {
 	case IMB_AUTH_AES_XCBC:
 		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
@@ -1331,20 +1409,6 @@  set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 		m_offset = 0;
 	}
 
-	if (!op->sym->m_dst) {
-		/* in-place operation */
-		m_dst = m_src;
-		oop = 0;
-	} else if (op->sym->m_dst == op->sym->m_src) {
-		/* in-place operation */
-		m_dst = m_src;
-		oop = 0;
-	} else {
-		/* out-of-place operation */
-		m_dst = op->sym->m_dst;
-		oop = 1;
-	}
-
 	/* Set digest output location */
 	if (job->hash_alg != IMB_AUTH_NULL &&
 			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
@@ -1435,7 +1499,7 @@  set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
 				session, oop, auth_off_in_bytes,
 				ciph_off_in_bytes, auth_len_in_bytes,
-				ciph_len_in_bytes);
+				ciph_len_in_bytes, lb_sgl);
 		job->msg_len_to_hash_in_bits = op->sym->auth.data.length;
 
 		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
@@ -1452,7 +1516,7 @@  set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
 				session, oop, auth_off_in_bytes,
 				ciph_off_in_bytes, auth_len_in_bytes,
-				ciph_len_in_bytes);
+				ciph_len_in_bytes, lb_sgl);
 		job->msg_len_to_hash_in_bytes = auth_len_in_bytes;
 
 		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
@@ -1464,7 +1528,7 @@  set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 				session, oop, op->sym->auth.data.offset,
 				op->sym->cipher.data.offset,
 				op->sym->auth.data.length,
-				op->sym->cipher.data.length);
+				op->sym->cipher.data.length, lb_sgl);
 		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
 
 		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
@@ -1525,6 +1589,10 @@  set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 	job->user_data = op;
 
 	if (sgl) {
+
+		if (lb_sgl)
+			return handle_sgl_linear(job, op, m_offset, session);
+
 		base_job = *job;
 		job->sgl_state = IMB_SGL_INIT;
 		job = IMB_SUBMIT_JOB(mb_mgr);
@@ -1695,6 +1763,49 @@  generate_digest(IMB_JOB *job, struct rte_crypto_op *op,
 			sess->auth.req_digest_len);
 }
 
+static void
+post_process_sgl_linear(struct rte_crypto_op *op, IMB_JOB *job,
+		struct aesni_mb_session *sess, uint8_t *linear_buf)
+{
+
+	int lb_offset = 0;
+	struct rte_mbuf *m_dst = op->sym->m_dst == NULL ?
+			op->sym->m_src : op->sym->m_dst;
+	uint16_t total_len, dst_len;
+	uint64_t cipher_len, auth_len;
+	uint8_t *dst;
+
+	if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN ||
+			job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN)
+		cipher_len = (job->msg_len_to_cipher_in_bits >> 3) +
+				(job->cipher_start_src_offset_in_bits >> 3);
+	else
+		cipher_len = job->msg_len_to_cipher_in_bytes +
+				job->cipher_start_src_offset_in_bytes;
+
+	if (job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN ||
+			job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN)
+		auth_len = (job->msg_len_to_hash_in_bits >> 3) +
+				job->hash_start_src_offset_in_bytes;
+	else if (job->hash_alg == IMB_AUTH_AES_GMAC)
+		auth_len = job->u.GCM.aad_len_in_bytes;
+	else
+		auth_len = job->msg_len_to_hash_in_bytes +
+				job->hash_start_src_offset_in_bytes;
+
+	total_len = RTE_MAX(auth_len, cipher_len);
+
+	if (sess->auth.operation != RTE_CRYPTO_AUTH_OP_VERIFY)
+		total_len += job->auth_tag_output_len_in_bytes;
+
+	for (; (m_dst != NULL) && (total_len - lb_offset > 0); m_dst = m_dst->next) {
+		dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
+		dst_len = RTE_MIN(m_dst->data_len, total_len - lb_offset);
+		rte_memcpy(dst, linear_buf + lb_offset, dst_len);
+		lb_offset += dst_len;
+	}
+}
+
 /**
  * Process a completed job and return rte_mbuf which job processed
  *
@@ -1712,6 +1823,7 @@  post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
 	struct aesni_mb_session *sess = NULL;
 	uint32_t driver_id = ipsec_mb_get_driver_id(
 						IPSEC_MB_PMD_TYPE_AESNI_MB);
+	uint8_t *linear_buf = NULL;
 
 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
 	uint8_t is_docsis_sec = 0;
@@ -1740,6 +1852,14 @@  post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
 		case IMB_STATUS_COMPLETED:
 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 
+			if ((op->sym->m_src->nb_segs > 1 ||
+					(op->sym->m_dst != NULL &&
+					op->sym->m_dst->nb_segs > 1)) &&
+					!imb_lib_support_sgl_algo(sess->cipher.mode)) {
+				linear_buf = (uint8_t *) job->user_data2;
+				post_process_sgl_linear(op, job, sess, linear_buf);
+			}
+
 			if (job->hash_alg == IMB_AUTH_NULL)
 				break;
 
@@ -1766,6 +1886,7 @@  post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
 		default:
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		}
+		rte_free(linear_buf);
 	}
 
 	/* Free session if a session-less crypto op */
@@ -2248,7 +2369,11 @@  RTE_INIT(ipsec_mb_register_aesni_mb)
 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
 			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
 			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
-			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
+			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT;
 
 	aesni_mb_data->internals_priv_size = 0;
 	aesni_mb_data->ops = &aesni_mb_pmd_ops;