[v2,3/3] crypto/qat: handle Single Pass Crypto Requests on GEN3 QAT

Message ID 20190927154739.26404-4-adamx.dybkowski@intel.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series QAT: handle Single Pass GCM

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Dybkowski, AdamX Sept. 27, 2019, 3:47 p.m. UTC
  This patch improves the performance of AES GCM by using
the Single Pass Crypto Request functionality when running
on GEN3 QAT. On older hardware it falls back to the classic
chained mode.

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 doc/guides/rel_notes/release_19_11.rst |  7 +++
 drivers/crypto/qat/qat_sym.c           | 13 +++-
 drivers/crypto/qat/qat_sym_session.c   | 86 ++++++++++++++++++++++++--
 drivers/crypto/qat/qat_sym_session.h   |  9 ++-
 4 files changed, 107 insertions(+), 8 deletions(-)
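
For context, a minimal sketch of the application-side setup this commit message refers to: a standard
cryptodev AES-GCM AEAD transform with a 96-bit (12-byte) IV. On a GEN3 QAT device the PMD then builds
Single Pass requests for the session; on older generations the same session uses the chained path. The
key, digest and AAD lengths below are illustrative values, not taken from the patch.

    #include <rte_crypto.h>
    #include <rte_crypto_sym.h>

    /* IV is conventionally carried in the op's private area, right after the sym op. */
    #define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

    static struct rte_crypto_sym_xform aead_xform = {
            .type = RTE_CRYPTO_SYM_XFORM_AEAD,
            .aead = {
                    .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
                    .algo = RTE_CRYPTO_AEAD_AES_GCM,
                    .key = { .data = NULL, .length = 16 },       /* key data set at runtime */
                    .iv = { .offset = IV_OFFSET, .length = 12 }, /* 12-byte IV, required for SPC */
                    .digest_length = 16,
                    .aad_length = 16,
            },
    };

No application change is needed; the choice between Single Pass and chained mode is made entirely
inside the PMD during session configuration.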
  

Comments

Fiona Trahe Oct. 3, 2019, 1:04 p.m. UTC | #1
Hi Adam,

> -----Original Message-----
> From: Dybkowski, AdamX
> Sent: Friday, September 27, 2019 4:48 PM
> To: dev@dpdk.org; Trahe, Fiona <fiona.trahe@intel.com>; Kusztal, ArkadiuszX
> <arkadiuszx.kusztal@intel.com>; akhil.goyal@nxp.com
> Cc: Dybkowski, AdamX <adamx.dybkowski@intel.com>
> Subject: [PATCH v2 3/3] crypto/qat: handle Single Pass Crypto Requests on GEN3 QAT
> 
> This patch improves the performance of AES GCM by using
> the Single Pass Crypto Request functionality when running
> on GEN3 QAT. Falls back to classic chained mode on older
> hardware.
> 
> Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
> ---
>  doc/guides/rel_notes/release_19_11.rst |  7 +++
>  drivers/crypto/qat/qat_sym.c           | 13 +++-
>  drivers/crypto/qat/qat_sym_session.c   | 86 ++++++++++++++++++++++++--
>  drivers/crypto/qat/qat_sym_session.h   |  9 ++-
>  4 files changed, 107 insertions(+), 8 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
> index 573683da4..4817b7f23 100644
> --- a/doc/guides/rel_notes/release_19_11.rst
> +++ b/doc/guides/rel_notes/release_19_11.rst
> @@ -61,6 +61,13 @@ New Features
>    Added stateful decompression support in the Intel QuickAssist Technology PMD.
>    Please note that stateful compression is not supported.
> 
> +* **Enabled Single Pass GCM acceleration on QAT GEN3.**
> +
> +  Added support for Single Pass GCM, available on QAT GEN3 only (Intel
> +  QuickAssist Technology C4xxx). It is automatically chosen instead of the
> +  classic chained mode when running on QAT GEN3, significantly improving
> +  the performance of AES GCM operations.
> +
>  Removed Items
>  -------------
> 
> diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
> index 46ef27a6d..5ff4aa1e5 100644
> --- a/drivers/crypto/qat/qat_sym.c
> +++ b/drivers/crypto/qat/qat_sym.c
> @@ -1,5 +1,5 @@
>  /* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2015-2018 Intel Corporation
> + * Copyright(c) 2015-2019 Intel Corporation
>   */
> 
>  #include <openssl/evp.h>
> @@ -12,6 +12,7 @@
> 
>  #include "qat_sym.h"
> 
> +
>  /** Decrypt a single partial block
>   *  Depends on openssl libcrypto
>   *  Uses ECB+XOR to do CFB encryption, same result, more performant
> @@ -195,7 +196,8 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
>  	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
>  	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
>  	cipher_param = (void *)&qat_req->serv_specif_rqpars;
> -	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
> +	auth_param = (void *)((uint8_t *)cipher_param +
> +			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
> 
>  	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
>  			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
> @@ -593,6 +595,13 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
>  		qat_req->comn_mid.dest_data_addr = dst_buf_start;
>  	}
> 
> +	/* Handle Single-Pass GCM */
> +	if (ctx->is_single_pass) {
> +		cipher_param->spc_aad_addr = op->sym->aead.aad.phys_addr;
> +		cipher_param->spc_auth_res_addr =
> +				op->sym->aead.digest.phys_addr;
> +	}
> +
>  #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
>  	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
>  			sizeof(struct icp_qat_fw_la_bulk_req));
> diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
> index e5167b3fa..7d0f4a69d 100644
> --- a/drivers/crypto/qat/qat_sym_session.c
> +++ b/drivers/crypto/qat/qat_sym_session.c
> @@ -450,7 +450,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
>  		break;
>  	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
>  		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> -			ret = qat_sym_session_configure_aead(xform,
> +			ret = qat_sym_session_configure_aead(dev, xform,
>  					session);
>  			if (ret < 0)
>  				return ret;
> @@ -467,7 +467,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
>  		break;
>  	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
>  		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> -			ret = qat_sym_session_configure_aead(xform,
> +			ret = qat_sym_session_configure_aead(dev, xform,
>  					session);
>  			if (ret < 0)
>  				return ret;
> @@ -503,6 +503,72 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
>  	return 0;
>  }
> 
> +static int
> +qat_sym_session_handle_single_pass(struct qat_sym_dev_private *internals,
> +		struct qat_sym_session *session,
> +		struct rte_crypto_aead_xform *aead_xform)
> +{
> +	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
> +
> +	if (qat_dev_gen == QAT_GEN3 &&
> +			aead_xform->iv.length == QAT_AES_GCM_SPC_IV_SIZE) {
> +		/* Use faster Single-Pass GCM */
[Fiona] min_qat_dev_gen needs to be set in the session here.
Crypto sessions can be built independently of the device, so this catches a very unlikely corner case.
If e.g. a platform had both a GEN1 and a GEN3 device, the session was initialised on the GEN3 device and then
attached to an op sent to the GEN1 device, this min_qat_dev_gen would catch it. The same situation is possible
with ZUC, which is why we added that check there.
  

Patch

diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
index 573683da4..4817b7f23 100644
--- a/doc/guides/rel_notes/release_19_11.rst
+++ b/doc/guides/rel_notes/release_19_11.rst
@@ -61,6 +61,13 @@  New Features
   Added stateful decompression support in the Intel QuickAssist Technology PMD.
   Please note that stateful compression is not supported.
 
+* **Enabled Single Pass GCM acceleration on QAT GEN3.**
+
+  Added support for Single Pass GCM, available on QAT GEN3 only (Intel
+  QuickAssist Technology C4xxx). It is automatically chosen instead of the
+  classic chained mode when running on QAT GEN3, significantly improving
+  the performance of AES GCM operations.
+
 Removed Items
 -------------
 
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 46ef27a6d..5ff4aa1e5 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@ 
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -12,6 +12,7 @@ 
 
 #include "qat_sym.h"
 
+
 /** Decrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
@@ -195,7 +196,8 @@  qat_sym_build_request(void *in_op, uint8_t *out_msg,
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
 	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
 
 	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
 			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
@@ -593,6 +595,13 @@  qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		qat_req->comn_mid.dest_data_addr = dst_buf_start;
 	}
 
+	/* Handle Single-Pass GCM */
+	if (ctx->is_single_pass) {
+		cipher_param->spc_aad_addr = op->sym->aead.aad.phys_addr;
+		cipher_param->spc_auth_res_addr =
+				op->sym->aead.digest.phys_addr;
+	}
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
 			sizeof(struct icp_qat_fw_la_bulk_req));
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index e5167b3fa..7d0f4a69d 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -450,7 +450,7 @@  qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		break;
 	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-			ret = qat_sym_session_configure_aead(xform,
+			ret = qat_sym_session_configure_aead(dev, xform,
 					session);
 			if (ret < 0)
 				return ret;
@@ -467,7 +467,7 @@  qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-			ret = qat_sym_session_configure_aead(xform,
+			ret = qat_sym_session_configure_aead(dev, xform,
 					session);
 			if (ret < 0)
 				return ret;
@@ -503,6 +503,72 @@  qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	return 0;
 }
 
+static int
+qat_sym_session_handle_single_pass(struct qat_sym_dev_private *internals,
+		struct qat_sym_session *session,
+		struct rte_crypto_aead_xform *aead_xform)
+{
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+
+	if (qat_dev_gen == QAT_GEN3 &&
+			aead_xform->iv.length == QAT_AES_GCM_SPC_IV_SIZE) {
+		/* Use faster Single-Pass GCM */
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+				(void *) &session->fw_req.serv_specif_rqpars;
+
+		session->is_single_pass = 1;
+		session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
+		session->cipher_iv.offset = aead_xform->iv.offset;
+		session->cipher_iv.length = aead_xform->iv.length;
+		if (qat_sym_session_aead_create_cd_cipher(session,
+				aead_xform->key.data, aead_xform->key.length))
+			return -EINVAL;
+		session->aad_len = aead_xform->aad_length;
+		session->digest_length = aead_xform->digest_length;
+		if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+			session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+			ICP_QAT_FW_LA_RET_AUTH_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_RET_AUTH_RES);
+		} else {
+			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+			session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+			ICP_QAT_FW_LA_CMP_AUTH_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_CMP_AUTH_RES);
+		}
+		ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+		ICP_QAT_FW_LA_PROTO_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		session->fw_req.comn_hdr.service_cmd_id =
+				ICP_QAT_FW_LA_CMD_CIPHER;
+		session->cd.cipher.cipher_config.val =
+				ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+					ICP_QAT_HW_CIPHER_AEAD_MODE,
+					session->qat_cipher_alg,
+					ICP_QAT_HW_CIPHER_NO_CONVERT,
+					session->qat_dir);
+		QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
+				aead_xform->digest_length,
+				QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+				QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+		session->cd.cipher.cipher_config.reserved =
+				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
+					aead_xform->aad_length);
+		cipher_param->spc_aad_sz = aead_xform->aad_length;
+		cipher_param->spc_auth_res_sz = aead_xform->digest_length;
+	}
+	return 0;
+}
+
 int
 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 				struct rte_crypto_sym_xform *xform,
@@ -646,7 +712,8 @@  qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 }
 
 int
-qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+qat_sym_session_configure_aead(struct rte_cryptodev *dev,
+				struct rte_crypto_sym_xform *xform,
 				struct qat_sym_session *session)
 {
 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
@@ -684,6 +751,17 @@  qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
 		return -EINVAL;
 	}
 
+	session->is_single_pass = 0;
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+		/* Use faster Single-Pass GCM if possible */
+		int res = qat_sym_session_handle_single_pass(
+				dev->data->dev_private, session, aead_xform);
+		if (res < 0)
+			return res;
+		if (session->is_single_pass)
+			return 0;
+	}
+
 	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1444,7 +1522,7 @@  int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 	struct icp_qat_fw_la_auth_req_params *auth_param =
 		(struct icp_qat_fw_la_auth_req_params *)
 		((char *)&req_tmpl->serv_specif_rqpars +
-		sizeof(struct icp_qat_fw_la_cipher_req_params));
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
 	uint16_t state1_size = 0, state2_size = 0;
 	uint16_t hash_offset, cd_size;
 	uint32_t *aad_len = NULL;
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index ce1ca5af8..98985d686 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -1,5 +1,5 @@ 
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -25,6 +25,9 @@ 
 #define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
 #define QAT_3DES_KEY_SZ_OPT3 8 /* K1=K2=K3 */
 
+/* 96-bit case of IV for CCP/GCM single pass algorithm */
+#define QAT_AES_GCM_SPC_IV_SIZE 12
+
 
 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
 	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
@@ -78,6 +81,7 @@  struct qat_sym_session {
 	rte_spinlock_t lock;	/* protects this struct */
 	enum qat_device_gen min_qat_dev_gen;
 	uint8_t aes_cmac;
+	uint8_t is_single_pass;
 };
 
 int
@@ -91,7 +95,8 @@  qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private);
 
 int
-qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+qat_sym_session_configure_aead(struct rte_cryptodev *dev,
+				struct rte_crypto_sym_xform *xform,
 				struct qat_sym_session *session);
 
 int