[v6,2/3] crypto/qat: add sm2 encryption/decryption function
Commit Message
This commit adds SM2 elliptic curve based asymmetric
encryption and decryption to the Intel QuickAssist
Technology PMD.
Signed-off-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
---
doc/guides/cryptodevs/features/qat.ini | 1 +
doc/guides/rel_notes/release_24_11.rst | 4 +
.../common/qat/qat_adf/icp_qat_fw_mmp_ids.h | 3 +
drivers/common/qat/qat_adf/qat_pke.h | 20 +++
drivers/crypto/qat/qat_asym.c | 140 +++++++++++++++++-
5 files changed, 162 insertions(+), 6 deletions(-)
Comments
On Tue, 22 Oct 2024 20:05:59 +0100
Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com> wrote:
> + uint32_t alg_bytesize = cookie->alg_bytesize;
> +
> + rte_memcpy(asym_op->sm2.c1.x.data, cookie->output_array[0], alg_bytesize);
> + rte_memcpy(asym_op->sm2.c1.y.data, cookie->output_array[1], alg_bytesize);
> + rte_memcpy(asym_op->sm2.kp.x.data, cookie->output_array[2], alg_bytesize);
> + rte_memcpy(asym_op->sm2.kp.y.data, cookie->output_array[3], alg_bytesize);
Since the copy is small and not in the fast path, there is no reason to use rte_memcpy().
The memcpy() function is just as fast, gets inlined, and has more checking from gcc, Coverity and ASAN, so it is preferred.
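For illustration, a minimal sketch (not part of the patch) of the same copies in the new sm2_encryption_collect() using plain memcpy(); it assumes only the field names already shown in the diff below plus a <string.h> include in qat_asym.c:

    #include <string.h>	/* memcpy */

    /* same buffers and sizes as in the patch; alg_bytesize is 32 for SM2 */
    memcpy(asym_op->sm2.c1.x.data, cookie->output_array[0], alg_bytesize);
    memcpy(asym_op->sm2.c1.y.data, cookie->output_array[1], alg_bytesize);
    memcpy(asym_op->sm2.kp.x.data, cookie->output_array[2], alg_bytesize);
    memcpy(asym_op->sm2.kp.y.data, cookie->output_array[3], alg_bytesize);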
> This commit adds SM2 elliptic curve based asymmetric
> encryption and decryption to the Intel QuickAssist
> Technology PMD.
>
> Signed-off-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
> ---
> doc/guides/cryptodevs/features/qat.ini | 1 +
> doc/guides/rel_notes/release_24_11.rst | 4 +
> .../common/qat/qat_adf/icp_qat_fw_mmp_ids.h | 3 +
> drivers/common/qat/qat_adf/qat_pke.h | 20 +++
> drivers/crypto/qat/qat_asym.c | 140 +++++++++++++++++-
> 5 files changed, 162 insertions(+), 6 deletions(-)
>
> diff --git a/doc/guides/cryptodevs/features/qat.ini b/doc/guides/cryptodevs/features/qat.ini
> index f41d29158f..219dd1e011 100644
> --- a/doc/guides/cryptodevs/features/qat.ini
> +++ b/doc/guides/cryptodevs/features/qat.ini
> @@ -71,6 +71,7 @@ ZUC EIA3 = Y
> AES CMAC (128) = Y
> SM3 = Y
> SM3 HMAC = Y
> +SM2 = Y
SM2 is an asymmetric algorithm. Please move it to the asymmetric section.
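For reference, a sketch of the suggested placement, assuming qat.ini follows the usual DPDK crypto feature-file layout with a dedicated asymmetric section (the section header below follows that convention and is not taken from this patch):

    ;
    ; Supported Asymmetric algorithms of the 'qat' crypto driver.
    ;
    [Asymmetric]
    SM2 = Y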
>
> ;
> ; Supported AEAD algorithms of the 'qat' crypto driver.
> diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
> index 0f91dae987..2404753e54 100644
> --- a/doc/guides/rel_notes/release_24_11.rst
> +++ b/doc/guides/rel_notes/release_24_11.rst
> @@ -247,6 +247,10 @@ New Features
> Added ability for node to advertise and update multiple xstat counters,
> that can be retrieved using ``rte_graph_cluster_stats_get``.
>
> +* **Updated the QuickAssist Technology (QAT) Crypto PMD.**
> +
> + * Added SM2 encryption and decryption algorithms.
> +
>
> Removed Items
> -------------
> diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_mmp_ids.h b/drivers/common/qat/qat_adf/icp_qat_fw_mmp_ids.h
> index 630c6e1a9b..aa49612ca1 100644
> --- a/drivers/common/qat/qat_adf/icp_qat_fw_mmp_ids.h
> +++ b/drivers/common/qat/qat_adf/icp_qat_fw_mmp_ids.h
> @@ -1542,6 +1542,9 @@ icp_qat_fw_mmp_ecdsa_verify_gfp_521_input::in in @endlink
> * @li no output parameters
> */
>
> +#define PKE_ECSM2_ENCRYPTION 0x25221720
> +#define PKE_ECSM2_DECRYPTION 0x201716e6
> +
> #define PKE_LIVENESS 0x00000001
> /**< Functionality ID for PKE_LIVENESS
> * @li 0 input parameter(s)
> diff --git a/drivers/common/qat/qat_adf/qat_pke.h b/drivers/common/qat/qat_adf/qat_pke.h
> index f88932a275..ac051e965d 100644
> --- a/drivers/common/qat/qat_adf/qat_pke.h
> +++ b/drivers/common/qat/qat_adf/qat_pke.h
> @@ -334,4 +334,24 @@ get_sm2_ecdsa_verify_function(void)
> return qat_function;
> }
>
> +static struct qat_asym_function
> +get_sm2_encryption_function(void)
> +{
> + struct qat_asym_function qat_function = {
> + PKE_ECSM2_ENCRYPTION, 32
> + };
> +
> + return qat_function;
> +}
> +
> +static struct qat_asym_function
> +get_sm2_decryption_function(void)
> +{
> + struct qat_asym_function qat_function = {
> + PKE_ECSM2_DECRYPTION, 32
> + };
> +
> + return qat_function;
> +}
> +
> #endif
> diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
> index 9e97582e22..991684135c 100644
> --- a/drivers/crypto/qat/qat_asym.c
> +++ b/drivers/crypto/qat/qat_asym.c
> @@ -933,6 +933,15 @@ sm2_ecdsa_sign_set_input(struct icp_qat_fw_pke_request *qat_req,
> qat_req->input_param_count = 3;
> qat_req->output_param_count = 2;
>
> + HEXDUMP("SM2 K test", asym_op->sm2.k.data,
> + cookie->alg_bytesize);
> + HEXDUMP("SM2 K", cookie->input_array[0],
> + cookie->alg_bytesize);
> + HEXDUMP("SM2 msg", cookie->input_array[1],
> + cookie->alg_bytesize);
> + HEXDUMP("SM2 pkey", cookie->input_array[2],
> + cookie->alg_bytesize);
> +
> return RTE_CRYPTO_OP_STATUS_SUCCESS;
> }
>
> @@ -983,6 +992,114 @@ sm2_ecdsa_sign_collect(struct rte_crypto_asym_op *asym_op,
> return RTE_CRYPTO_OP_STATUS_SUCCESS;
> }
>
> +static int
> +sm2_encryption_set_input(struct icp_qat_fw_pke_request *qat_req,
> + struct qat_asym_op_cookie *cookie,
> + const struct rte_crypto_asym_op *asym_op,
> + const struct rte_crypto_asym_xform *xform)
> +{
> + const struct qat_asym_function qat_function =
> + get_sm2_encryption_function();
> + const uint32_t qat_func_alignsize =
> + qat_function.bytesize;
> +
> + SET_PKE_LN(asym_op->sm2.k, qat_func_alignsize, 0);
> + SET_PKE_LN(xform->ec.q.x, qat_func_alignsize, 1);
> + SET_PKE_LN(xform->ec.q.y, qat_func_alignsize, 2);
> +
> + cookie->alg_bytesize = qat_function.bytesize;
> + cookie->qat_func_alignsize = qat_function.bytesize;
> + qat_req->pke_hdr.cd_pars.func_id = qat_function.func_id;
> + qat_req->input_param_count = 3;
> + qat_req->output_param_count = 4;
> +
> + HEXDUMP("SM2 K", cookie->input_array[0],
> + qat_func_alignsize);
> + HEXDUMP("SM2 Q.x", cookie->input_array[1],
> + qat_func_alignsize);
> + HEXDUMP("SM2 Q.y", cookie->input_array[2],
> + qat_func_alignsize);
> +
> + return RTE_CRYPTO_OP_STATUS_SUCCESS;
> +}
> +
> +static uint8_t
> +sm2_encryption_collect(struct rte_crypto_asym_op *asym_op,
> + const struct qat_asym_op_cookie *cookie)
> +{
> + uint32_t alg_bytesize = cookie->alg_bytesize;
> +
> + rte_memcpy(asym_op->sm2.c1.x.data, cookie->output_array[0], alg_bytesize);
> + rte_memcpy(asym_op->sm2.c1.y.data, cookie->output_array[1], alg_bytesize);
> + rte_memcpy(asym_op->sm2.kp.x.data, cookie->output_array[2], alg_bytesize);
> + rte_memcpy(asym_op->sm2.kp.y.data, cookie->output_array[3], alg_bytesize);
> + asym_op->sm2.c1.x.length = alg_bytesize;
> + asym_op->sm2.c1.y.length = alg_bytesize;
> + asym_op->sm2.kp.x.length = alg_bytesize;
> + asym_op->sm2.kp.y.length = alg_bytesize;
> +
> + HEXDUMP("c1[x1]", cookie->output_array[0],
> + alg_bytesize);
> + HEXDUMP("c1[y]", cookie->output_array[1],
> + alg_bytesize);
> + HEXDUMP("kp[x]", cookie->output_array[2],
> + alg_bytesize);
> + HEXDUMP("kp[y]", cookie->output_array[3],
> + alg_bytesize);
> + return RTE_CRYPTO_OP_STATUS_SUCCESS;
> +}
> +
> +
> +static int
> +sm2_decryption_set_input(struct icp_qat_fw_pke_request *qat_req,
> + struct qat_asym_op_cookie *cookie,
> + const struct rte_crypto_asym_op *asym_op,
> + const struct rte_crypto_asym_xform *xform)
> +{
> + const struct qat_asym_function qat_function =
> + get_sm2_decryption_function();
> + const uint32_t qat_func_alignsize =
> + qat_function.bytesize;
> +
> + SET_PKE_LN(xform->ec.pkey, qat_func_alignsize, 0);
> + SET_PKE_LN(asym_op->sm2.c1.x, qat_func_alignsize, 1);
> + SET_PKE_LN(asym_op->sm2.c1.y, qat_func_alignsize, 2);
> +
> + cookie->alg_bytesize = qat_function.bytesize;
> + cookie->qat_func_alignsize = qat_function.bytesize;
> + qat_req->pke_hdr.cd_pars.func_id = qat_function.func_id;
> + qat_req->input_param_count = 3;
> + qat_req->output_param_count = 2;
> +
> + HEXDUMP("d", cookie->input_array[0],
> + qat_func_alignsize);
> + HEXDUMP("c1[x]", cookie->input_array[1],
> + qat_func_alignsize);
> + HEXDUMP("c1[y]", cookie->input_array[2],
> + qat_func_alignsize);
> +
> + return RTE_CRYPTO_OP_STATUS_SUCCESS;
> +}
> +
> +
> +static uint8_t
> +sm2_decryption_collect(struct rte_crypto_asym_op *asym_op,
> + const struct qat_asym_op_cookie *cookie)
> +{
> + uint32_t alg_bytesize = cookie->alg_bytesize;
> +
> + rte_memcpy(asym_op->sm2.kp.x.data, cookie->output_array[0], alg_bytesize);
> + rte_memcpy(asym_op->sm2.kp.y.data, cookie->output_array[1], alg_bytesize);
> + asym_op->sm2.kp.x.length = alg_bytesize;
> + asym_op->sm2.kp.y.length = alg_bytesize;
> +
> + HEXDUMP("kp[x]", cookie->output_array[0],
> + alg_bytesize);
> + HEXDUMP("kp[y]", cookie->output_array[1],
> + alg_bytesize);
> + return RTE_CRYPTO_OP_STATUS_SUCCESS;
> +}
> +
> static int
> asym_set_input(struct icp_qat_fw_pke_request *qat_req,
> struct qat_asym_op_cookie *cookie,
> @@ -1015,14 +1132,20 @@ asym_set_input(struct icp_qat_fw_pke_request *qat_req,
> asym_op, xform);
> }
> case RTE_CRYPTO_ASYM_XFORM_SM2:
> - if (asym_op->sm2.op_type ==
> - RTE_CRYPTO_ASYM_OP_VERIFY) {
> + if (asym_op->sm2.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
> + return sm2_encryption_set_input(qat_req, cookie,
> + asym_op, xform);
> + } else if (asym_op->sm2.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
> + return sm2_decryption_set_input(qat_req, cookie,
> + asym_op, xform);
> + } else if (asym_op->sm2.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
> return sm2_ecdsa_verify_set_input(qat_req, cookie,
> asym_op, xform);
> - } else {
> + } else if (asym_op->sm2.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
> return sm2_ecdsa_sign_set_input(qat_req, cookie,
> asym_op, xform);
> }
> + break;
> default:
> QAT_LOG(ERR, "Invalid/unsupported asymmetric crypto xform");
> return -EINVAL;
> @@ -1114,7 +1237,13 @@ qat_asym_collect_response(struct rte_crypto_op *op,
> case RTE_CRYPTO_ASYM_XFORM_ECDH:
> return ecdh_collect(asym_op, cookie);
> case RTE_CRYPTO_ASYM_XFORM_SM2:
> - return sm2_ecdsa_sign_collect(asym_op, cookie);
> + if (asym_op->sm2.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT)
> + return sm2_encryption_collect(asym_op, cookie);
> + else if (asym_op->sm2.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT)
> + return sm2_decryption_collect(asym_op, cookie);
> + else
> + return sm2_ecdsa_sign_collect(asym_op, cookie);
> +
> default:
> QAT_LOG(ERR, "Not supported xform type");
> return RTE_CRYPTO_OP_STATUS_ERROR;
> @@ -1386,9 +1515,8 @@ qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
> case RTE_CRYPTO_ASYM_XFORM_ECDSA:
> case RTE_CRYPTO_ASYM_XFORM_ECPM:
> case RTE_CRYPTO_ASYM_XFORM_ECDH:
> - session_set_ec(qat_session, xform);
> - break;
> case RTE_CRYPTO_ASYM_XFORM_SM2:
> + session_set_ec(qat_session, xform);
> break;
> default:
> ret = -ENOTSUP;
> --
> 2.17.1
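For context, a minimal caller-side sketch (not part of this patch or the DPDK test suite) of how an application might fill the partial SM2 encryption op that sm2_encryption_set_input() consumes and sm2_encryption_collect() writes back; the buffer names and the setup around them are assumptions, only the asym_op->sm2 field names and the 32-byte curve size come from the diff above:

    /* hypothetical application buffers; 32 bytes = SM2 curve size */
    uint8_t k[32], c1x[32], c1y[32], kpx[32], kpy[32];
    /* 'op' is an already allocated RTE_CRYPTO_OP_TYPE_ASYMMETRIC crypto op */
    struct rte_crypto_asym_op *asym_op = op->asym;

    asym_op->sm2.op_type = RTE_CRYPTO_ASYM_OP_ENCRYPT;
    asym_op->sm2.k.data = k;		/* caller-supplied random scalar k */
    asym_op->sm2.k.length = sizeof(k);
    asym_op->sm2.c1.x.data = c1x;	/* PMD writes C1 = [k]G here */
    asym_op->sm2.c1.y.data = c1y;
    asym_op->sm2.kp.x.data = kpx;	/* PMD writes [k]P (P = public key) */
    asym_op->sm2.kp.y.data = kpy;
    /* the .length fields of c1 and kp are set by sm2_encryption_collect() */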
> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: Wednesday, October 23, 2024 2:47 AM
> To: Kusztal, ArkadiuszX <arkadiuszx.kusztal@intel.com>
> Cc: dev@dpdk.org; gakhil@marvell.com; Dooley, Brian <brian.dooley@intel.com>
> Subject: Re: [PATCH v6 2/3] crypto/qat: add sm2 encryption/decryption function
>
> On Tue, 22 Oct 2024 20:05:59 +0100
> Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com> wrote:
>
> > + uint32_t alg_bytesize = cookie->alg_bytesize;
> > +
> > + rte_memcpy(asym_op->sm2.c1.x.data, cookie->output_array[0], alg_bytesize);
> > + rte_memcpy(asym_op->sm2.c1.y.data, cookie->output_array[1], alg_bytesize);
> > + rte_memcpy(asym_op->sm2.kp.x.data, cookie->output_array[2], alg_bytesize);
> > + rte_memcpy(asym_op->sm2.kp.y.data, cookie->output_array[3], alg_bytesize);
>
> Since the copy is small and not in the fast path, there is no reason to use rte_memcpy().
> The memcpy() function is just as fast, gets inlined, and has more checking from gcc, Coverity and ASAN, so it is preferred.
This function is called by the crypto_dequeue_op_burst function, and in some other cases (like RSA) there may be 1024 bytes per copy operation.
If you think that a regular memcpy will do no worse there, I may change it.