From patchwork Tue Oct 26 17:25:12 2021
X-Patchwork-Submitter: "Ji, Kai"
X-Patchwork-Id: 102985
X-Patchwork-Delegate: gakhil@marvell.com
From: Kai Ji
To: dev@dpdk.org
Cc: Kai Ji
Date: Tue, 26 Oct 2021 18:25:12 +0100
Message-Id: <20211026172518.20183-2-kai.ji@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20211026172518.20183-1-kai.ji@intel.com>
References: <20211026172518.20183-1-kai.ji@intel.com>
Subject: [dpdk-dev] [dpdk-dev v1 1/7] crypto/qat: qat driver refactor skeleton

Add enqueue/dequeue op burst and build-request skeleton functions for the
QAT crypto driver refactor.

Signed-off-by: Kai Ji
Acked-by: Fan Zhang
---
 drivers/common/qat/qat_qp.c          | 16 +++++++++++++
 drivers/common/qat/qat_qp.h          | 22 +++++++++++++++++
 drivers/crypto/qat/qat_asym.c        | 35 ++++++++++++++++++++++++++++
 drivers/crypto/qat/qat_asym.h        | 15 ++++++++++++
 drivers/crypto/qat/qat_sym.c         | 25 ++++++++++++++++++++
 drivers/crypto/qat/qat_sym.h         | 16 +++++++++++++
 drivers/crypto/qat/qat_sym_session.h |  6 +++++
 7 files changed, 135 insertions(+)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index cde421eb77..0fda890075 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -549,6 +549,22 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 	return data & modulo_mask;
 }
 
+uint16_t
+refactor_qat_enqueue_op_burst(__rte_unused void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		__rte_unused void **ops, __rte_unused uint16_t nb_ops)
+{
+	return 0;
+}
+
+uint16_t
+refactor_qat_dequeue_op_burst(__rte_unused void *qp, __rte_unused void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		__rte_unused uint16_t nb_ops)
+{
+	return 0;
+}
+
 uint16_t
 qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 {
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..c5f115310c 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -36,6 +36,19 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * @brief Function prototype to build a QAT request.
+ * @param opaque: an opaque data may be used to store context may be useful + * between 2 enqueue operations. + **/ +typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg, + void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen); + +typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie, + uint64_t *dequeue_err_count __rte_unused); + +#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE 2 + struct qat_qp { void *mmap_bar_addr; struct qat_queue tx_q; @@ -44,6 +57,7 @@ struct qat_qp { struct rte_mempool *op_cookie_pool; void **op_cookies; uint32_t nb_descriptors; + uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE]; enum qat_device_gen qat_dev_gen; enum qat_service_type service_type; struct qat_pci_device *qat_dev; @@ -77,6 +91,14 @@ struct qat_qp_config { const char *service_str; }; +uint16_t +refactor_qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request, + void **ops, uint16_t nb_ops); + +uint16_t +refactor_qat_dequeue_op_burst(void *qp, void **ops, + qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops); + uint16_t qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops); diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c index 85973812a8..d5b4c66d68 100644 --- a/drivers/crypto/qat/qat_asym.c +++ b/drivers/crypto/qat/qat_asym.c @@ -456,6 +456,41 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op, return 0; } +static __rte_always_inline int +refactor_qat_asym_build_request(__rte_unused void *in_op, + __rte_unused uint8_t *out_msg, __rte_unused void *op_cookie, + __rte_unused uint64_t *opaque, + __rte_unused enum qat_device_gen dev_gen) +{ + return 0; +} + +int +refactor_qat_asym_process_response(__rte_unused void **op, + __rte_unused uint8_t *resp, + __rte_unused void *op_cookie, + __rte_unused uint64_t *dequeue_err_count) +{ + return 0; +} + +uint16_t +qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return refactor_qat_enqueue_op_burst(qp, + refactor_qat_asym_build_request, + (void **)ops, nb_ops); +} + +uint16_t +qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return refactor_qat_dequeue_op_burst(qp, (void **)ops, + refactor_qat_asym_process_response, nb_ops); +} + int qat_asym_build_request(void *in_op, uint8_t *out_msg, diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h index 308b6b2e0b..50c2641eba 100644 --- a/drivers/crypto/qat/qat_asym.h +++ b/drivers/crypto/qat/qat_asym.h @@ -92,4 +92,19 @@ void qat_asym_process_response(void __rte_unused **op, uint8_t *resp, void *op_cookie); +int +refactor_qat_asym_process_response(__rte_unused void **op, + __rte_unused uint8_t *resp, + __rte_unused void *op_cookie, + __rte_unused uint64_t *dequeue_err_count); + +uint16_t +qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops); + + +uint16_t +qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops); + #endif /* _QAT_ASYM_H_ */ diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c index 93b257522b..a92874cd27 100644 --- a/drivers/crypto/qat/qat_sym.c +++ b/drivers/crypto/qat/qat_sym.c @@ -210,6 +210,31 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op, ICP_QAT_FW_LA_NO_PROTO); } +static __rte_always_inline int +refactor_qat_sym_build_request(__rte_unused void *in_op, + __rte_unused uint8_t *out_msg, __rte_unused void *op_cookie, + __rte_unused uint64_t *opaque, + __rte_unused enum 
qat_device_gen dev_gen) +{ + return 0; +} + +uint16_t +refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return refactor_qat_enqueue_op_burst(qp, refactor_qat_sym_build_request, + (void **)ops, nb_ops); +} + +uint16_t +refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return refactor_qat_dequeue_op_burst(qp, (void **)ops, + refactor_qat_sym_process_response, nb_ops); +} + int qat_sym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie, enum qat_device_gen qat_dev_gen) diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h index e3ec7f0de4..17b2c871bd 100644 --- a/drivers/crypto/qat/qat_sym.h +++ b/drivers/crypto/qat/qat_sym.h @@ -54,6 +54,22 @@ struct qat_sym_op_cookie { } opt; }; +static __rte_always_inline int +refactor_qat_sym_process_response(__rte_unused void **op, + __rte_unused uint8_t *resp, __rte_unused void *op_cookie, + __rte_unused uint64_t *dequeue_err_count) +{ + return 0; +} + +uint16_t +refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops); + +uint16_t +refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops); + int qat_sym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie, enum qat_device_gen qat_dev_gen); diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h index 6ebc176729..73493ec864 100644 --- a/drivers/crypto/qat/qat_sym_session.h +++ b/drivers/crypto/qat/qat_sym_session.h @@ -55,6 +55,11 @@ #define QAT_SESSION_IS_SLICE_SET(flags, flag) \ (!!((flags) & (flag))) +struct qat_sym_session; + +typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie); + enum qat_sym_proto_flag { QAT_CRYPTO_PROTO_FLAG_NONE = 0, QAT_CRYPTO_PROTO_FLAG_CCM = 1, @@ -107,6 +112,7 @@ struct qat_sym_session { /* Some generations need different setup of counter */ uint32_t slice_types; enum qat_sym_proto_flag qat_proto_flag; + qat_sym_build_request_t build_request[2]; }; int From patchwork Tue Oct 26 17:25:13 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Ji, Kai" X-Patchwork-Id: 102986 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5D94DA0547; Tue, 26 Oct 2021 19:25:53 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 49F6D410F5; Tue, 26 Oct 2021 19:25:53 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by mails.dpdk.org (Postfix) with ESMTP id E9E21410DD for ; Tue, 26 Oct 2021 19:25:50 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10149"; a="228723397" X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="228723397" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 26 Oct 2021 10:25:24 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="494305888" Received: from silpixa00400272.ir.intel.com (HELO silpixa00400272.ger.corp.intel.com) ([10.237.223.111]) by fmsmga007.fm.intel.com with ESMTP; 26 Oct 2021 10:25:23 -0700 From: Kai Ji To: dev@dpdk.org Cc: Kai Ji Date: Tue, 26 Oct 2021 18:25:13 +0100 Message-Id: <20211026172518.20183-3-kai.ji@intel.com> 
X-Mailer: git-send-email 2.17.1 In-Reply-To: <20211026172518.20183-1-kai.ji@intel.com> References: <20211026172518.20183-1-kai.ji@intel.com> Subject: [dpdk-dev] [dpdk-dev v1 2/7] crypto/qat: qat driver sym op refactor X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch add-in refactored qat symmetirc build request function implementation (qat_sym_refactor.c & qat_sym_refactor.h) Add-in QAT sym build ops in auth, cipher, chain and aead operation for QAT generation 1 Signed-off-by: Kai Ji Acked-by: Fan Zhang --- drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 1071 ++++++++++++++++++ drivers/crypto/qat/dev/qat_sym_pmd_gen1.c | 895 +++++++++++++++ drivers/crypto/qat/qat_sym.h | 8 + drivers/crypto/qat/qat_sym_hw_dp.c | 8 - drivers/crypto/qat/qat_sym_refactor.c | 409 +++++++ drivers/crypto/qat/qat_sym_refactor.h | 402 +++++++ 6 files changed, 2785 insertions(+), 8 deletions(-) create mode 100644 drivers/crypto/qat/qat_sym_refactor.c create mode 100644 drivers/crypto/qat/qat_sym_refactor.h diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h index 67a4d2cb2c..07020741bd 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h @@ -8,14 +8,1082 @@ #include #include "qat_crypto.h" #include "qat_sym_session.h" +#include "qat_sym.h" + +#define QAT_BASE_GEN1_SYM_CAPABILITIES \ + QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \ + CAP_RNG(digest_size, 1, 20, 1)), \ + QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \ + CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \ + QAT_SYM_AEAD_CAP(AES_CCM, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \ + CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \ + QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \ + CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)), \ + QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \ + CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \ + CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \ + CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \ + CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \ + CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \ + CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \ + CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \ + 
CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \ + CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \ + CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(MD5_HMAC, CAP_SET(block_size, 64), \ + CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SNOW3G_UIA2, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \ + CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)), \ + QAT_SYM_AUTH_CAP(KASUMI_F9, CAP_SET(block_size, 8), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \ + CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \ + QAT_SYM_CIPHER_CAP(AES_CTR, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \ + QAT_SYM_CIPHER_CAP(AES_XTS, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)), \ + QAT_SYM_CIPHER_CAP(AES_DOCSISBPI, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \ + QAT_SYM_CIPHER_CAP(SNOW3G_UEA2, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \ + QAT_SYM_CIPHER_CAP(KASUMI_F8, CAP_SET(block_size, 8), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)), \ + QAT_SYM_CIPHER_CAP(NULL, CAP_SET(block_size, 1), \ + CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_CIPHER_CAP(3DES_CBC, CAP_SET(block_size, 8), \ + CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \ + QAT_SYM_CIPHER_CAP(3DES_CTR, CAP_SET(block_size, 8), \ + CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \ + QAT_SYM_CIPHER_CAP(DES_CBC, CAP_SET(block_size, 8), \ + CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \ + QAT_SYM_CIPHER_CAP(DES_DOCSISBPI, CAP_SET(block_size, 8), \ + CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)) + +#define QAT_BASE_GEN1_ASYM_CAPABILITIES \ + QAT_ASYM_CAP(MODEX, 0, 1, 512, 1), \ + QAT_ASYM_CAP(MODINV, 0, 1, 512, 1), \ + QAT_ASYM_CAP(RSA, \ + ((1 << RTE_CRYPTO_ASYM_OP_SIGN) | \ + (1 << RTE_CRYPTO_ASYM_OP_VERIFY) | \ + (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) | \ + (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)), \ + 64, 512, 64) + +#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \ + QAT_SYM_CIPHER_CAP(ZUC_EEA3, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \ + QAT_SYM_AUTH_CAP(ZUC_EIA3, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \ + CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)) \ + +#define QAT_EXTRA_GEN3_SYM_CAPABILITIES \ + QAT_SYM_AEAD_CAP(CHACHA20_POLY1305, CAP_SET(block_size, 64), \ + CAP_RNG(key_size, 32, 32, 0), \ + CAP_RNG(digest_size, 16, 16, 0), \ + CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)) + +#define QAT_BASE_GEN4_SYM_CAPABILITIES \ + 
QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \ + QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \ + CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \ + CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \ + CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \ + CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \ + CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_CIPHER_CAP(AES_DOCSISBPI, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \ + QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \ + CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_CIPHER_CAP(NULL, CAP_SET(block_size, 1), \ + CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \ + CAP_RNG(digest_size, 1, 20, 1)), \ + QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \ + CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \ + CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \ + CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \ + CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \ + CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ + QAT_SYM_CIPHER_CAP(AES_CTR, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \ + QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \ + CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \ + QAT_SYM_AEAD_CAP(AES_CCM, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \ + CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \ + QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \ + CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \ + CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)) \ + +#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \ + RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n) + +#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \ + (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \ + ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status)) + +static __rte_always_inline int +op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst, + uint8_t *iv, int ivlen, int 
srclen, + void *bpi_ctx) +{ + EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; + int encrypted_ivlen; + uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN]; + uint8_t *encr = encrypted_iv; + + /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */ + if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) + <= 0) + goto cipher_decrypt_err; + + for (; srclen != 0; --srclen, ++dst, ++src, ++encr) + *dst = *src ^ *encr; + + return 0; + +cipher_decrypt_err: + QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed"); + return -EINVAL; +} + +static __rte_always_inline uint32_t +qat_bpicipher_preprocess(struct qat_sym_session *ctx, + struct rte_crypto_op *op) +{ + int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); + struct rte_crypto_sym_op *sym_op = op->sym; + uint8_t last_block_len = block_len > 0 ? + sym_op->cipher.data.length % block_len : 0; + + if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) { + /* Decrypt last block */ + uint8_t *last_block, *dst, *iv; + uint32_t last_block_offset = sym_op->cipher.data.offset + + sym_op->cipher.data.length - last_block_len; + last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, + uint8_t *, last_block_offset); + + if (unlikely((sym_op->m_dst != NULL) + && (sym_op->m_dst != sym_op->m_src))) + /* out-of-place operation (OOP) */ + dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, + uint8_t *, last_block_offset); + else + dst = last_block; + + if (last_block_len < sym_op->cipher.data.length) + /* use previous block ciphertext as IV */ + iv = last_block - block_len; + else + /* runt block, i.e. less than one full block */ + iv = rte_crypto_op_ctod_offset(op, uint8_t *, + ctx->cipher_iv.offset); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:", + last_block, last_block_len); + if (sym_op->m_dst != NULL) + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:", + dst, last_block_len); +#endif + op_bpi_cipher_decrypt(last_block, dst, iv, block_len, + last_block_len, ctx->bpi_ctx); +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:", + last_block, last_block_len); + if (sym_op->m_dst != NULL) + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:", + dst, last_block_len); +#endif + } + + return sym_op->cipher.data.length - last_block_len; +} + +static __rte_always_inline int +qat_chk_len_in_bits_auth(struct qat_sym_session *ctx, + struct rte_crypto_op *op) +{ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) { + if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) || + (op->sym->auth.data.length % BYTE_LENGTH != 0))) + return -EINVAL; + return 1; + } + return 0; +} + +static __rte_always_inline int +qat_chk_len_in_bits_cipher(struct qat_sym_session *ctx, + struct rte_crypto_op *op) +{ + if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI || + ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) || + ((op->sym->cipher.data.offset % + BYTE_LENGTH) != 0))) + return -EINVAL; + return 1; + } + return 0; +} + +static __rte_always_inline int32_t +qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req, + void *opaque, struct qat_sym_op_cookie *cookie, + struct rte_crypto_vec *src_vec, uint16_t n_src, + struct 
rte_crypto_vec *dst_vec, uint16_t n_dst) +{ + struct qat_sgl *list; + uint32_t i; + uint32_t tl_src = 0, total_len_src, total_len_dst; + uint64_t src_data_start = 0, dst_data_start = 0; + int is_sgl = n_src > 1 || n_dst > 1; + + if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER || + n_dst > QAT_SYM_SGL_MAX_NUMBER)) + return -1; + + if (likely(!is_sgl)) { + src_data_start = src_vec[0].iova; + tl_src = total_len_src = + src_vec[0].len; + if (unlikely(n_dst)) { /* oop */ + total_len_dst = dst_vec[0].len; + + dst_data_start = dst_vec[0].iova; + if (unlikely(total_len_src != total_len_dst)) + return -EINVAL; + } else { + dst_data_start = src_data_start; + total_len_dst = tl_src; + } + } else { /* sgl */ + total_len_dst = total_len_src = 0; + + ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags, + QAT_COMN_PTR_TYPE_SGL); + + list = (struct qat_sgl *)&cookie->qat_sgl_src; + for (i = 0; i < n_src; i++) { + list->buffers[i].len = src_vec[i].len; + list->buffers[i].resrvd = 0; + list->buffers[i].addr = src_vec[i].iova; + if (tl_src + src_vec[i].len > UINT32_MAX) { + QAT_DP_LOG(ERR, "Message too long"); + return -1; + } + tl_src += src_vec[i].len; + } + + list->num_bufs = i; + src_data_start = cookie->qat_sgl_src_phys_addr; + + if (unlikely(n_dst > 0)) { /* oop sgl */ + uint32_t tl_dst = 0; + + list = (struct qat_sgl *)&cookie->qat_sgl_dst; + + for (i = 0; i < n_dst; i++) { + list->buffers[i].len = dst_vec[i].len; + list->buffers[i].resrvd = 0; + list->buffers[i].addr = dst_vec[i].iova; + if (tl_dst + dst_vec[i].len > UINT32_MAX) { + QAT_DP_LOG(ERR, "Message too long"); + return -ENOTSUP; + } + + tl_dst += dst_vec[i].len; + } + + if (tl_src != tl_dst) + return -EINVAL; + list->num_bufs = i; + dst_data_start = cookie->qat_sgl_dst_phys_addr; + } else + dst_data_start = src_data_start; + } + + req->comn_mid.src_data_addr = src_data_start; + req->comn_mid.dest_data_addr = dst_data_start; + req->comn_mid.src_length = total_len_src; + req->comn_mid.dst_length = total_len_dst; + req->comn_mid.opaque_data = (uintptr_t)opaque; + + return tl_src; +} + +static __rte_always_inline uint64_t +qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op, + struct qat_sym_session *ctx, + struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl, + struct rte_crypto_va_iova_ptr *cipher_iv, + struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused, + struct rte_crypto_va_iova_ptr *digest __rte_unused) +{ + uint32_t cipher_len = 0, cipher_ofs = 0; + int n_src = 0; + int ret; + + ret = qat_chk_len_in_bits_cipher(ctx, op); + switch (ret) { + case 1: + cipher_len = op->sym->cipher.data.length >> 3; + cipher_ofs = op->sym->cipher.data.offset >> 3; + break; + case 0: + if (ctx->bpi_ctx) { + /* DOCSIS - only send complete blocks to device. + * Process any partial block using CFB mode. 
+ * Even if 0 complete blocks, still send this to device + * to get into rx queue for post-process and dequeuing + */ + cipher_len = qat_bpicipher_preprocess(ctx, op); + cipher_ofs = op->sym->cipher.data.offset; + } else { + cipher_len = op->sym->cipher.data.length; + cipher_ofs = op->sym->cipher.data.offset; + } + break; + default: + QAT_DP_LOG(ERR, + "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return UINT64_MAX; + } + + cipher_iv->va = rte_crypto_op_ctod_offset(op, void *, + ctx->cipher_iv.offset); + cipher_iv->iova = rte_crypto_op_ctophys_offset(op, + ctx->cipher_iv.offset); + + n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, + cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); + if (n_src < 0 || n_src > op->sym->m_src->nb_segs) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return UINT64_MAX; + } + + in_sgl->num = n_src; + + /* Out-Of-Place operation */ + if (unlikely((op->sym->m_dst != NULL) && + (op->sym->m_dst != op->sym->m_src))) { + int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs, + cipher_len, out_sgl->vec, + QAT_SYM_SGL_MAX_NUMBER); + + if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return UINT64_MAX; + } + + out_sgl->num = n_dst; + } else + out_sgl->num = 0; + + return 0; +} + +static __rte_always_inline uint64_t +qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op, + struct qat_sym_session *ctx, + struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl, + struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused, + struct rte_crypto_va_iova_ptr *auth_iv, + struct rte_crypto_va_iova_ptr *digest) +{ + uint32_t auth_ofs = 0, auth_len = 0; + int n_src, ret; + + ret = qat_chk_len_in_bits_auth(ctx, op); + switch (ret) { + case 1: + auth_ofs = op->sym->auth.data.offset >> 3; + auth_len = op->sym->auth.data.length >> 3; + auth_iv->va = rte_crypto_op_ctod_offset(op, void *, + ctx->auth_iv.offset); + auth_iv->iova = rte_crypto_op_ctophys_offset(op, + ctx->auth_iv.offset); + break; + case 0: + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { + /* AES-GMAC */ + auth_ofs = op->sym->auth.data.offset; + auth_len = op->sym->auth.data.length; + auth_iv->va = rte_crypto_op_ctod_offset(op, void *, + ctx->auth_iv.offset); + auth_iv->iova = rte_crypto_op_ctophys_offset(op, + ctx->auth_iv.offset); + } else { + auth_ofs = op->sym->auth.data.offset; + auth_len = op->sym->auth.data.length; + auth_iv->va = NULL; + auth_iv->iova = 0; + } + break; + default: + QAT_DP_LOG(ERR, + "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return UINT64_MAX; + } + + n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs, + auth_ofs + auth_len, in_sgl->vec, + QAT_SYM_SGL_MAX_NUMBER); + if (n_src < 0 || n_src > op->sym->m_src->nb_segs) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return UINT64_MAX; + } + + in_sgl->num = n_src; + + /* Out-Of-Place operation */ + if (unlikely((op->sym->m_dst != NULL) && + (op->sym->m_dst != op->sym->m_src))) { + int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs, + auth_ofs + auth_len, out_sgl->vec, + QAT_SYM_SGL_MAX_NUMBER); + + if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return UINT64_MAX; + } + out_sgl->num = n_dst; + } else + out_sgl->num = 0; + + digest->va = (void *)op->sym->auth.digest.data; + digest->iova = 
op->sym->auth.digest.phys_addr; + + return 0; +} + +static __rte_always_inline uint64_t +qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, + struct qat_sym_session *ctx, + struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl, + struct rte_crypto_va_iova_ptr *cipher_iv, + struct rte_crypto_va_iova_ptr *auth_iv_or_aad, + struct rte_crypto_va_iova_ptr *digest) +{ + union rte_crypto_sym_ofs ofs; + uint32_t min_ofs = 0, max_len = 0; + uint32_t cipher_len = 0, cipher_ofs = 0; + uint32_t auth_len = 0, auth_ofs = 0; + int is_oop = (op->sym->m_dst != NULL) && + (op->sym->m_dst != op->sym->m_src); + int is_sgl = op->sym->m_src->nb_segs > 1; + int n_src; + int ret; + + if (unlikely(is_oop)) + is_sgl |= op->sym->m_dst->nb_segs > 1; + + cipher_iv->va = rte_crypto_op_ctod_offset(op, void *, + ctx->cipher_iv.offset); + cipher_iv->iova = rte_crypto_op_ctophys_offset(op, + ctx->cipher_iv.offset); + auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *, + ctx->auth_iv.offset); + auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op, + ctx->auth_iv.offset); + digest->va = (void *)op->sym->auth.digest.data; + digest->iova = op->sym->auth.digest.phys_addr; + + ret = qat_chk_len_in_bits_cipher(ctx, op); + switch (ret) { + case 1: + cipher_len = op->sym->aead.data.length >> 3; + cipher_ofs = op->sym->aead.data.offset >> 3; + break; + case 0: + cipher_len = op->sym->aead.data.length; + cipher_ofs = op->sym->aead.data.offset; + break; + default: + QAT_DP_LOG(ERR, + "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + ret = qat_chk_len_in_bits_auth(ctx, op); + switch (ret) { + case 1: + auth_len = op->sym->auth.data.length >> 3; + auth_ofs = op->sym->auth.data.offset >> 3; + break; + case 0: + auth_len = op->sym->auth.data.length; + auth_ofs = op->sym->auth.data.offset; + break; + default: + QAT_DP_LOG(ERR, + "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs; + max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len); + + /* digest in buffer check. Needed only for wireless algos */ + if (ret == 1) { + /* Handle digest-encrypted cases, i.e. + * auth-gen-then-cipher-encrypt and + * cipher-decrypt-then-auth-verify + */ + uint64_t auth_end_iova; + + if (unlikely(is_sgl)) { + uint32_t remaining_off = auth_ofs + auth_len; + struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst : + op->sym->m_src); + + while (remaining_off >= rte_pktmbuf_data_len(sgl_buf) + && sgl_buf->next != NULL) { + remaining_off -= rte_pktmbuf_data_len(sgl_buf); + sgl_buf = sgl_buf->next; + } + + auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset( + sgl_buf, remaining_off); + } else + auth_end_iova = (is_oop ? 
+ rte_pktmbuf_iova(op->sym->m_dst) : + rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs + + auth_len; + + /* Then check if digest-encrypted conditions are met */ + if ((auth_ofs + auth_len < cipher_ofs + cipher_len) && + (digest->iova == auth_end_iova)) + max_len = RTE_MAX(max_len, auth_ofs + auth_len + + ctx->digest_length); + } + + n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len, + in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); + if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return -1; + } + in_sgl->num = n_src; + + if (unlikely((op->sym->m_dst != NULL) && + (op->sym->m_dst != op->sym->m_src))) { + int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs, + max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); + + if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return -1; + } + out_sgl->num = n_dst; + } else + out_sgl->num = 0; + + ofs.ofs.cipher.head = cipher_ofs; + ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len; + ofs.ofs.auth.head = auth_ofs; + ofs.ofs.auth.tail = max_len - auth_ofs - auth_len; + + return ofs.raw; +} + +static __rte_always_inline uint64_t +qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op, + struct qat_sym_session *ctx, + struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl, + struct rte_crypto_va_iova_ptr *cipher_iv, + struct rte_crypto_va_iova_ptr *auth_iv_or_aad, + struct rte_crypto_va_iova_ptr *digest) +{ + uint32_t cipher_len = 0, cipher_ofs = 0; + int32_t n_src = 0; + + cipher_iv->va = rte_crypto_op_ctod_offset(op, void *, + ctx->cipher_iv.offset); + cipher_iv->iova = rte_crypto_op_ctophys_offset(op, + ctx->cipher_iv.offset); + auth_iv_or_aad->va = (void *)op->sym->aead.aad.data; + auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr; + digest->va = (void *)op->sym->aead.digest.data; + digest->iova = op->sym->aead.digest.phys_addr; + + cipher_len = op->sym->aead.data.length; + cipher_ofs = op->sym->aead.data.offset; + + n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len, + in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); + if (n_src < 0 || n_src > op->sym->m_src->nb_segs) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return UINT64_MAX; + } + in_sgl->num = n_src; + + /* Out-Of-Place operation */ + if (unlikely((op->sym->m_dst != NULL) && + (op->sym->m_dst != op->sym->m_src))) { + int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs, + cipher_len, out_sgl->vec, + QAT_SYM_SGL_MAX_NUMBER); + if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return UINT64_MAX; + } + + out_sgl->num = n_dst; + } else + out_sgl->num = 0; + + return 0; +} + +static __rte_always_inline void +qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param, + struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len, + struct icp_qat_fw_la_bulk_req *qat_req) +{ + /* copy IV into request if it fits */ + if (iv_len <= sizeof(cipher_param->u.cipher_IV_array)) + rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va, + iv_len); + else { + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( + qat_req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_CIPH_IV_64BIT_PTR); + cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova; + } +} + +static __rte_always_inline void +qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n) +{ + uint32_t i; + + for (i = 0; i < n; i++) + sta[i] = status; +} + +static __rte_always_inline void +enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + 
struct rte_crypto_va_iova_ptr *iv, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + struct icp_qat_fw_la_cipher_req_params *cipher_param; + + cipher_param = (void *)&req->serv_specif_rqpars; + + /* cipher IV */ + qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req); + cipher_param->cipher_offset = ofs.ofs.cipher.head; + cipher_param->cipher_length = data_len - ofs.ofs.cipher.head - + ofs.ofs.cipher.tail; +} + +static __rte_always_inline void +enqueue_one_auth_job_gen1(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + struct icp_qat_fw_la_cipher_req_params *cipher_param; + struct icp_qat_fw_la_auth_req_params *auth_param; + + cipher_param = (void *)&req->serv_specif_rqpars; + auth_param = (void *)((uint8_t *)cipher_param + + ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); + + auth_param->auth_off = ofs.ofs.auth.head; + auth_param->auth_len = data_len - ofs.ofs.auth.head - + ofs.ofs.auth.tail; + auth_param->auth_res_addr = digest->iova; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: + case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: + case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: + auth_param->u1.aad_adr = auth_iv->iova; + break; + case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: + ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( + req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); + rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va, + ctx->auth_iv.length); + break; + default: + break; + } +} + +static __rte_always_inline int +enqueue_one_chain_job_gen1(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_vec *src_vec, + uint16_t n_src_vecs, + struct rte_crypto_vec *dst_vec, + uint16_t n_dst_vecs, + struct rte_crypto_va_iova_ptr *cipher_iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + struct icp_qat_fw_la_cipher_req_params *cipher_param; + struct icp_qat_fw_la_auth_req_params *auth_param; + struct rte_crypto_vec *cvec = n_dst_vecs > 0 ? + dst_vec : src_vec; + rte_iova_t auth_iova_end; + int cipher_len, auth_len; + int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1; + + cipher_param = (void *)&req->serv_specif_rqpars; + auth_param = (void *)((uint8_t *)cipher_param + + ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); + + cipher_len = data_len - ofs.ofs.cipher.head - + ofs.ofs.cipher.tail; + auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail; + + if (unlikely(cipher_len < 0 || auth_len < 0)) + return -1; + + cipher_param->cipher_offset = ofs.ofs.cipher.head; + cipher_param->cipher_length = cipher_len; + qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req); + + auth_param->auth_off = ofs.ofs.auth.head; + auth_param->auth_len = auth_len; + auth_param->auth_res_addr = digest->iova; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: + case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: + case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: + auth_param->u1.aad_adr = auth_iv->iova; + break; + case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: + break; + default: + break; + } + + if (unlikely(is_sgl)) { + /* sgl */ + int i = n_dst_vecs ? 
n_dst_vecs : n_src_vecs; + uint32_t remaining_off = data_len - ofs.ofs.auth.tail; + + while (remaining_off >= cvec->len && i >= 1) { + i--; + remaining_off -= cvec->len; + cvec++; + } + + auth_iova_end = cvec->iova + remaining_off; + } else + auth_iova_end = cvec[0].iova + auth_param->auth_off + + auth_param->auth_len; + + /* Then check if digest-encrypted conditions are met */ + if ((auth_param->auth_off + auth_param->auth_len < + cipher_param->cipher_offset + cipher_param->cipher_length) && + (digest->iova == auth_iova_end)) { + /* Handle partial digest encryption */ + if (cipher_param->cipher_offset + cipher_param->cipher_length < + auth_param->auth_off + auth_param->auth_len + + ctx->digest_length && !is_sgl) + req->comn_mid.dst_length = req->comn_mid.src_length = + auth_param->auth_off + auth_param->auth_len + + ctx->digest_length; + struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr; + ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_DIGEST_IN_BUFFER); + } + + return 0; +} + +static __rte_always_inline void +enqueue_one_aead_job_gen1(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + struct icp_qat_fw_la_cipher_req_params *cipher_param = + (void *)&req->serv_specif_rqpars; + struct icp_qat_fw_la_auth_req_params *auth_param = + (void *)((uint8_t *)&req->serv_specif_rqpars + + ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); + uint8_t *aad_data; + uint8_t aad_ccm_real_len; + uint8_t aad_len_field_sz; + uint32_t msg_len_be; + rte_iova_t aad_iova = 0; + uint8_t q; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: + ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( + req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); + rte_memcpy(cipher_param->u.cipher_IV_array, iv->va, + ctx->cipher_iv.length); + aad_iova = aad->iova; + break; + case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC: + aad_data = aad->va; + aad_iova = aad->iova; + aad_ccm_real_len = 0; + aad_len_field_sz = 0; + msg_len_be = rte_bswap32((uint32_t)data_len - + ofs.ofs.cipher.head); + + if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) { + aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO; + aad_ccm_real_len = ctx->aad_len - + ICP_QAT_HW_CCM_AAD_B0_LEN - + ICP_QAT_HW_CCM_AAD_LEN_INFO; + } else { + aad_data = iv->va; + aad_iova = iv->iova; + } + + q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length; + aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS( + aad_len_field_sz, ctx->digest_length, q); + if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) { + memcpy(aad_data + ctx->cipher_iv.length + + ICP_QAT_HW_CCM_NONCE_OFFSET + (q - + ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE), + (uint8_t *)&msg_len_be, + ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE); + } else { + memcpy(aad_data + ctx->cipher_iv.length + + ICP_QAT_HW_CCM_NONCE_OFFSET, + (uint8_t *)&msg_len_be + + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE + - q), q); + } + + if (aad_len_field_sz > 0) { + *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] = + rte_bswap16(aad_ccm_real_len); + + if ((aad_ccm_real_len + aad_len_field_sz) + % ICP_QAT_HW_CCM_AAD_B0_LEN) { + uint8_t pad_len = 0; + uint8_t pad_idx = 0; + + pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN - + ((aad_ccm_real_len + + aad_len_field_sz) % + ICP_QAT_HW_CCM_AAD_B0_LEN); + pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN + + aad_ccm_real_len + + aad_len_field_sz; + memset(&aad_data[pad_idx], 0, 
pad_len); + } + } + + rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) + + ICP_QAT_HW_CCM_NONCE_OFFSET, + (uint8_t *)iv->va + + ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length); + *(uint8_t *)&cipher_param->u.cipher_IV_array[0] = + q - ICP_QAT_HW_CCM_NONCE_OFFSET; + + rte_memcpy((uint8_t *)aad->va + + ICP_QAT_HW_CCM_NONCE_OFFSET, + (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET, + ctx->cipher_iv.length); + break; + default: + break; + } + + cipher_param->cipher_offset = ofs.ofs.cipher.head; + cipher_param->cipher_length = data_len - ofs.ofs.cipher.head - + ofs.ofs.cipher.tail; + auth_param->auth_off = ofs.ofs.cipher.head; + auth_param->auth_len = cipher_param->cipher_length; + auth_param->auth_res_addr = digest->iova; + auth_param->u1.aad_adr = aad_iova; +} extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1; extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1; +/* -----------------GEN 1 sym crypto op data path APIs ---------------- */ +int +qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie); + +int +qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie); + +int +qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie); + +int +qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie); + +/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */ +int +qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest __rte_unused, + struct rte_crypto_va_iova_ptr *aad __rte_unused, + void *user_data); + +uint32_t +qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status); + +int +qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv __rte_unused, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + void *user_data); + +uint32_t +qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status); + +int +qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *cipher_iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + void *user_data); + +uint32_t +qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status); + +int +qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, + void *user_data); + +uint32_t +qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status); + +void * +qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx, + 
int *dequeue_status, enum rte_crypto_op_status *op_status); + +uint32_t +qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx, + rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, + uint32_t max_nb_to_dequeue, + rte_cryptodev_raw_post_dequeue_t post_dequeue, + void **out_user_data, uint8_t is_user_data_array, + uint32_t *n_success_jobs, int *return_status); + +int +qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n); + +int +qat_sym_dp_denqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n); + +int +qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx); + /* -----------------GENx control path APIs ---------------- */ uint64_t qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev); +int +qat_sym_crypto_set_session_gen1(void *cryptodev, void *session); + void qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session, uint8_t hash_flag); @@ -23,6 +1091,9 @@ qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session, struct qat_capabilities_info qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev); +int +qat_asym_crypto_set_session_gen1(void *cryptodev, void *session); + uint64_t qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev); diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c index e156f194e2..e1fd14956b 100644 --- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c +++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c @@ -269,6 +269,901 @@ qat_sym_create_security_gen1(void *cryptodev) #endif +int +qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie) +{ + register struct icp_qat_fw_la_bulk_req *req; + struct rte_crypto_op *op = in_op; + struct qat_sym_op_cookie *cookie = op_cookie; + struct rte_crypto_sgl in_sgl, out_sgl; + struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER], + out_vec[QAT_SYM_SGL_MAX_NUMBER]; + struct rte_crypto_va_iova_ptr cipher_iv; + union rte_crypto_sym_ofs ofs; + int32_t total_len; + + in_sgl.vec = in_vec; + out_sgl.vec = out_vec; + + req = (struct icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl, + &cipher_iv, NULL, NULL); + if (unlikely(ofs.raw == UINT64_MAX)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + total_len = qat_sym_build_req_set_data(req, in_op, cookie, + in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num); + if (unlikely(total_len < 0)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv, + NULL, NULL, NULL); +#endif + + return 0; +} + +int +qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie) +{ + register struct icp_qat_fw_la_bulk_req *req; + struct rte_crypto_op *op = in_op; + struct qat_sym_op_cookie *cookie = op_cookie; + struct rte_crypto_sgl in_sgl, out_sgl; + struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER], + out_vec[QAT_SYM_SGL_MAX_NUMBER]; + struct rte_crypto_va_iova_ptr auth_iv; + struct rte_crypto_va_iova_ptr digest; + union rte_crypto_sym_ofs ofs; + int32_t total_len; + + in_sgl.vec = in_vec; + out_sgl.vec = out_vec; + + req = (struct icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)req, (const uint8_t 
*)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl, + NULL, &auth_iv, &digest); + if (unlikely(ofs.raw == UINT64_MAX)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + total_len = qat_sym_build_req_set_data(req, in_op, cookie, + in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num); + if (unlikely(total_len < 0)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs, + total_len); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL, + &auth_iv, NULL, &digest); +#endif + + return 0; +} + +int +qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie) +{ + register struct icp_qat_fw_la_bulk_req *req; + struct rte_crypto_op *op = in_op; + struct qat_sym_op_cookie *cookie = op_cookie; + struct rte_crypto_sgl in_sgl, out_sgl; + struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER], + out_vec[QAT_SYM_SGL_MAX_NUMBER]; + struct rte_crypto_va_iova_ptr cipher_iv; + struct rte_crypto_va_iova_ptr aad; + struct rte_crypto_va_iova_ptr digest; + union rte_crypto_sym_ofs ofs; + int32_t total_len; + + in_sgl.vec = in_vec; + out_sgl.vec = out_vec; + + req = (struct icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl, + &cipher_iv, &aad, &digest); + if (unlikely(ofs.raw == UINT64_MAX)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + total_len = qat_sym_build_req_set_data(req, in_op, cookie, + in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num); + if (unlikely(total_len < 0)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs, + total_len); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv, + NULL, &aad, &digest); +#endif + + return 0; +} + +int +qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie) +{ + register struct icp_qat_fw_la_bulk_req *req; + struct rte_crypto_op *op = in_op; + struct qat_sym_op_cookie *cookie = op_cookie; + struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0}; + struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER], + out_vec[QAT_SYM_SGL_MAX_NUMBER]; + struct rte_crypto_va_iova_ptr cipher_iv; + struct rte_crypto_va_iova_ptr auth_iv; + struct rte_crypto_va_iova_ptr digest; + union rte_crypto_sym_ofs ofs; + int32_t total_len; + + in_sgl.vec = in_vec; + out_sgl.vec = out_vec; + + req = (struct icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl, + &cipher_iv, &auth_iv, &digest); + if (unlikely(ofs.raw == UINT64_MAX)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + total_len = qat_sym_build_req_set_data(req, in_op, cookie, + in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num); + if (unlikely(total_len < 0)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num, + out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv, + ofs, total_len); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv, + &auth_iv, &digest); +#endif 
+ + return 0; +} + +int +qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest __rte_unused, + struct rte_crypto_va_iova_ptr *aad __rte_unused, + void *user_data) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + struct qat_sym_op_cookie *cookie; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + uint32_t tail = dp_ctx->tail; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + cookie = qp->op_cookies[tail >> tx_queue->trailz]; + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + + data_len = qat_sym_build_req_set_data(req, user_data, cookie, + data, n_data_vecs, NULL, 0); + if (unlikely(data_len < 0)) + return -1; + + enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, &iv, + NULL, NULL, NULL); +#endif + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; + + return 0; +} + +uint32_t +qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { + qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); + *status = 0; + return 0; + } + + tail = dp_ctx->tail; + + for (i = 0; i < n; i++) { + struct qat_sym_op_cookie *cookie = + qp->op_cookies[tail >> tx_queue->trailz]; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_build_req_set_data(req, user_data[i], + cookie, vec->src_sgl[i].vec, + vec->src_sgl[i].num, NULL, 0); + if (unlikely(data_len < 0)) + break; + enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs, + (uint32_t)data_len); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec, + vec->src_sgl[i].num, &vec->iv[i], + NULL, NULL, NULL); +#endif + } + + if (unlikely(i < n)) + qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue += i; + *status = 0; + return i; +} + +int +qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv __rte_unused, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + void *user_data) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_op_cookie *cookie; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + uint32_t tail = dp_ctx->tail; + 
+ req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + cookie = qp->op_cookies[tail >> tx_queue->trailz]; + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + data_len = qat_sym_build_req_set_data(req, user_data, cookie, + data, n_data_vecs, NULL, 0); + if (unlikely(data_len < 0)) + return -1; + + enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs, + (uint32_t)data_len); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL, + auth_iv, NULL, digest); +#endif + return 0; +} + +uint32_t +qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { + qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); + *status = 0; + return 0; + } + + tail = dp_ctx->tail; + + for (i = 0; i < n; i++) { + struct qat_sym_op_cookie *cookie = + qp->op_cookies[tail >> tx_queue->trailz]; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_build_req_set_data(req, user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (unlikely(data_len < 0)) + break; + enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i], + &vec->auth_iv[i], ofs, (uint32_t)data_len); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec, + vec->src_sgl[i].num, NULL, &vec->auth_iv[i], + NULL, &vec->digest[i]); +#endif + } + + if (unlikely(i < n)) + qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue += i; + *status = 0; + return i; +} + +int +qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *cipher_iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + void *user_data) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_op_cookie *cookie; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + uint32_t tail = dp_ctx->tail; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + cookie = qp->op_cookies[tail >> tx_queue->trailz]; + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + data_len = qat_sym_build_req_set_data(req, user_data, cookie, + data, n_data_vecs, NULL, 0); + if (unlikely(data_len < 0)) + return -1; + + if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs, + NULL, 0, cipher_iv, digest, auth_iv, ofs, + (uint32_t)data_len))) + 
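/*
 * Descriptive note, not part of this patch: none of these enqueue helpers
 * touches the hardware doorbell.  Each single-op variant only writes one
 * descriptor and bumps dp_ctx->cached_enqueue (the burst variants add i);
 * the TX tail CSR is written once per batch in qat_sym_dp_enqueue_done_gen1()
 * further below, which applications reach through the raw data-path API
 * (rte_cryptodev_raw_enqueue_done()).  QAT_SYM_DP_GET_MAX_ENQ() presumably
 * caps each burst so that descriptors already in flight plus the cached,
 * not-yet-kicked ones cannot overflow the ring.
 */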
return -1; + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; + + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv, + auth_iv, NULL, digest); +#endif + return 0; +} + +uint32_t +qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { + qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); + *status = 0; + return 0; + } + + tail = dp_ctx->tail; + + for (i = 0; i < n; i++) { + struct qat_sym_op_cookie *cookie = + qp->op_cookies[tail >> tx_queue->trailz]; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_build_req_set_data(req, user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (unlikely(data_len < 0)) + break; + + if (unlikely(enqueue_one_chain_job_gen1(ctx, req, + vec->src_sgl[i].vec, vec->src_sgl[i].num, + NULL, 0, + &vec->iv[i], &vec->digest[i], + &vec->auth_iv[i], ofs, (uint32_t)data_len))) + break; + + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec, + vec->src_sgl[i].num, &vec->iv[i], + &vec->auth_iv[i], + NULL, &vec->digest[i]); +#endif + } + + if (unlikely(i < n)) + qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue += i; + *status = 0; + return i; +} + +int +qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, + void *user_data) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_op_cookie *cookie; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + + int32_t data_len; + uint32_t tail = dp_ctx->tail; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + cookie = qp->op_cookies[tail >> tx_queue->trailz]; + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + data_len = qat_sym_build_req_set_data(req, user_data, cookie, + data, n_data_vecs, NULL, 0); + if (unlikely(data_len < 0)) + return -1; + + enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, + (uint32_t)data_len); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv, + NULL, aad, digest); +#endif + return 0; +} + +uint32_t +qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue 
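/*
 * Descriptive note, not part of this patch: all the *_jobs_gen1 burst
 * helpers share one error model -- the loop stops at the first job whose
 * request cannot be built, the remaining entries get status -1 via
 * qat_sym_dp_fill_vec_status(), *status is left at 0 and the return value
 * is the number of descriptors actually written, so the caller can resubmit
 * the unprocessed tail of the vector.
 */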
= &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { + qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); + *status = 0; + return 0; + } + + tail = dp_ctx->tail; + + for (i = 0; i < n; i++) { + struct qat_sym_op_cookie *cookie = + qp->op_cookies[tail >> tx_queue->trailz]; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_build_req_set_data(req, user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (unlikely(data_len < 0)) + break; + + enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i], + &vec->digest[i], &vec->aad[i], ofs, + (uint32_t)data_len); + + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec, + vec->src_sgl[i].num, &vec->iv[i], NULL, + &vec->aad[i], &vec->digest[i]); +#endif + } + + if (unlikely(i < n)) + qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue += i; + *status = 0; + return i; +} + + +uint32_t +qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx, + rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, + uint32_t max_nb_to_dequeue, + rte_cryptodev_raw_post_dequeue_t post_dequeue, + void **out_user_data, uint8_t is_user_data_array, + uint32_t *n_success_jobs, int *return_status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *rx_queue = &qp->rx_q; + struct icp_qat_fw_comn_resp *resp; + void *resp_opaque; + uint32_t i, n, inflight; + uint32_t head; + uint8_t status; + + *n_success_jobs = 0; + *return_status = 0; + head = dp_ctx->head; + + inflight = qp->enqueued - qp->dequeued; + if (unlikely(inflight == 0)) + return 0; + + resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr + + head); + /* no operation ready */ + if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) + return 0; + + resp_opaque = (void *)(uintptr_t)resp->opaque_data; + /* get the dequeue count */ + if (get_dequeue_count) { + n = get_dequeue_count(resp_opaque); + if (unlikely(n == 0)) + return 0; + } else { + if (unlikely(max_nb_to_dequeue == 0)) + return 0; + n = max_nb_to_dequeue; + } + + out_user_data[0] = resp_opaque; + status = QAT_SYM_DP_IS_RESP_SUCCESS(resp); + post_dequeue(resp_opaque, 0, status); + *n_success_jobs += status; + + head = (head + rx_queue->msg_size) & rx_queue->modulo_mask; + + /* we already finished dequeue when n == 1 */ + if (unlikely(n == 1)) { + i = 1; + goto end_deq; + } + + if (is_user_data_array) { + for (i = 1; i < n; i++) { + resp = (struct icp_qat_fw_comn_resp *)( + (uint8_t *)rx_queue->base_addr + head); + if (unlikely(*(uint32_t *)resp == + ADF_RING_EMPTY_SIG)) + goto end_deq; + out_user_data[i] = (void *)(uintptr_t)resp->opaque_data; + status = QAT_SYM_DP_IS_RESP_SUCCESS(resp); + *n_success_jobs += status; + post_dequeue(out_user_data[i], i, status); + head = (head + rx_queue->msg_size) & + rx_queue->modulo_mask; + } + + goto end_deq; + } + + /* opaque is not array */ + for (i = 1; i < n; i++) { + resp = (struct icp_qat_fw_comn_resp *)( + (uint8_t *)rx_queue->base_addr + head); + status = QAT_SYM_DP_IS_RESP_SUCCESS(resp); + if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) + goto 
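/*
 * Descriptive note, not part of this patch: the RX ring needs no shadow tail
 * here -- a response slot is considered ready as soon as its first 32 bits no
 * longer read ADF_RING_EMPTY_SIG.  The matching qat_sym_dp_denqueue_done_gen1()
 * below re-stamps consumed slots with ADF_RING_EMPTY_SIG_BYTE and defers the
 * head CSR write until more than QAT_CSR_HEAD_WRITE_THRESH responses have been
 * processed, keeping MMIO writes off the per-packet path.
 */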
end_deq; + head = (head + rx_queue->msg_size) & + rx_queue->modulo_mask; + post_dequeue(resp_opaque, i, status); + *n_success_jobs += status; + } + +end_deq: + dp_ctx->head = head; + dp_ctx->cached_dequeue += i; + return i; +} + +void * +qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx, + int *dequeue_status, enum rte_crypto_op_status *op_status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *rx_queue = &qp->rx_q; + register struct icp_qat_fw_comn_resp *resp; + + resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr + + dp_ctx->head); + + if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) + return NULL; + + dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) & + rx_queue->modulo_mask; + dp_ctx->cached_dequeue++; + + *op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ? + RTE_CRYPTO_OP_STATUS_SUCCESS : + RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + *dequeue_status = 0; + return (void *)(uintptr_t)resp->opaque_data; +} + +int +qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n) +{ + struct qat_qp *qp = qp_data; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + + if (unlikely(dp_ctx->cached_enqueue != n)) + return -1; + + qp->enqueued += n; + qp->stats.enqueued_count += n; + + tx_queue->tail = dp_ctx->tail; + + WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, + tx_queue->hw_bundle_number, + tx_queue->hw_queue_number, tx_queue->tail); + tx_queue->csr_tail = tx_queue->tail; + dp_ctx->cached_enqueue = 0; + + return 0; +} + +int +qat_sym_dp_denqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n) +{ + struct qat_qp *qp = qp_data; + struct qat_queue *rx_queue = &qp->rx_q; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + + if (unlikely(dp_ctx->cached_dequeue != n)) + return -1; + + rx_queue->head = dp_ctx->head; + rx_queue->nb_processed_responses += n; + qp->dequeued += n; + qp->stats.dequeued_count += n; + if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) { + uint32_t old_head, new_head; + uint32_t max_head; + + old_head = rx_queue->csr_head; + new_head = rx_queue->head; + max_head = qp->nb_descriptors * rx_queue->msg_size; + + /* write out free descriptors */ + void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head; + + if (new_head < old_head) { + memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, + max_head - old_head); + memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE, + new_head); + } else { + memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - + old_head); + } + rx_queue->nb_processed_responses = 0; + rx_queue->csr_head = new_head; + + /* write current head to CSR */ + WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, + rx_queue->hw_bundle_number, rx_queue->hw_queue_number, + new_head); + } + + dp_ctx->cached_dequeue = 0; + return 0; +} + +int +qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx) +{ + struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx; + struct qat_sym_session *ctx = _ctx; + + raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1; + raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1; + raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1; + raw_dp_ctx->dequeue_done = qat_sym_dp_denqueue_done_gen1; + + if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || + ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) && + !ctx->is_gmac) { + /* AES-GCM or AES-CCM */ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || + (ctx->qat_cipher_alg == 
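/*
 * Descriptive note, not part of this patch: the disjunct that follows catches
 * AES-CCM, which the session layer represents as an AES-128 cipher in CTR
 * mode paired with the AES_CBC_MAC hash.  Together with the GALOIS_128/64
 * (GCM) checks above, it routes every AEAD-style session to the aead enqueue
 * helpers, while the remaining cipher+hash sessions in this branch fall back
 * to the chain helpers.
 */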
ICP_QAT_HW_CIPHER_ALGO_AES128 + && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE + && ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { + raw_dp_ctx->enqueue_burst = + qat_sym_dp_enqueue_aead_jobs_gen1; + raw_dp_ctx->enqueue = + qat_sym_dp_enqueue_single_aead_gen1; + } else { + raw_dp_ctx->enqueue_burst = + qat_sym_dp_enqueue_chain_jobs_gen1; + raw_dp_ctx->enqueue = + qat_sym_dp_enqueue_single_chain_gen1; + } + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) { + raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1; + raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1; + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { + if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) { + raw_dp_ctx->enqueue_burst = + qat_sym_dp_enqueue_aead_jobs_gen1; + raw_dp_ctx->enqueue = + qat_sym_dp_enqueue_single_aead_gen1; + } else { + raw_dp_ctx->enqueue_burst = + qat_sym_dp_enqueue_cipher_jobs_gen1; + raw_dp_ctx->enqueue = + qat_sym_dp_enqueue_single_cipher_gen1; + } + } else + return -1; + + return 0; +} + +int +qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session) +{ + struct qat_sym_session *ctx = session; + qat_sym_build_request_t build_request = NULL; + enum rte_proc_type_t proc_type = rte_eal_process_type(); + int handle_mixed = 0; + + if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || + ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) && + !ctx->is_gmac) { + /* AES-GCM or AES-CCM */ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || + (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 + && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE + && ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { + /* do_aead = 1; */ + build_request = qat_sym_build_op_aead_gen1; + } else { + /* do_auth = 1; do_cipher = 1; */ + build_request = qat_sym_build_op_chain_gen1; + handle_mixed = 1; + } + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) { + /* do_auth = 1; do_cipher = 0;*/ + build_request = qat_sym_build_op_auth_gen1; + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { + /* do_auth = 0; do_cipher = 1; */ + build_request = qat_sym_build_op_cipher_gen1; + } + + if (!build_request) + return 0; + ctx->build_request[proc_type] = build_request; + + if (!handle_mixed) + return 0; + + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + return -ENOTSUP; + } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { + return -ENOTSUP; + } else if ((ctx->aes_cmac || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) && + (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) { + return -ENOTSUP; + } + + return 0; +} + RTE_INIT(qat_sym_crypto_gen1_init) { qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1; diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h index 17b2c871bd..4801bd50a7 100644 --- a/drivers/crypto/qat/qat_sym.h +++ b/drivers/crypto/qat/qat_sym.h @@ -54,6 +54,14 @@ struct qat_sym_op_cookie { } opt; }; +struct qat_sym_dp_ctx { + struct qat_sym_session *session; + uint32_t tail; + uint32_t head; + uint16_t cached_enqueue; + uint16_t cached_dequeue; +}; + static __rte_always_inline int 
refactor_qat_sym_process_response(__rte_unused void **op, __rte_unused uint8_t *resp, __rte_unused void *op_cookie, diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c index 12825e448b..94589458d0 100644 --- a/drivers/crypto/qat/qat_sym_hw_dp.c +++ b/drivers/crypto/qat/qat_sym_hw_dp.c @@ -13,14 +13,6 @@ #include "qat_sym_session.h" #include "qat_qp.h" -struct qat_sym_dp_ctx { - struct qat_sym_session *session; - uint32_t tail; - uint32_t head; - uint16_t cached_enqueue; - uint16_t cached_dequeue; -}; - static __rte_always_inline int32_t qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req, struct rte_crypto_vec *data, uint16_t n_data_vecs) diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c new file mode 100644 index 0000000000..0412902e70 --- /dev/null +++ b/drivers/crypto/qat/qat_sym_refactor.c @@ -0,0 +1,409 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2019 Intel Corporation + */ + +#include + +#include +#include +#include +#include +#include + +#include "qat_sym.h" +#include "qat_crypto.h" +#include "qat_qp.h" + +uint8_t qat_sym_driver_id; + +struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS]; + +/* An rte_driver is needed in the registration of both the device and the driver + * with cryptodev. + * The actual qat pci's rte_driver can't be used as its name represents + * the whole pci device with all services. Think of this as a holder for a name + * for the crypto part of the pci device. + */ +static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD); +static const struct rte_driver cryptodev_qat_sym_driver = { + .name = qat_sym_drv_name, + .alias = qat_sym_drv_name +}; + +void +qat_sym_init_op_cookie(void *op_cookie) +{ + struct qat_sym_op_cookie *cookie = op_cookie; + + cookie->qat_sgl_src_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_sym_op_cookie, + qat_sgl_src); + + cookie->qat_sgl_dst_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_sym_op_cookie, + qat_sgl_dst); + + cookie->opt.spc_gmac.cd_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_sym_op_cookie, + opt.spc_gmac.cd_cipher); +} + +static __rte_always_inline int +qat_sym_build_request(void *in_op, uint8_t *out_msg, + void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen) +{ + struct rte_crypto_op *op = (struct rte_crypto_op *)in_op; + void *sess = (void *)opaque[0]; + qat_sym_build_request_t build_request = (void *)opaque[1]; + struct qat_sym_session *ctx = NULL; + + if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) { + ctx = get_sym_session_private_data(op->sym->session, + qat_sym_driver_id); + if (unlikely(!ctx)) { + QAT_DP_LOG(ERR, "No session for this device"); + return -EINVAL; + } + if (sess != ctx) { + struct rte_cryptodev *cdev; + struct qat_cryptodev_private *internals; + enum rte_proc_type_t proc_type; + + cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id); + internals = cdev->data->dev_private; + proc_type = rte_eal_process_type(); + + if (internals->qat_dev->qat_dev_gen != dev_gen) { + op->status = + RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return -EINVAL; + } + + if (unlikely(ctx->build_request[proc_type] == NULL)) { + int ret = + qat_sym_gen_dev_ops[dev_gen].set_session( + (void *)cdev, sess); + if (ret < 0) { + op->status = + RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return -EINVAL; + } + } + + build_request = ctx->build_request[proc_type]; + opaque[0] = (uintptr_t)ctx; + opaque[1] = 
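/*
 * Illustrative sketch, not part of this patch: qp->opaque[] works as a
 * one-entry cache that persists across enqueues on the same queue pair --
 * opaque[0] holds the last session pointer seen, opaque[1] the build_request
 * callback resolved for it -- so the session/device lookups in this branch
 * only run when the session changes between consecutive operations:
 *
 *	if ((void *)opaque[0] == cur_sess)
 *		build = (qat_sym_build_request_t)(uintptr_t)opaque[1];
 *	else
 *		build = resolve_and_cache(cur_sess, opaque);	// slow path
 *
 * resolve_and_cache() is hypothetical shorthand for the surrounding code.
 */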
(uintptr_t)build_request; + } + } + +#ifdef RTE_LIB_SECURITY + else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { + if (sess != (void *)op->sym->sec_session) { + struct rte_cryptodev *cdev; + struct qat_cryptodev_private *internals; + enum rte_proc_type_t proc_type; + + ctx = get_sec_session_private_data( + op->sym->sec_session); + if (unlikely(!ctx)) { + QAT_DP_LOG(ERR, "No session for this device"); + return -EINVAL; + } + if (unlikely(ctx->bpi_ctx == NULL)) { + QAT_DP_LOG(ERR, "QAT PMD only supports security" + " operation requests for" + " DOCSIS, op (%p) is not for" + " DOCSIS.", op); + return -EINVAL; + } else if (unlikely(((op->sym->m_dst != NULL) && + (op->sym->m_dst != op->sym->m_src)) || + op->sym->m_src->nb_segs > 1)) { + QAT_DP_LOG(ERR, "OOP and/or multi-segment" + " buffers not supported for" + " DOCSIS security."); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id); + internals = cdev->data->dev_private; + proc_type = rte_eal_process_type(); + + if (internals->qat_dev->qat_dev_gen != dev_gen) { + op->status = + RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return -EINVAL; + } + + if (unlikely(ctx->build_request[proc_type] == NULL)) { + int ret = + qat_sym_gen_dev_ops[dev_gen].set_session( + (void *)cdev, sess); + if (ret < 0) { + op->status = + RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return -EINVAL; + } + } + + sess = (void *)op->sym->sec_session; + build_request = ctx->build_request[proc_type]; + opaque[0] = (uintptr_t)sess; + opaque[1] = (uintptr_t)build_request; + } + } +#endif + else { /* RTE_CRYPTO_OP_SESSIONLESS */ + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + QAT_LOG(DEBUG, "QAT does not support sessionless operation"); + return -1; + } + + return build_request(op, (void *)ctx, out_msg, op_cookie); +} + +uint16_t +qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return qat_enqueue_op_burst(qp, qat_sym_build_request, + (void **)ops, nb_ops); +} + +uint16_t +qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return qat_dequeue_op_burst(qp, (void **)ops, + qat_sym_process_response, nb_ops); +} + +int +qat_sym_dev_create(struct qat_pci_device *qat_pci_dev, + struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused) +{ + int i = 0, ret = 0; + struct qat_device_info *qat_dev_instance = + &qat_pci_devs[qat_pci_dev->qat_dev_id]; + struct rte_cryptodev_pmd_init_params init_params = { + .name = "", + .socket_id = qat_dev_instance->pci_dev->device.numa_node, + .private_data_size = sizeof(struct qat_cryptodev_private) + }; + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + struct rte_cryptodev *cryptodev; + struct qat_cryptodev_private *internals; + struct qat_capabilities_info capa_info; + const struct rte_cryptodev_capabilities *capabilities; + const struct qat_crypto_gen_dev_ops *gen_dev_ops = + &qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen]; + uint64_t capa_size; + + snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", + qat_pci_dev->name, "sym"); + QAT_LOG(DEBUG, "Creating QAT SYM device %s", name); + + if (gen_dev_ops->cryptodev_ops == NULL) { + QAT_LOG(ERR, "Device %s does not support symmetric crypto", + name); + return -(EFAULT); + } + + /* + * All processes must use same driver id so they can share sessions. + * Store driver_id so we can validate that all processes have the same + * value, typically they have, but could differ if binaries built + * separately. 
+ */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + qat_pci_dev->qat_sym_driver_id = + qat_sym_driver_id; + } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + if (qat_pci_dev->qat_sym_driver_id != + qat_sym_driver_id) { + QAT_LOG(ERR, + "Device %s have different driver id than corresponding device in primary process", + name); + return -(EFAULT); + } + } + + /* Populate subset device to use in cryptodev device creation */ + qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver; + qat_dev_instance->sym_rte_dev.numa_node = + qat_dev_instance->pci_dev->device.numa_node; + qat_dev_instance->sym_rte_dev.devargs = NULL; + + cryptodev = rte_cryptodev_pmd_create(name, + &(qat_dev_instance->sym_rte_dev), &init_params); + + if (cryptodev == NULL) + return -ENODEV; + + qat_dev_instance->sym_rte_dev.name = cryptodev->data->name; + cryptodev->driver_id = qat_sym_driver_id; + cryptodev->dev_ops = gen_dev_ops->cryptodev_ops; + + cryptodev->enqueue_burst = qat_sym_enqueue_burst; + cryptodev->dequeue_burst = qat_sym_dequeue_burst; + + cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + +#ifdef RTE_LIB_SECURITY + if (gen_dev_ops->create_security_ctx) { + cryptodev->security_ctx = + gen_dev_ops->create_security_ctx((void *)cryptodev); + if (cryptodev->security_ctx == NULL) { + QAT_LOG(ERR, "rte_security_ctx memory alloc failed"); + ret = -ENOMEM; + goto error; + } + + cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY; + QAT_LOG(INFO, "Device %s rte_security support enabled", name); + } else { + QAT_LOG(INFO, "Device %s rte_security support disabled", name); + } +#endif + snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, + "QAT_SYM_CAPA_GEN_%d", + qat_pci_dev->qat_dev_gen); + + internals = cryptodev->data->dev_private; + internals->qat_dev = qat_pci_dev; + + internals->dev_id = cryptodev->data->dev_id; + + capa_info = gen_dev_ops->get_capabilities(qat_pci_dev); + capabilities = capa_info.data; + capa_size = capa_info.size; + + internals->capa_mz = rte_memzone_lookup(capa_memz_name); + if (internals->capa_mz == NULL) { + internals->capa_mz = rte_memzone_reserve(capa_memz_name, + capa_size, rte_socket_id(), 0); + if (internals->capa_mz == NULL) { + QAT_LOG(DEBUG, + "Error allocating memzone for capabilities, " + "destroying PMD for %s", + name); + ret = -EFAULT; + goto error; + } + } + + memcpy(internals->capa_mz->addr, capabilities, capa_size); + internals->qat_dev_capabilities = internals->capa_mz->addr; + + while (1) { + if (qat_dev_cmd_param[i].name == NULL) + break; + if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME)) + internals->min_enq_burst_threshold = + qat_dev_cmd_param[i].val; + i++; + } + + internals->service_type = QAT_SERVICE_SYMMETRIC; + qat_pci_dev->sym_dev = internals; + QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d", + cryptodev->data->name, internals->dev_id); + + return 0; + +error: +#ifdef RTE_LIB_SECURITY + rte_free(cryptodev->security_ctx); + cryptodev->security_ctx = NULL; +#endif + rte_cryptodev_pmd_destroy(cryptodev); + memset(&qat_dev_instance->sym_rte_dev, 0, + sizeof(qat_dev_instance->sym_rte_dev)); + + return ret; +} + +int +qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev) +{ + struct rte_cryptodev *cryptodev; + + if (qat_pci_dev == NULL) + return -ENODEV; + if (qat_pci_dev->sym_dev == NULL) + return 0; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_memzone_free(qat_pci_dev->sym_dev->capa_mz); + + /* free crypto 
device */ + cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id); +#ifdef RTE_LIB_SECURITY + rte_free(cryptodev->security_ctx); + cryptodev->security_ctx = NULL; +#endif + rte_cryptodev_pmd_destroy(cryptodev); + qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL; + qat_pci_dev->sym_dev = NULL; + + return 0; +} + +int +qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id, + struct rte_crypto_raw_dp_ctx *raw_dp_ctx, + enum rte_crypto_op_sess_type sess_type, + union rte_cryptodev_session_ctx session_ctx, uint8_t is_update) +{ + struct qat_cryptodev_private *internals = dev->data->dev_private; + enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen; + struct qat_crypto_gen_dev_ops *gen_dev_ops = + &qat_sym_gen_dev_ops[qat_dev_gen]; + struct qat_qp *qp; + struct qat_sym_session *ctx; + struct qat_sym_dp_ctx *dp_ctx; + + if (!gen_dev_ops->set_raw_dp_ctx) { + QAT_LOG(ERR, "Device GEN %u does not support raw data path", + qat_dev_gen); + return -ENOTSUP; + } + + qp = dev->data->queue_pairs[qp_id]; + dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data; + + if (!is_update) { + memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) + + sizeof(struct qat_sym_dp_ctx)); + raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id]; + dp_ctx->tail = qp->tx_q.tail; + dp_ctx->head = qp->rx_q.head; + dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0; + } + + if (sess_type != RTE_CRYPTO_OP_WITH_SESSION) + return -EINVAL; + + ctx = (struct qat_sym_session *)get_sym_session_private_data( + session_ctx.crypto_sess, qat_sym_driver_id); + + dp_ctx->session = ctx; + + return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx); +} + +int +qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused) +{ + return sizeof(struct qat_sym_dp_ctx); +} + +static struct cryptodev_driver qat_crypto_drv; +RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, + cryptodev_qat_sym_driver, + qat_sym_driver_id); diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h new file mode 100644 index 0000000000..d4bfe8f364 --- /dev/null +++ b/drivers/crypto/qat/qat_sym_refactor.h @@ -0,0 +1,402 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#ifndef _QAT_SYM_H_ +#define _QAT_SYM_H_ + +#include +#ifdef RTE_LIB_SECURITY +#include +#endif + +#include + +#include "qat_common.h" +#include "qat_sym_session.h" +#include "qat_crypto.h" +#include "qat_logs.h" + +#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat + +#define BYTE_LENGTH 8 +/* bpi is only used for partial blocks of DES and AES + * so AES block len can be assumed as max len for iv, src and dst + */ +#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ + +/* Internal capabilities */ +#define QAT_SYM_CAP_MIXED_CRYPTO (1 << 0) +#define QAT_SYM_CAP_VALID (1 << 31) + +/* Macro to add a capability */ +#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d) \ + { \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_##n, \ + b, d \ + }, } \ + }, } \ + } + +#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i) \ + { \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_##n, \ + b, k, d, a, i \ + }, } \ + }, } \ + } + +#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i) \ + { \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ + {.aead = { \ + .algo = RTE_CRYPTO_AEAD_##n, \ + b, k, d, a, i \ + }, } \ + 
}, } \ + } + +#define QAT_SYM_CIPHER_CAP(n, b, k, i) \ + { \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_##n, \ + b, k, i \ + }, } \ + }, } \ + } + +/* + * Maximum number of SGL entries + */ +#define QAT_SYM_SGL_MAX_NUMBER 16 + +/* Maximum data length for single pass GMAC: 2^14-1 */ +#define QAT_AES_GMAC_SPC_MAX_SIZE 16383 + +struct qat_sym_session; + +struct qat_sym_sgl { + qat_sgl_hdr; + struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER]; +} __rte_packed __rte_cache_aligned; + +struct qat_sym_op_cookie { + struct qat_sym_sgl qat_sgl_src; + struct qat_sym_sgl qat_sgl_dst; + phys_addr_t qat_sgl_src_phys_addr; + phys_addr_t qat_sgl_dst_phys_addr; + union { + /* Used for Single-Pass AES-GMAC only */ + struct { + struct icp_qat_hw_cipher_algo_blk cd_cipher + __rte_packed __rte_cache_aligned; + phys_addr_t cd_phys_addr; + } spc_gmac; + } opt; +}; + +struct qat_sym_dp_ctx { + struct qat_sym_session *session; + uint32_t tail; + uint32_t head; + uint16_t cached_enqueue; + uint16_t cached_dequeue; +}; + +uint16_t +qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops); + +uint16_t +qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops); + +/** Encrypt a single partial block + * Depends on openssl libcrypto + * Uses ECB+XOR to do CFB encryption, same result, more performant + */ +static inline int +bpi_cipher_encrypt(uint8_t *src, uint8_t *dst, + uint8_t *iv, int ivlen, int srclen, + void *bpi_ctx) +{ + EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; + int encrypted_ivlen; + uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN]; + uint8_t *encr = encrypted_iv; + + /* ECB method: encrypt the IV, then XOR this with plaintext */ + if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) + <= 0) + goto cipher_encrypt_err; + + for (; srclen != 0; --srclen, ++dst, ++src, ++encr) + *dst = *src ^ *encr; + + return 0; + +cipher_encrypt_err: + QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed"); + return -EINVAL; +} + +static inline uint32_t +qat_bpicipher_postprocess(struct qat_sym_session *ctx, + struct rte_crypto_op *op) +{ + int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); + struct rte_crypto_sym_op *sym_op = op->sym; + uint8_t last_block_len = block_len > 0 ? + sym_op->cipher.data.length % block_len : 0; + + if (last_block_len > 0 && + ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) { + + /* Encrypt last block */ + uint8_t *last_block, *dst, *iv; + uint32_t last_block_offset; + + last_block_offset = sym_op->cipher.data.offset + + sym_op->cipher.data.length - last_block_len; + last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, + uint8_t *, last_block_offset); + + if (unlikely(sym_op->m_dst != NULL)) + /* out-of-place operation (OOP) */ + dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, + uint8_t *, last_block_offset); + else + dst = last_block; + + if (last_block_len < sym_op->cipher.data.length) + /* use previous block ciphertext as IV */ + iv = dst - block_len; + else + /* runt block, i.e. 
less than one full block */ + iv = rte_crypto_op_ctod_offset(op, uint8_t *, + ctx->cipher_iv.offset); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:", + last_block, last_block_len); + if (sym_op->m_dst != NULL) + QAT_DP_HEXDUMP_LOG(DEBUG, + "BPI: dst before post-process:", + dst, last_block_len); +#endif + bpi_cipher_encrypt(last_block, dst, iv, block_len, + last_block_len, ctx->bpi_ctx); +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:", + last_block, last_block_len); + if (sym_op->m_dst != NULL) + QAT_DP_HEXDUMP_LOG(DEBUG, + "BPI: dst after post-process:", + dst, last_block_len); +#endif + } + return sym_op->cipher.data.length - last_block_len; +} + +#ifdef RTE_LIB_SECURITY +static inline void +qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op) +{ + struct rte_crypto_sym_op *sym_op = op->sym; + uint32_t crc_data_ofs, crc_data_len, crc; + uint8_t *crc_data; + + if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT && + sym_op->auth.data.length != 0) { + + crc_data_ofs = sym_op->auth.data.offset; + crc_data_len = sym_op->auth.data.length; + crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *, + crc_data_ofs); + + crc = rte_net_crc_calc(crc_data, crc_data_len, + RTE_NET_CRC32_ETH); + + if (crc != *(uint32_t *)(crc_data + crc_data_len)) + op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + } +} + +static inline void +qat_crc_generate(struct qat_sym_session *ctx, + struct rte_crypto_op *op) +{ + struct rte_crypto_sym_op *sym_op = op->sym; + uint32_t *crc, crc_data_len; + uint8_t *crc_data; + + if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT && + sym_op->auth.data.length != 0 && + sym_op->m_src->nb_segs == 1) { + + crc_data_len = sym_op->auth.data.length; + crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *, + sym_op->auth.data.offset); + crc = (uint32_t *)(crc_data + crc_data_len); + *crc = rte_net_crc_calc(crc_data, crc_data_len, + RTE_NET_CRC32_ETH); + } +} + +static inline void +qat_sym_preprocess_requests(void **ops, uint16_t nb_ops) +{ + struct rte_crypto_op *op; + struct qat_sym_session *ctx; + uint16_t i; + + for (i = 0; i < nb_ops; i++) { + op = (struct rte_crypto_op *)ops[i]; + + if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { + ctx = (struct qat_sym_session *) + get_sec_session_private_data( + op->sym->sec_session); + + if (ctx == NULL || ctx->bpi_ctx == NULL) + continue; + + qat_crc_generate(ctx, op); + } + } +} +#endif + +static __rte_always_inline int +qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie, + uint64_t *dequeue_err_count __rte_unused) +{ + struct icp_qat_fw_comn_resp *resp_msg = + (struct icp_qat_fw_comn_resp *)resp; + struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) + (resp_msg->opaque_data); + struct qat_sym_session *sess; + uint8_t is_docsis_sec; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg, + sizeof(struct icp_qat_fw_comn_resp)); +#endif + +#ifdef RTE_LIB_SECURITY + if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { + /* + * Assuming at this point that if it's a security + * op, that this is for DOCSIS + */ + sess = (struct qat_sym_session *) + get_sec_session_private_data( + rx_op->sym->sec_session); + is_docsis_sec = 1; + } else +#endif + { + sess = (struct qat_sym_session *) + get_sym_session_private_data( + rx_op->sym->session, + qat_sym_driver_id); + is_docsis_sec = 0; + } + + if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != + 
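/*
 * Descriptive note, not part of this patch: the rest of this helper maps the
 * firmware response onto the rte_crypto_op roughly as follows -- a crypto
 * status other than OK becomes RTE_CRYPTO_OP_STATUS_AUTH_FAILED; on success
 * the status is set to RTE_CRYPTO_OP_STATUS_SUCCESS and, for sessions with a
 * BPI context, the runt block is re-encrypted in qat_bpicipher_postprocess()
 * (followed by CRC verification for DOCSIS security sessions); finally, for
 * single-pass GMAC sessions the key copy kept in the op cookie is zeroed.
 * The function always returns 1, i.e. one response consumed.
 */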
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET( + resp_msg->comn_hdr.comn_status)) { + + rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + } else { + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + + if (sess->bpi_ctx) { + qat_bpicipher_postprocess(sess, rx_op); +#ifdef RTE_LIB_SECURITY + if (is_docsis_sec) + qat_crc_verify(sess, rx_op); +#endif + } + } + + if (sess->is_single_pass_gmac) { + struct qat_sym_op_cookie *cookie = + (struct qat_sym_op_cookie *) op_cookie; + memset(cookie->opt.spc_gmac.cd_cipher.key, 0, + sess->auth_key_length); + } + + *op = (void *)rx_op; + + return 1; +} + +int +qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id, + struct rte_crypto_raw_dp_ctx *raw_dp_ctx, + enum rte_crypto_op_sess_type sess_type, + union rte_cryptodev_session_ctx session_ctx, uint8_t is_update); + +int +qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev); + +void +qat_sym_init_op_cookie(void *cookie); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG +static __rte_always_inline void +qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req, + struct qat_sym_session *ctx, + struct rte_crypto_vec *vec, uint32_t vec_len, + struct rte_crypto_va_iova_ptr *cipher_iv, + struct rte_crypto_va_iova_ptr *auth_iv, + struct rte_crypto_va_iova_ptr *aad, + struct rte_crypto_va_iova_ptr *digest) +{ + uint32_t i; + + QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, + sizeof(struct icp_qat_fw_la_bulk_req)); + for (i = 0; i < vec_len; i++) + QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len); + if (cipher_iv && ctx->cipher_iv.length > 0) + QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va, + ctx->cipher_iv.length); + if (auth_iv && ctx->auth_iv.length > 0) + QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va, + ctx->auth_iv.length); + if (aad && ctx->aad_len > 0) + QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va, + ctx->aad_len); + if (digest && ctx->digest_length > 0) + QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va, + ctx->digest_length); +} +#else +static __rte_always_inline void +qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused, + struct qat_sym_session *ctx __rte_unused, + struct rte_crypto_vec *vec __rte_unused, + uint32_t vec_len __rte_unused, + struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused, + struct rte_crypto_va_iova_ptr *auth_iv __rte_unused, + struct rte_crypto_va_iova_ptr *aad __rte_unused, + struct rte_crypto_va_iova_ptr *digest __rte_unused) +{} +#endif + +#endif /* _QAT_SYM_H_ */ From patchwork Tue Oct 26 17:25:14 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Ji, Kai" X-Patchwork-Id: 102987 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 7D327A0547; Tue, 26 Oct 2021 19:25:59 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 54FAD41103; Tue, 26 Oct 2021 19:25:54 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by mails.dpdk.org (Postfix) with ESMTP id 98D12410E7 for ; Tue, 26 Oct 2021 19:25:51 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10149"; a="228723404" X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="228723404" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 26 Oct 2021 10:25:26 -0700 X-ExtLoop1: 1 
X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="494306050" Received: from silpixa00400272.ir.intel.com (HELO silpixa00400272.ger.corp.intel.com) ([10.237.223.111]) by fmsmga007.fm.intel.com with ESMTP; 26 Oct 2021 10:25:24 -0700 From: Kai Ji To: dev@dpdk.org Cc: Kai Ji Date: Tue, 26 Oct 2021 18:25:14 +0100 Message-Id: <20211026172518.20183-4-kai.ji@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20211026172518.20183-1-kai.ji@intel.com> References: <20211026172518.20183-1-kai.ji@intel.com> Subject: [dpdk-dev] [dpdk-dev v1 3/7] crypto/qat: qat driver asym op refactor X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch add-in refactored qat asymmetric build request function implementation (qat_asym_refactor.c & qat_asym_refactor.h) Signed-off-by: Kai Ji Acked-by: Fan Zhang --- drivers/crypto/qat/dev/qat_asym_pmd_gen1.c | 7 + drivers/crypto/qat/qat_asym_refactor.c | 994 +++++++++++++++++++++ drivers/crypto/qat/qat_asym_refactor.h | 125 +++ 3 files changed, 1126 insertions(+) create mode 100644 drivers/crypto/qat/qat_asym_refactor.c create mode 100644 drivers/crypto/qat/qat_asym_refactor.h diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c index 9ed1f21d9d..99d90fa56c 100644 --- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c +++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c @@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1( return feature_flags; } +int +qat_asym_crypto_set_session_gen1(void *cdev __rte_unused, + void *session __rte_unused) +{ + return 0; +} + RTE_INIT(qat_asym_crypto_gen1_init) { qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops = diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c new file mode 100644 index 0000000000..8e789920cb --- /dev/null +++ b/drivers/crypto/qat/qat_asym_refactor.c @@ -0,0 +1,994 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 - 2021 Intel Corporation + */ + + +#include + +#include + +#include "icp_qat_fw_pke.h" +#include "icp_qat_fw.h" +#include "qat_pke_functionality_arrays.h" + +#include "qat_device.h" + +#include "qat_logs.h" +#include "qat_asym.h" + +uint8_t qat_asym_driver_id; + +struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS]; + +void +qat_asym_init_op_cookie(void *op_cookie) +{ + int j; + struct qat_asym_op_cookie *cookie = op_cookie; + + cookie->input_addr = rte_mempool_virt2iova(cookie) + + offsetof(struct qat_asym_op_cookie, + input_params_ptrs); + + cookie->output_addr = rte_mempool_virt2iova(cookie) + + offsetof(struct qat_asym_op_cookie, + output_params_ptrs); + + for (j = 0; j < 8; j++) { + cookie->input_params_ptrs[j] = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_asym_op_cookie, + input_array[j]); + cookie->output_params_ptrs[j] = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_asym_op_cookie, + output_array[j]); + } +} + +int +qat_asym_session_configure(struct rte_cryptodev *dev, + struct rte_crypto_asym_xform *xform, + struct rte_cryptodev_asym_session *sess, + struct rte_mempool *mempool) +{ + int err = 0; + void *sess_private_data; + struct qat_asym_session *session; + + if (rte_mempool_get(mempool, &sess_private_data)) { + QAT_LOG(ERR, + "Couldn't get object from session mempool"); + return -ENOMEM; + } + + session = sess_private_data; + if (xform->xform_type == 
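/*
 * Descriptive note, not part of this patch: session configuration only
 * validates the mandatory xform fields (modulus/exponent for modexp, modulus
 * for modinv, n for RSA), pre-builds the request template via
 * qat_asym_build_req_tmpl() and stores the xform pointer in the session; the
 * per-operation parameters are copied into the request at enqueue time by
 * qat_asym_fill_arrays() below.  Since only the pointer is kept, the xform
 * memory is assumed to stay valid for the lifetime of the session.
 */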
RTE_CRYPTO_ASYM_XFORM_MODEX) { + if (xform->modex.exponent.length == 0 || + xform->modex.modulus.length == 0) { + QAT_LOG(ERR, "Invalid mod exp input parameter"); + err = -EINVAL; + goto error; + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { + if (xform->modinv.modulus.length == 0) { + QAT_LOG(ERR, "Invalid mod inv input parameter"); + err = -EINVAL; + goto error; + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { + if (xform->rsa.n.length == 0) { + QAT_LOG(ERR, "Invalid rsa input parameter"); + err = -EINVAL; + goto error; + } + } else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END + || xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) { + QAT_LOG(ERR, "Invalid asymmetric crypto xform"); + err = -EINVAL; + goto error; + } else { + QAT_LOG(ERR, "Asymmetric crypto xform not implemented"); + err = -EINVAL; + goto error; + } + + session->xform = xform; + qat_asym_build_req_tmpl(sess_private_data); + set_asym_session_private_data(sess, dev->driver_id, + sess_private_data); + + return 0; +error: + rte_mempool_put(mempool, sess_private_data); + return err; +} + +unsigned int +qat_asym_session_get_private_size( + struct rte_cryptodev *dev __rte_unused) +{ + return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8); +} + +void +qat_asym_session_clear(struct rte_cryptodev *dev, + struct rte_cryptodev_asym_session *sess) +{ + uint8_t index = dev->driver_id; + void *sess_priv = get_asym_session_private_data(sess, index); + struct qat_asym_session *s = (struct qat_asym_session *)sess_priv; + + if (sess_priv) { + memset(s, 0, qat_asym_session_get_private_size(dev)); + struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); + + set_asym_session_private_data(sess, index, NULL); + rte_mempool_put(sess_mp, sess_priv); + } +} + +/* An rte_driver is needed in the registration of both the device and the driver + * with cryptodev. + * The actual qat pci's rte_driver can't be used as its name represents + * the whole pci device with all services. Think of this as a holder for a name + * for the crypto part of the pci device. 
+ */ +static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD); +static const struct rte_driver cryptodev_qat_asym_driver = { + .name = qat_asym_drv_name, + .alias = qat_asym_drv_name +}; + + +static void +qat_clear_arrays(struct qat_asym_op_cookie *cookie, + int in_count, int out_count, int in_size, int out_size) +{ + int i; + + for (i = 0; i < in_count; i++) + memset(cookie->input_array[i], 0x0, in_size); + for (i = 0; i < out_count; i++) + memset(cookie->output_array[i], 0x0, out_size); +} + +static void +qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie, + enum rte_crypto_asym_xform_type alg, int in_size, int out_size) +{ + if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX) + qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS, + QAT_ASYM_MODEXP_NUM_OUT_PARAMS, in_size, + out_size); + else if (alg == RTE_CRYPTO_ASYM_XFORM_MODINV) + qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS, + QAT_ASYM_MODINV_NUM_OUT_PARAMS, in_size, + out_size); +} + +static void +qat_asym_collect_response(struct rte_crypto_op *rx_op, + struct qat_asym_op_cookie *cookie, + struct rte_crypto_asym_xform *xform) +{ + size_t alg_size, alg_size_in_bytes = 0; + struct rte_crypto_asym_op *asym_op = rx_op->asym; + + if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { + rte_crypto_param n = xform->modex.modulus; + + alg_size = cookie->alg_size; + alg_size_in_bytes = alg_size >> 3; + uint8_t *modexp_result = asym_op->modex.result.data; + + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { + rte_memcpy(modexp_result + + (asym_op->modex.result.length - + n.length), + cookie->output_array[0] + alg_size_in_bytes + - n.length, n.length + ); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result", + cookie->output_array[0], + alg_size_in_bytes); + +#endif + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { + rte_crypto_param n = xform->modinv.modulus; + + alg_size = cookie->alg_size; + alg_size_in_bytes = alg_size >> 3; + uint8_t *modinv_result = asym_op->modinv.result.data; + + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { + rte_memcpy(modinv_result + + (asym_op->modinv.result.length - n.length), + cookie->output_array[0] + alg_size_in_bytes + - n.length, n.length); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result", + cookie->output_array[0], + alg_size_in_bytes); +#endif + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { + + alg_size = cookie->alg_size; + alg_size_in_bytes = alg_size >> 3; + if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT || + asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_VERIFY) { + if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_ENCRYPT) { + uint8_t *rsa_result = asym_op->rsa.cipher.data; + + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data", + cookie->output_array[0], + alg_size_in_bytes); +#endif + } else if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_VERIFY) { + uint8_t *rsa_result = asym_op->rsa.cipher.data; + + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + rx_op->status = + RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", + 
cookie->output_array[0], + alg_size_in_bytes); +#endif + break; + default: + QAT_LOG(ERR, "Padding not supported"); + rx_op->status = + RTE_CRYPTO_OP_STATUS_ERROR; + break; + } + } + } else { + if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_DECRYPT) { + uint8_t *rsa_result = asym_op->rsa.message.data; + + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + break; + default: + QAT_LOG(ERR, "Padding not supported"); + rx_op->status = + RTE_CRYPTO_OP_STATUS_ERROR; + break; + } +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message", + rsa_result, alg_size_in_bytes); +#endif + } else if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_SIGN) { + uint8_t *rsa_result = asym_op->rsa.sign.data; + + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", + cookie->output_array[0], + alg_size_in_bytes); +#endif + } + } + } + qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes, + alg_size_in_bytes); +} + +int +qat_asym_process_response(void __rte_unused * *op, uint8_t *resp, + void *op_cookie, __rte_unused uint64_t *dequeue_err_count) +{ + struct qat_asym_session *ctx; + struct icp_qat_fw_pke_resp *resp_msg = + (struct icp_qat_fw_pke_resp *)resp; + struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) + (resp_msg->opaque); + struct qat_asym_op_cookie *cookie = op_cookie; + + if (cookie->error) { + cookie->error = 0; + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; + QAT_DP_LOG(ERR, "Cookie status returned error"); + } else { + if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( + resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) { + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; + QAT_DP_LOG(ERR, "Asymmetric response status" + " returned error"); + } + if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) { + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; + QAT_DP_LOG(ERR, "Asymmetric common status" + " returned error"); + } + } + + if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { + ctx = (struct qat_asym_session *)get_asym_session_private_data( + rx_op->asym->session, qat_asym_driver_id); + qat_asym_collect_response(rx_op, cookie, ctx->xform); + } else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { + qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform); + } + *op = rx_op; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg, + sizeof(struct icp_qat_fw_pke_resp)); +#endif + + return 1; +} + +#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg)) + +static int +qat_asym_get_sz_and_func_id(const uint32_t arr[][2], + size_t arr_sz, size_t *size, uint32_t *func_id) +{ + size_t i; + + for (i = 0; i < arr_sz; i++) { + if (*size <= arr[i][0]) { + *size = arr[i][0]; + *func_id = arr[i][1]; + return 0; + } + } + return -1; +} + +static size_t +max_of(int n, ...) 
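/*
 * Descriptive note, not part of this patch: max_of(n, ...) returns the largest
 * of its n size_t arguments.  qat_asym_fill_arrays() below uses it to pick a
 * common operand width, e.g.
 *
 *	len = max_of(3, base.length, exponent.length, modulus.length);
 *
 * which qat_asym_get_sz_and_func_id() then rounds up to the nearest
 * firmware-supported PKE size while returning the matching function id, before
 * the operands are copied, right-aligned, into the cookie input arrays.
 */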
+{ + va_list args; + size_t len = 0, num; + int i; + + va_start(args, n); + len = va_arg(args, size_t); + + for (i = 0; i < n - 1; i++) { + num = va_arg(args, size_t); + if (num > len) + len = num; + } + va_end(args); + + return len; +} + +static int +qat_asym_check_nonzero(rte_crypto_param n) +{ + if (n.length < 8) { + /* Not a case for any cryptograpic function except for DH + * generator which very often can be of one byte length + */ + size_t i; + + if (n.data[n.length - 1] == 0x0) { + for (i = 0; i < n.length - 1; i++) + if (n.data[i] != 0x0) + break; + if (i == n.length - 1) + return -(EINVAL); + } + } else if (*(uint64_t *)&n.data[ + n.length - 8] == 0) { + /* Very likely it is zeroed modulus */ + size_t i; + + for (i = 0; i < n.length - 8; i++) + if (n.data[i] != 0x0) + break; + if (i == n.length - 8) + return -(EINVAL); + } + + return 0; +} + +static int +qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op, + struct icp_qat_fw_pke_request *qat_req, + struct qat_asym_op_cookie *cookie, + struct rte_crypto_asym_xform *xform) +{ + int err = 0; + size_t alg_size; + size_t alg_size_in_bytes; + uint32_t func_id = 0; + + if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { + err = qat_asym_check_nonzero(xform->modex.modulus); + if (err) { + QAT_LOG(ERR, "Empty modulus in modular exponentiation," + " aborting this operation"); + return err; + } + + alg_size_in_bytes = max_of(3, asym_op->modex.base.length, + xform->modex.exponent.length, + xform->modex.modulus.length); + alg_size = alg_size_in_bytes << 3; + + if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE, + sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE), + &alg_size, &func_id)) { + return -(EINVAL); + } + + alg_size_in_bytes = alg_size >> 3; + rte_memcpy(cookie->input_array[0] + alg_size_in_bytes - + asym_op->modex.base.length + , asym_op->modex.base.data, + asym_op->modex.base.length); + rte_memcpy(cookie->input_array[1] + alg_size_in_bytes - + xform->modex.exponent.length + , xform->modex.exponent.data, + xform->modex.exponent.length); + rte_memcpy(cookie->input_array[2] + alg_size_in_bytes - + xform->modex.modulus.length, + xform->modex.modulus.data, + xform->modex.modulus.length); + cookie->alg_size = alg_size; + qat_req->pke_hdr.cd_pars.func_id = func_id; + qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS; + qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base", + cookie->input_array[0], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent", + cookie->input_array[1], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, " ModExpmodulus", + cookie->input_array[2], + alg_size_in_bytes); +#endif + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { + err = qat_asym_check_nonzero(xform->modinv.modulus); + if (err) { + QAT_LOG(ERR, "Empty modulus in modular multiplicative" + " inverse, aborting this operation"); + return err; + } + + alg_size_in_bytes = max_of(2, asym_op->modinv.base.length, + xform->modinv.modulus.length); + alg_size = alg_size_in_bytes << 3; + + if (xform->modinv.modulus.data[ + xform->modinv.modulus.length - 1] & 0x01) { + if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD, + sizeof(MOD_INV_IDS_ODD)/ + sizeof(*MOD_INV_IDS_ODD), + &alg_size, &func_id)) { + return -(EINVAL); + } + } else { + if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN, + sizeof(MOD_INV_IDS_EVEN)/ + sizeof(*MOD_INV_IDS_EVEN), + &alg_size, &func_id)) { + return -(EINVAL); + } + } + + alg_size_in_bytes = alg_size >> 3; + 
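/*
 * Illustrative example, not part of this patch: the copies below place each
 * big-endian operand at the tail of its fixed-width slot, i.e. right-aligned
 * within alg_size_in_bytes, so the leading bytes act as zero padding:
 *
 *	// assuming a 128-byte (1024-bit) slot and an operand of src_len bytes
 *	dst = cookie->input_array[i] + 128 - src_len;
 *	rte_memcpy(dst, src, src_len);
 *
 * For modexp/modinv the used slots are re-zeroed once the response has been
 * collected (qat_clear_arrays_by_alg(), defined earlier in this file), so
 * stale operand bytes do not leak into the next request.
 */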
rte_memcpy(cookie->input_array[0] + alg_size_in_bytes - + asym_op->modinv.base.length + , asym_op->modinv.base.data, + asym_op->modinv.base.length); + rte_memcpy(cookie->input_array[1] + alg_size_in_bytes - + xform->modinv.modulus.length + , xform->modinv.modulus.data, + xform->modinv.modulus.length); + cookie->alg_size = alg_size; + qat_req->pke_hdr.cd_pars.func_id = func_id; + qat_req->input_param_count = + QAT_ASYM_MODINV_NUM_IN_PARAMS; + qat_req->output_param_count = + QAT_ASYM_MODINV_NUM_OUT_PARAMS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base", + cookie->input_array[0], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus", + cookie->input_array[1], + alg_size_in_bytes); +#endif + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { + err = qat_asym_check_nonzero(xform->rsa.n); + if (err) { + QAT_LOG(ERR, "Empty modulus in RSA" + " inverse, aborting this operation"); + return err; + } + + alg_size_in_bytes = xform->rsa.n.length; + alg_size = alg_size_in_bytes << 3; + + qat_req->input_param_count = + QAT_ASYM_RSA_NUM_IN_PARAMS; + qat_req->output_param_count = + QAT_ASYM_RSA_NUM_OUT_PARAMS; + + if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT || + asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_VERIFY) { + + if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS, + sizeof(RSA_ENC_IDS)/ + sizeof(*RSA_ENC_IDS), + &alg_size, &func_id)) { + err = -(EINVAL); + QAT_LOG(ERR, + "Not supported RSA parameter size (key)"); + return err; + } + alg_size_in_bytes = alg_size >> 3; + if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_ENCRYPT) { + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(cookie->input_array[0] + + alg_size_in_bytes - + asym_op->rsa.message.length + , asym_op->rsa.message.data, + asym_op->rsa.message.length); + break; + default: + err = -(EINVAL); + QAT_LOG(ERR, + "Invalid RSA padding (Encryption)"); + return err; + } +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message", + cookie->input_array[0], + alg_size_in_bytes); +#endif + } else { + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(cookie->input_array[0], + asym_op->rsa.sign.data, + alg_size_in_bytes); + break; + default: + err = -(EINVAL); + QAT_LOG(ERR, + "Invalid RSA padding (Verify)"); + return err; + } + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, " RSA Signature", + cookie->input_array[0], + alg_size_in_bytes); +#endif + + } + rte_memcpy(cookie->input_array[1] + + alg_size_in_bytes - + xform->rsa.e.length + , xform->rsa.e.data, + xform->rsa.e.length); + rte_memcpy(cookie->input_array[2] + + alg_size_in_bytes - + xform->rsa.n.length, + xform->rsa.n.data, + xform->rsa.n.length); + + cookie->alg_size = alg_size; + qat_req->pke_hdr.cd_pars.func_id = func_id; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key", + cookie->input_array[1], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus", + cookie->input_array[2], + alg_size_in_bytes); +#endif + } else { + if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_DECRYPT) { + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(cookie->input_array[0] + + alg_size_in_bytes - + asym_op->rsa.cipher.length, + asym_op->rsa.cipher.data, + asym_op->rsa.cipher.length); + break; + default: + QAT_LOG(ERR, + "Invalid padding of RSA (Decrypt)"); + return -(EINVAL); + } + + } else if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_SIGN) { + switch (asym_op->rsa.pad) { + case 
RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(cookie->input_array[0] + + alg_size_in_bytes - + asym_op->rsa.message.length, + asym_op->rsa.message.data, + asym_op->rsa.message.length); + break; + default: + QAT_LOG(ERR, + "Invalid padding of RSA (Signature)"); + return -(EINVAL); + } + } + if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) { + + qat_req->input_param_count = + QAT_ASYM_RSA_QT_NUM_IN_PARAMS; + if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS, + sizeof(RSA_DEC_CRT_IDS)/ + sizeof(*RSA_DEC_CRT_IDS), + &alg_size, &func_id)) { + return -(EINVAL); + } + alg_size_in_bytes = alg_size >> 3; + + rte_memcpy(cookie->input_array[1] + + (alg_size_in_bytes >> 1) - + xform->rsa.qt.p.length + , xform->rsa.qt.p.data, + xform->rsa.qt.p.length); + rte_memcpy(cookie->input_array[2] + + (alg_size_in_bytes >> 1) - + xform->rsa.qt.q.length + , xform->rsa.qt.q.data, + xform->rsa.qt.q.length); + rte_memcpy(cookie->input_array[3] + + (alg_size_in_bytes >> 1) - + xform->rsa.qt.dP.length + , xform->rsa.qt.dP.data, + xform->rsa.qt.dP.length); + rte_memcpy(cookie->input_array[4] + + (alg_size_in_bytes >> 1) - + xform->rsa.qt.dQ.length + , xform->rsa.qt.dQ.data, + xform->rsa.qt.dQ.length); + rte_memcpy(cookie->input_array[5] + + (alg_size_in_bytes >> 1) - + xform->rsa.qt.qInv.length + , xform->rsa.qt.qInv.data, + xform->rsa.qt.qInv.length); + cookie->alg_size = alg_size; + qat_req->pke_hdr.cd_pars.func_id = func_id; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "C", + cookie->input_array[0], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, "p", + cookie->input_array[1], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, "q", + cookie->input_array[2], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, + "dP", cookie->input_array[3], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, + "dQ", cookie->input_array[4], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, + "qInv", cookie->input_array[5], + alg_size_in_bytes); +#endif + } else if (xform->rsa.key_type == + RTE_RSA_KEY_TYPE_EXP) { + if (qat_asym_get_sz_and_func_id( + RSA_DEC_IDS, + sizeof(RSA_DEC_IDS)/ + sizeof(*RSA_DEC_IDS), + &alg_size, &func_id)) { + return -(EINVAL); + } + alg_size_in_bytes = alg_size >> 3; + rte_memcpy(cookie->input_array[1] + + alg_size_in_bytes - + xform->rsa.d.length, + xform->rsa.d.data, + xform->rsa.d.length); + rte_memcpy(cookie->input_array[2] + + alg_size_in_bytes - + xform->rsa.n.length, + xform->rsa.n.data, + xform->rsa.n.length); +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext", + cookie->input_array[0], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d", + cookie->input_array[1], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n", + cookie->input_array[2], + alg_size_in_bytes); +#endif + + cookie->alg_size = alg_size; + qat_req->pke_hdr.cd_pars.func_id = func_id; + } else { + QAT_LOG(ERR, "Invalid RSA key type"); + return -(EINVAL); + } + } + } else { + QAT_LOG(ERR, "Invalid asymmetric crypto xform"); + return -(EINVAL); + } + return 0; +} + +static __rte_always_inline int +qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie, + __rte_unused uint64_t *opaque, + __rte_unused enum qat_device_gen dev_gen) +{ + struct qat_asym_session *ctx; + struct rte_crypto_op *op = (struct rte_crypto_op *)in_op; + struct rte_crypto_asym_op *asym_op = op->asym; + struct icp_qat_fw_pke_request *qat_req = + (struct icp_qat_fw_pke_request *)out_msg; + struct qat_asym_op_cookie *cookie = + (struct qat_asym_op_cookie *)op_cookie; + int err = 0; + + op->status = 
RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { + ctx = (struct qat_asym_session *) + get_asym_session_private_data( + op->asym->session, qat_asym_driver_id); + if (unlikely(ctx == NULL)) { + QAT_LOG(ERR, "Session has not been created for this device"); + goto error; + } + rte_mov64((uint8_t *)qat_req, + (const uint8_t *)&(ctx->req_tmpl)); + err = qat_asym_fill_arrays(asym_op, qat_req, + cookie, ctx->xform); + if (err) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + goto error; + } + } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { + qat_fill_req_tmpl(qat_req); + err = qat_asym_fill_arrays(asym_op, qat_req, cookie, + op->asym->xform); + if (err) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + goto error; + } + } else { + QAT_DP_LOG(ERR, "Invalid session/xform settings"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + goto error; + } + + qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op; + qat_req->pke_mid.src_data_addr = cookie->input_addr; + qat_req->pke_mid.dest_data_addr = cookie->output_addr; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, + sizeof(struct icp_qat_fw_pke_request)); +#endif + + return 0; +error: + + qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, + sizeof(struct icp_qat_fw_pke_request)); +#endif + + qat_req->output_param_count = 0; + qat_req->input_param_count = 0; + qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL; + cookie->error |= err; + + return 0; +} + +static uint16_t +qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops, + nb_ops); +} + +static uint16_t +qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response, + nb_ops); +} + +int +qat_asym_dev_create(struct qat_pci_device *qat_pci_dev, + struct qat_dev_cmd_param *qat_dev_cmd_param) +{ + struct qat_cryptodev_private *internals; + struct rte_cryptodev *cryptodev; + struct qat_device_info *qat_dev_instance = + &qat_pci_devs[qat_pci_dev->qat_dev_id]; + struct rte_cryptodev_pmd_init_params init_params = { + .name = "", + .socket_id = qat_dev_instance->pci_dev->device.numa_node, + .private_data_size = sizeof(struct qat_cryptodev_private) + }; + struct qat_capabilities_info capa_info; + const struct rte_cryptodev_capabilities *capabilities; + const struct qat_crypto_gen_dev_ops *gen_dev_ops = + &qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen]; + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + uint64_t capa_size; + int i = 0; + + snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", + qat_pci_dev->name, "asym"); + QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name); + + if (gen_dev_ops->cryptodev_ops == NULL) { + QAT_LOG(ERR, "Device %s does not support asymmetric crypto", + name); + return -(EFAULT); + } + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + qat_pci_dev->qat_asym_driver_id = + qat_asym_driver_id; + } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + if (qat_pci_dev->qat_asym_driver_id != + qat_asym_driver_id) { + QAT_LOG(ERR, + "Device %s have different driver id than corresponding device in primary process", + name); + return -(EFAULT); + } + } + + /* Populate subset device to use in cryptodev device creation */ + 
qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver; + qat_dev_instance->asym_rte_dev.numa_node = + qat_dev_instance->pci_dev->device.numa_node; + qat_dev_instance->asym_rte_dev.devargs = NULL; + + cryptodev = rte_cryptodev_pmd_create(name, + &(qat_dev_instance->asym_rte_dev), &init_params); + + if (cryptodev == NULL) + return -ENODEV; + + qat_dev_instance->asym_rte_dev.name = cryptodev->data->name; + cryptodev->driver_id = qat_asym_driver_id; + cryptodev->dev_ops = gen_dev_ops->cryptodev_ops; + + cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst; + cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst; + + cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, + "QAT_ASYM_CAPA_GEN_%d", + qat_pci_dev->qat_dev_gen); + + internals = cryptodev->data->dev_private; + internals->qat_dev = qat_pci_dev; + internals->dev_id = cryptodev->data->dev_id; + + capa_info = gen_dev_ops->get_capabilities(qat_pci_dev); + capabilities = capa_info.data; + capa_size = capa_info.size; + + internals->capa_mz = rte_memzone_lookup(capa_memz_name); + if (internals->capa_mz == NULL) { + internals->capa_mz = rte_memzone_reserve(capa_memz_name, + capa_size, rte_socket_id(), 0); + if (internals->capa_mz == NULL) { + QAT_LOG(DEBUG, + "Error allocating memzone for capabilities, " + "destroying PMD for %s", + name); + rte_cryptodev_pmd_destroy(cryptodev); + memset(&qat_dev_instance->asym_rte_dev, 0, + sizeof(qat_dev_instance->asym_rte_dev)); + return -EFAULT; + } + } + + memcpy(internals->capa_mz->addr, capabilities, capa_size); + internals->qat_dev_capabilities = internals->capa_mz->addr; + + while (1) { + if (qat_dev_cmd_param[i].name == NULL) + break; + if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME)) + internals->min_enq_burst_threshold = + qat_dev_cmd_param[i].val; + i++; + } + + qat_pci_dev->asym_dev = internals; + internals->service_type = QAT_SERVICE_ASYMMETRIC; + QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d", + cryptodev->data->name, internals->dev_id); + return 0; +} + +int +qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev) +{ + struct rte_cryptodev *cryptodev; + + if (qat_pci_dev == NULL) + return -ENODEV; + if (qat_pci_dev->asym_dev == NULL) + return 0; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_memzone_free(qat_pci_dev->asym_dev->capa_mz); + + /* free crypto device */ + cryptodev = rte_cryptodev_pmd_get_dev( + qat_pci_dev->asym_dev->dev_id); + rte_cryptodev_pmd_destroy(cryptodev); + qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL; + qat_pci_dev->asym_dev = NULL; + + return 0; +} + +static struct cryptodev_driver qat_crypto_drv; +RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, + cryptodev_qat_asym_driver, + qat_asym_driver_id); diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h new file mode 100644 index 0000000000..9ecabdbe8f --- /dev/null +++ b/drivers/crypto/qat/qat_asym_refactor.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#ifndef _QAT_ASYM_H_ +#define _QAT_ASYM_H_ + +#include +#include +#include "icp_qat_fw_pke.h" +#include "qat_device.h" +#include "qat_crypto.h" +#include "icp_qat_fw.h" + +/** Intel(R) QAT Asymmetric Crypto PMD driver name */ +#define CRYPTODEV_NAME_QAT_ASYM_PMD crypto_qat_asym + +typedef uint64_t large_int_ptr; +#define MAX_PKE_PARAMS 8 +#define 
QAT_PKE_MAX_LN_SIZE 512 +#define _PKE_ALIGN_ __rte_aligned(8) + +#define QAT_ASYM_MAX_PARAMS 8 +#define QAT_ASYM_MODINV_NUM_IN_PARAMS 2 +#define QAT_ASYM_MODINV_NUM_OUT_PARAMS 1 +#define QAT_ASYM_MODEXP_NUM_IN_PARAMS 3 +#define QAT_ASYM_MODEXP_NUM_OUT_PARAMS 1 +#define QAT_ASYM_RSA_NUM_IN_PARAMS 3 +#define QAT_ASYM_RSA_NUM_OUT_PARAMS 1 +#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS 6 + +/** + * helper function to add an asym capability + * + **/ +#define QAT_ASYM_CAP(n, o, l, r, i) \ + { \ + .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, \ + {.asym = { \ + .xform_capa = { \ + .xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\ + .op_types = o, \ + { \ + .modlen = { \ + .min = l, \ + .max = r, \ + .increment = i \ + }, } \ + } \ + }, \ + } \ + } + +struct qat_asym_op_cookie { + size_t alg_size; + uint64_t error; + rte_iova_t input_addr; + rte_iova_t output_addr; + large_int_ptr input_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_; + large_int_ptr output_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_; + union { + uint8_t input_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE]; + uint8_t input_buffer[MAX_PKE_PARAMS * QAT_PKE_MAX_LN_SIZE]; + } _PKE_ALIGN_; + uint8_t output_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE] _PKE_ALIGN_; +} _PKE_ALIGN_; + +struct qat_asym_session { + struct icp_qat_fw_pke_request req_tmpl; + struct rte_crypto_asym_xform *xform; +}; + +static inline void +qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req) +{ + memset(qat_req, 0, sizeof(*qat_req)); + qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; + + qat_req->pke_hdr.hdr_flags = + ICP_QAT_FW_COMN_HDR_FLAGS_BUILD + (ICP_QAT_FW_COMN_REQ_FLAG_SET); +} + +static inline void +qat_asym_build_req_tmpl(void *sess_private_data) +{ + struct icp_qat_fw_pke_request *qat_req; + struct qat_asym_session *session = sess_private_data; + + qat_req = &session->req_tmpl; + qat_fill_req_tmpl(qat_req); +} + +int +qat_asym_session_configure(struct rte_cryptodev *dev, + struct rte_crypto_asym_xform *xform, + struct rte_cryptodev_asym_session *sess, + struct rte_mempool *mempool); + +unsigned int +qat_asym_session_get_private_size(struct rte_cryptodev *dev); + +void +qat_asym_session_clear(struct rte_cryptodev *dev, + struct rte_cryptodev_asym_session *sess); + +/* + * Process PKE response received from outgoing queue of QAT + * + * @param op a ptr to the rte_crypto_op referred to by + * the response message is returned in this param + * @param resp icp_qat_fw_pke_resp message received from + * outgoing fw message queue + * @param op_cookie Cookie pointer that holds private metadata + * @param dequeue_err_count Error count number pointer + * + */ +int +qat_asym_process_response(void __rte_unused * *op, uint8_t *resp, + void *op_cookie, __rte_unused uint64_t *dequeue_err_count); + +void +qat_asym_init_op_cookie(void *cookie); + +#endif /* _QAT_ASYM_H_ */ From patchwork Tue Oct 26 17:25:15 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Ji, Kai" X-Patchwork-Id: 102990 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id AD538A0547; Tue, 26 Oct 2021 19:26:22 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 18CB641142; Tue, 26 Oct 2021 19:26:00 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by mails.dpdk.org (Postfix) with ESMTP 
id 29F8B410E7 for ; Tue, 26 Oct 2021 19:25:53 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10149"; a="228723414" X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="228723414" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 26 Oct 2021 10:25:27 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="494306154" Received: from silpixa00400272.ir.intel.com (HELO silpixa00400272.ger.corp.intel.com) ([10.237.223.111]) by fmsmga007.fm.intel.com with ESMTP; 26 Oct 2021 10:25:26 -0700 From: Kai Ji To: dev@dpdk.org Cc: Kai Ji Date: Tue, 26 Oct 2021 18:25:15 +0100 Message-Id: <20211026172518.20183-5-kai.ji@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20211026172518.20183-1-kai.ji@intel.com> References: <20211026172518.20183-1-kai.ji@intel.com> Subject: [dpdk-dev] [dpdk-dev v1 4/7] crypto/qat: qat driver session method rework X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" The patch introduce set_session & set_raw_dp_ctx methods to qat gen dev ops Replace min_qat_dev_gen_id with dev_id, the session will be invalid if the device generation id is not matching during init and ops Signed-off-by: Kai Ji Acked-by: Fan Zhang --- drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c | 90 ++++ drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 467 +++++++++++++++++++ drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 243 ++++++++++ drivers/crypto/qat/dev/qat_sym_pmd_gen1.c | 7 +- drivers/crypto/qat/qat_crypto.c | 1 + drivers/crypto/qat/qat_crypto.h | 9 + drivers/crypto/qat/qat_sym.c | 8 +- drivers/crypto/qat/qat_sym_session.c | 106 +---- drivers/crypto/qat/qat_sym_session.h | 2 +- 9 files changed, 832 insertions(+), 101 deletions(-) diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c index b4ec440e05..72609703f9 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c @@ -166,6 +166,90 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id, return 0; } +void +qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session, + uint8_t hash_flag) +{ + struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr; + struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl = + (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *) + session->fw_req.cd_ctrl.content_desc_ctrl_lw; + + /* Set the Use Extended Protocol Flags bit in LW 1 */ + QAT_FIELD_SET(header->comn_req_flags, + QAT_COMN_EXT_FLAGS_USED, + QAT_COMN_EXT_FLAGS_BITPOS, + QAT_COMN_EXT_FLAGS_MASK); + + /* Set Hash Flags in LW 28 */ + cd_ctrl->hash_flags |= hash_flag; + + /* Set proto flags in LW 1 */ + switch (session->qat_cipher_alg) { + case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2: + ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_SNOW_3G_PROTO); + ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( + header->serv_specif_flags, 0); + break; + case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3: + ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_PROTO); + ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( + header->serv_specif_flags, + ICP_QAT_FW_LA_ZUC_3G_PROTO); + break; + default: + ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_PROTO); + ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( + header->serv_specif_flags, 0); + break; + 
} +} + +static int +qat_sym_crypto_set_session_gen2(void *cdev, void *session) +{ + struct rte_cryptodev *dev = cdev; + struct qat_sym_session *ctx = session; + const struct qat_cryptodev_private *qat_private = + dev->data->dev_private; + int ret; + + ret = qat_sym_crypto_set_session_gen1(cdev, session); + if (ret == 0 || ret != -ENOTSUP) + return ret; + + /* GEN1 returning -ENOTSUP as it cannot handle some mixed algo, + * but some are not supported by GEN2, so checking here + */ + if ((qat_private->internal_capabilities & + QAT_SYM_CAP_MIXED_CRYPTO) == 0) + return -ENOTSUP; + + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS); + } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS); + } else if ((ctx->aes_cmac || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) && + (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, 0); + } + + return 0; +} + struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = { /* Device related operations */ @@ -204,6 +288,10 @@ RTE_INIT(qat_sym_crypto_gen2_init) qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2; qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities = qat_sym_crypto_cap_get_gen2; + qat_sym_gen_dev_ops[QAT_GEN2].set_session = + qat_sym_crypto_set_session_gen2; + qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx = + qat_sym_configure_raw_dp_ctx_gen1; qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1; @@ -221,4 +309,6 @@ RTE_INIT(qat_asym_crypto_gen2_init) qat_asym_crypto_cap_get_gen1; qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags = qat_asym_crypto_feature_flags_get_gen1; + qat_asym_gen_dev_ops[QAT_GEN2].set_session = + qat_asym_crypto_set_session_gen1; } diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c index d3336cf4a1..6494019050 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c @@ -143,6 +143,468 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused) return capa_info; } +static __rte_always_inline void +enqueue_one_aead_job_gen3(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + if (ctx->is_single_pass) { + struct icp_qat_fw_la_cipher_req_params *cipher_param = + (void *)&req->serv_specif_rqpars; + + /* QAT GEN3 uses single pass to treat AEAD as + * cipher operation + */ + cipher_param = (void *)&req->serv_specif_rqpars; + + qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req); + cipher_param->cipher_offset = ofs.ofs.cipher.head; + cipher_param->cipher_length = data_len - ofs.ofs.cipher.head - + ofs.ofs.cipher.tail; + + cipher_param->spc_aad_addr = aad->iova; + cipher_param->spc_auth_res_addr = digest->iova; + + return; + } + + enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len); +} + +static __rte_always_inline void +enqueue_one_auth_job_gen3(struct qat_sym_session *ctx, + 
struct qat_sym_op_cookie *cookie, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl; + struct icp_qat_fw_la_cipher_req_params *cipher_param; + uint32_t ver_key_offset; + uint32_t auth_data_len = data_len - ofs.ofs.auth.head - + ofs.ofs.auth.tail; + + if (!ctx->is_single_pass_gmac || + (auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) { + enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs, + data_len); + return; + } + + cipher_cd_ctrl = (void *) &req->cd_ctrl; + cipher_param = (void *)&req->serv_specif_rqpars; + ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) + + ICP_QAT_HW_GALOIS_128_STATE1_SZ + + ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ + + ICP_QAT_HW_GALOIS_E_CTR0_SZ + + sizeof(struct icp_qat_hw_cipher_config); + + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { + /* AES-GMAC */ + qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length, + req); + } + + /* Fill separate Content Descriptor for this op */ + rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key, + ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ? + ctx->cd.cipher.key : + RTE_PTR_ADD(&ctx->cd, ver_key_offset), + ctx->auth_key_length); + cookie->opt.spc_gmac.cd_cipher.cipher_config.val = + ICP_QAT_HW_CIPHER_CONFIG_BUILD( + ICP_QAT_HW_CIPHER_AEAD_MODE, + ctx->qat_cipher_alg, + ICP_QAT_HW_CIPHER_NO_CONVERT, + (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ? + ICP_QAT_HW_CIPHER_ENCRYPT : + ICP_QAT_HW_CIPHER_DECRYPT)); + QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val, + ctx->digest_length, + QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS, + QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK); + cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved = + ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len); + + /* Update the request */ + req->cd_pars.u.s.content_desc_addr = + cookie->opt.spc_gmac.cd_phys_addr; + req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL( + sizeof(struct icp_qat_hw_cipher_config) + + ctx->auth_key_length, 8) >> 3; + req->comn_mid.src_length = data_len; + req->comn_mid.dst_length = 0; + + cipher_param->spc_aad_addr = 0; + cipher_param->spc_auth_res_addr = digest->iova; + cipher_param->spc_aad_sz = auth_data_len; + cipher_param->reserved = 0; + cipher_param->spc_auth_res_sz = ctx->digest_length; + + req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER; + cipher_cd_ctrl->cipher_cfg_offset = 0; + ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); + ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET( + req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_LA_SINGLE_PASS_PROTO); + ICP_QAT_FW_LA_PROTO_SET( + req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_LA_NO_PROTO); +} + +static int +qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie) +{ + register struct icp_qat_fw_la_bulk_req *req; + struct rte_crypto_op *op = in_op; + struct qat_sym_op_cookie *cookie = op_cookie; + struct rte_crypto_sgl in_sgl, out_sgl; + struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER], + out_vec[QAT_SYM_SGL_MAX_NUMBER]; + struct rte_crypto_va_iova_ptr cipher_iv; + struct rte_crypto_va_iova_ptr aad; + struct rte_crypto_va_iova_ptr digest; + union rte_crypto_sym_ofs ofs; + int32_t total_len; + + in_sgl.vec = in_vec; + out_sgl.vec = out_vec; + + req = (struct 
icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl, + &cipher_iv, &aad, &digest); + if (unlikely(ofs.raw == UINT64_MAX)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + total_len = qat_sym_build_req_set_data(req, in_op, cookie, + in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num); + if (unlikely(total_len < 0)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs, + total_len); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv, + NULL, &aad, &digest); +#endif + + return 0; +} + +static int +qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie) +{ + register struct icp_qat_fw_la_bulk_req *req; + struct rte_crypto_op *op = in_op; + struct qat_sym_op_cookie *cookie = op_cookie; + struct rte_crypto_sgl in_sgl, out_sgl; + struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER], + out_vec[QAT_SYM_SGL_MAX_NUMBER]; + struct rte_crypto_va_iova_ptr auth_iv; + struct rte_crypto_va_iova_ptr digest; + union rte_crypto_sym_ofs ofs; + int32_t total_len; + + in_sgl.vec = in_vec; + out_sgl.vec = out_vec; + + req = (struct icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl, + NULL, &auth_iv, &digest); + if (unlikely(ofs.raw == UINT64_MAX)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + total_len = qat_sym_build_req_set_data(req, in_op, cookie, + in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num); + if (unlikely(total_len < 0)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv, + ofs, total_len); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL, + &auth_iv, NULL, &digest); +#endif + + return 0; +} + +static int +qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session) +{ + struct qat_sym_session *ctx = session; + enum rte_proc_type_t proc_type = rte_eal_process_type(); + int ret; + + ret = qat_sym_crypto_set_session_gen1(cdev, session); + /* special single pass build request for GEN3 */ + if (ctx->is_single_pass) + ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3; + else if (ctx->is_single_pass_gmac) + ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3; + + if (ret == 0) + return ret; + + /* GEN1 returning -ENOTSUP as it cannot handle some mixed algo, + * this is addressed by GEN3 + */ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS); + } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS); + } else if ((ctx->aes_cmac || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) && + (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, 0); + } + + return 0; +} + +static 
int +qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, + void *user_data) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_op_cookie *cookie; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + + int32_t data_len; + uint32_t tail = dp_ctx->tail; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + cookie = qp->op_cookies[tail >> tx_queue->trailz]; + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + data_len = qat_sym_build_req_set_data(req, user_data, cookie, + data, n_data_vecs, NULL, 0); + if (unlikely(data_len < 0)) + return -1; + + enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs, + (uint32_t)data_len); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv, + NULL, aad, digest); +#endif + return 0; +} + +static uint32_t +qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { + qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); + *status = 0; + return 0; + } + + tail = dp_ctx->tail; + + for (i = 0; i < n; i++) { + struct qat_sym_op_cookie *cookie = + qp->op_cookies[tail >> tx_queue->trailz]; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_build_req_set_data(req, user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (unlikely(data_len < 0)) + break; + + enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i], + &vec->digest[i], &vec->aad[i], ofs, + (uint32_t)data_len); + + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec, + vec->src_sgl[i].num, &vec->iv[i], NULL, + &vec->aad[i], &vec->digest[i]); +#endif + } + + if (unlikely(i < n)) + qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue += i; + *status = 0; + return i; +} + +static int +qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv __rte_unused, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + void *user_data) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_op_cookie *cookie; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + 
uint32_t tail = dp_ctx->tail; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + cookie = qp->op_cookies[tail >> tx_queue->trailz]; + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + data_len = qat_sym_build_req_set_data(req, user_data, cookie, + data, n_data_vecs, NULL, 0); + if (unlikely(data_len < 0)) + return -1; + + enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs, + (uint32_t)data_len); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; + + return 0; +} + +static uint32_t +qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { + qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); + *status = 0; + return 0; + } + + tail = dp_ctx->tail; + + for (i = 0; i < n; i++) { + struct qat_sym_op_cookie *cookie = + qp->op_cookies[tail >> tx_queue->trailz]; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_build_req_set_data(req, user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (unlikely(data_len < 0)) + break; + enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i], + &vec->auth_iv[i], ofs, (uint32_t)data_len); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + } + + if (unlikely(i < n)) + qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue += i; + *status = 0; + return i; +} + +static int +qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx) +{ + struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx; + struct qat_sym_session *ctx = _ctx; + int ret; + + ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx); + if (ret < 0) + return ret; + + if (ctx->is_single_pass) { + raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3; + raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3; + } else if (ctx->is_single_pass_gmac) { + raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3; + raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3; + } + + return 0; +} + + RTE_INIT(qat_sym_crypto_gen3_init) { qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1; @@ -150,6 +612,10 @@ RTE_INIT(qat_sym_crypto_gen3_init) qat_sym_crypto_cap_get_gen3; qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1; + qat_sym_gen_dev_ops[QAT_GEN3].set_session = + qat_sym_crypto_set_session_gen3; + qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx = + qat_sym_configure_raw_dp_ctx_gen3; #ifdef RTE_LIB_SECURITY qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx = qat_sym_create_security_gen1; @@ -161,4 +627,5 @@ RTE_INIT(qat_asym_crypto_gen3_init) qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL; qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL; qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL; + qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL; } diff 
--git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c index 37a58c026f..167c95abcf 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c @@ -103,11 +103,253 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused) return capa_info; } +static __rte_always_inline void +enqueue_one_aead_job_gen4(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + if (ctx->is_single_pass && ctx->is_ucs) { + struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 = + (void *)&req->serv_specif_rqpars; + struct icp_qat_fw_la_cipher_req_params *cipher_param = + (void *)&req->serv_specif_rqpars; + + /* QAT GEN4 uses single pass to treat AEAD as cipher + * operation + */ + qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, + req); + cipher_param->cipher_offset = ofs.ofs.cipher.head; + cipher_param->cipher_length = data_len - + ofs.ofs.cipher.head - ofs.ofs.cipher.tail; + + cipher_param_20->spc_aad_addr = aad->iova; + cipher_param_20->spc_auth_res_addr = digest->iova; + + return; + } + + enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len); +} + +static int +qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie) +{ + register struct icp_qat_fw_la_bulk_req *qat_req; + struct rte_crypto_op *op = in_op; + struct qat_sym_op_cookie *cookie = op_cookie; + struct rte_crypto_sgl in_sgl, out_sgl; + struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER], + out_vec[QAT_SYM_SGL_MAX_NUMBER]; + struct rte_crypto_va_iova_ptr cipher_iv; + struct rte_crypto_va_iova_ptr aad; + struct rte_crypto_va_iova_ptr digest; + union rte_crypto_sym_ofs ofs; + int32_t total_len; + + in_sgl.vec = in_vec; + out_sgl.vec = out_vec; + + qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl, + &cipher_iv, &aad, &digest); + if (unlikely(ofs.raw == UINT64_MAX)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie, + in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num); + if (unlikely(total_len < 0)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs, + total_len); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv, + NULL, &aad, &digest); +#endif + + return 0; +} + +static int +qat_sym_crypto_set_session_gen4(void *cdev, void *session) +{ + struct qat_sym_session *ctx = session; + enum rte_proc_type_t proc_type = rte_eal_process_type(); + int ret; + + ret = qat_sym_crypto_set_session_gen1(cdev, session); + /* special single pass build request for GEN4 */ + if (ctx->is_single_pass && ctx->is_ucs) + ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4; + if (ret == 0) + return ret; + + /* GEN1 returning -ENOTSUP as it cannot handle some mixed algo, + * this is addressed by GEN4 + */ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << 
ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS); + } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS); + } else if ((ctx->aes_cmac || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) && + (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, 0); + } + + return 0; +} + +static int +qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, + void *user_data) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_op_cookie *cookie; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + + int32_t data_len; + uint32_t tail = dp_ctx->tail; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + cookie = qp->op_cookies[tail >> tx_queue->trailz]; + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + data_len = qat_sym_build_req_set_data(req, user_data, cookie, + data, n_data_vecs, NULL, 0); + if (unlikely(data_len < 0)) + return -1; + + enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs, + (uint32_t)data_len); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv, + NULL, aad, digest); +#endif + return 0; +} + +static uint32_t +qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { + qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); + *status = 0; + return 0; + } + + tail = dp_ctx->tail; + + for (i = 0; i < n; i++) { + struct qat_sym_op_cookie *cookie = + qp->op_cookies[tail >> tx_queue->trailz]; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_build_req_set_data(req, user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (unlikely(data_len < 0)) + break; + + enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i], + &vec->digest[i], &vec->aad[i], ofs, + (uint32_t)data_len); + + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec, + vec->src_sgl[i].num, &vec->iv[i], NULL, + &vec->aad[i], &vec->digest[i]); +#endif + } + + if (unlikely(i < n)) + qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue += i; + *status = 0; + return i; +} + +static int 
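The gen-specific set_session hooks above all share one shape: call the GEN1 setter first, then override the per-process build_request callback (and the mixed-algo hash flags) only where that generation can do better. The following self-contained sketch compresses that dispatch pattern; the demo_* names are simplified stand-ins for illustration, not the driver's actual qat_crypto_gen_dev_ops or qat_sym_session definitions.

#include <stdio.h>

enum demo_gen { GEN1, GEN2, GEN3, GEN_MAX };

struct demo_session;
typedef int (*demo_build_request_t)(struct demo_session *s, void *op);
typedef int (*demo_set_session_t)(void *dev, struct demo_session *s);

struct demo_session {
        int is_single_pass;
        demo_build_request_t build_request;     /* chosen at session init */
};

struct demo_gen_dev_ops {
        demo_set_session_t set_session;
};

static int build_req_gen1(struct demo_session *s, void *op)
{ (void)s; (void)op; puts("GEN1 request"); return 0; }

static int build_req_gen3_single_pass(struct demo_session *s, void *op)
{ (void)s; (void)op; puts("GEN3 single-pass request"); return 0; }

/* GEN1 baseline: always usable. */
static int set_session_gen1(void *dev, struct demo_session *s)
{ (void)dev; s->build_request = build_req_gen1; return 0; }

/* GEN3 overrides the callback only for the cases it accelerates,
 * mirroring qat_sym_crypto_set_session_gen3(). */
static int set_session_gen3(void *dev, struct demo_session *s)
{
        int ret = set_session_gen1(dev, s);

        if (s->is_single_pass)
                s->build_request = build_req_gen3_single_pass;
        return ret;
}

static const struct demo_gen_dev_ops demo_ops[GEN_MAX] = {
        [GEN1] = { set_session_gen1 },
        [GEN2] = { set_session_gen1 },  /* placeholder: GEN2 also sets ext hash flags */
        [GEN3] = { set_session_gen3 },
};

/* The data path no longer switches on device generation: it simply
 * invokes whatever callback the session was configured with. */
static int demo_enqueue_one(struct demo_session *s, void *op)
{ return s->build_request(s, op); }

int main(void)
{
        struct demo_session s = { .is_single_pass = 1 };

        demo_ops[GEN3].set_session(NULL, &s);
        return demo_enqueue_one(&s, NULL);
}

This mirrors the trade made by the series itself: qat_enqueue_op_burst() receives a qat_op_build_request_t chosen at session setup, and each RTE_INIT constructor fills qat_sym_gen_dev_ops[].set_session, so the hot path no longer needs a min_qat_dev_gen check.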
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx) +{ + struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx; + struct qat_sym_session *ctx = _ctx; + int ret; + + ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx); + if (ret < 0) + return ret; + + if (ctx->is_single_pass && ctx->is_ucs) { + raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4; + raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4; + } + + return 0; +} + RTE_INIT(qat_sym_crypto_gen4_init) { qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1; qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities = qat_sym_crypto_cap_get_gen4; + qat_sym_gen_dev_ops[QAT_GEN4].set_session = + qat_sym_crypto_set_session_gen4; + qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx = + qat_sym_configure_raw_dp_ctx_gen4; qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1; #ifdef RTE_LIB_SECURITY @@ -121,4 +363,5 @@ RTE_INIT(qat_asym_crypto_gen4_init) qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL; qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL; qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL; + qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL; } diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c index e1fd14956b..e32bdf1c4b 100644 --- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c +++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c @@ -6,6 +6,7 @@ #ifdef RTE_LIB_SECURITY #include #endif +#include #include "adf_transport_access_macros.h" #include "icp_qat_fw.h" @@ -154,7 +155,7 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = { }; static struct qat_capabilities_info -qat_sym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev __rte_unused) +qat_sym_crypto_cap_get_gen1(struct qat_pci_device * qat_dev __rte_unused) { struct qat_capabilities_info capa_info; capa_info.data = qat_sym_crypto_caps_gen1; @@ -1169,6 +1170,10 @@ RTE_INIT(qat_sym_crypto_gen1_init) qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1; qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities = qat_sym_crypto_cap_get_gen1; + qat_sym_gen_dev_ops[QAT_GEN1].set_session = + qat_sym_crypto_set_session_gen1; + qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx = + qat_sym_configure_raw_dp_ctx_gen1; qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1; #ifdef RTE_LIB_SECURITY diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c index 01d2439b93..6f17abd404 100644 --- a/drivers/crypto/qat/qat_crypto.c +++ b/drivers/crypto/qat/qat_crypto.c @@ -2,6 +2,7 @@ * Copyright(c) 2021 Intel Corporation */ +#include #include "qat_device.h" #include "qat_qp.h" #include "qat_crypto.h" diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h index 6eaa15b975..8fe3f0b061 100644 --- a/drivers/crypto/qat/qat_crypto.h +++ b/drivers/crypto/qat/qat_crypto.h @@ -48,15 +48,24 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev); typedef void * (*create_security_ctx_t)(void *cryptodev); +typedef int (*set_session_t)(void *cryptodev, void *session); + +typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx); + struct qat_crypto_gen_dev_ops { get_feature_flags_t get_feature_flags; get_capabilities_info_t get_capabilities; struct rte_cryptodev_ops *cryptodev_ops; + set_session_t set_session; + set_raw_dp_ctx_t set_raw_dp_ctx; #ifdef RTE_LIB_SECURITY create_security_ctx_t create_security_ctx; #endif }; +extern struct qat_crypto_gen_dev_ops 
qat_sym_gen_dev_ops[]; +extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[]; + int qat_cryptodev_config(struct rte_cryptodev *dev, struct rte_cryptodev_config *config); diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c index a92874cd27..de687de857 100644 --- a/drivers/crypto/qat/qat_sym.c +++ b/drivers/crypto/qat/qat_sym.c @@ -237,7 +237,7 @@ refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, int qat_sym_build_request(void *in_op, uint8_t *out_msg, - void *op_cookie, enum qat_device_gen qat_dev_gen) + void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen) { int ret = 0; struct qat_sym_session *ctx = NULL; @@ -302,12 +302,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg, return -EINVAL; } - if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) { - QAT_DP_LOG(ERR, "Session alg not supported on this device gen"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; - return -EINVAL; - } - qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg; rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req)); qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op; diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c index 8ca475ca8b..52837d7c9c 100644 --- a/drivers/crypto/qat/qat_sym_session.c +++ b/drivers/crypto/qat/qat_sym_session.c @@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev, return 0; } -static void -qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session, - uint8_t hash_flag) -{ - struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr; - struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl = - (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *) - session->fw_req.cd_ctrl.content_desc_ctrl_lw; - - /* Set the Use Extended Protocol Flags bit in LW 1 */ - QAT_FIELD_SET(header->comn_req_flags, - QAT_COMN_EXT_FLAGS_USED, - QAT_COMN_EXT_FLAGS_BITPOS, - QAT_COMN_EXT_FLAGS_MASK); - - /* Set Hash Flags in LW 28 */ - cd_ctrl->hash_flags |= hash_flag; - - /* Set proto flags in LW 1 */ - switch (session->qat_cipher_alg) { - case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_SNOW_3G_PROTO); - ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( - header->serv_specif_flags, 0); - break; - case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_PROTO); - ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( - header->serv_specif_flags, - ICP_QAT_FW_LA_ZUC_3G_PROTO); - break; - default: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_PROTO); - ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( - header->serv_specif_flags, 0); - break; - } -} - -static void -qat_sym_session_handle_mixed(const struct rte_cryptodev *dev, - struct qat_sym_session *session) -{ - const struct qat_cryptodev_private *qat_private = - dev->data->dev_private; - enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities & - QAT_SYM_CAP_MIXED_CRYPTO) ? 
QAT_GEN2 : QAT_GEN3; - - if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 && - session->qat_cipher_alg != - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { - session->min_qat_dev_gen = min_dev_gen; - qat_sym_session_set_ext_hash_flags(session, - 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS); - } else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 && - session->qat_cipher_alg != - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { - session->min_qat_dev_gen = min_dev_gen; - qat_sym_session_set_ext_hash_flags(session, - 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS); - } else if ((session->aes_cmac || - session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) && - (session->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || - session->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) { - session->min_qat_dev_gen = min_dev_gen; - qat_sym_session_set_ext_hash_flags(session, 0); - } -} - int qat_sym_session_set_parameters(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, void *session_private) @@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev, enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen; int ret; int qat_cmd_id; - int handle_mixed = 0; /* Verify the session physical address is known */ rte_iova_t session_paddr = rte_mempool_virt2iova(session); @@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev, session->cd_paddr = session_paddr + offsetof(struct qat_sym_session, cd); - session->min_qat_dev_gen = QAT_GEN1; + session->dev_id = internals->dev_id; session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE; session->is_ucs = 0; @@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev, xform, session); if (ret < 0) return ret; - handle_mixed = 1; } break; case ICP_QAT_FW_LA_CMD_HASH_CIPHER: @@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev, xform, session); if (ret < 0) return ret; - handle_mixed = 1; } break; case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM: @@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev, return -ENOTSUP; } qat_sym_session_finalize(session); - if (handle_mixed) { - /* Special handling of mixed hash+cipher algorithms */ - qat_sym_session_handle_mixed(dev, session); - } - return 0; + return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev, + (void *)session); } static int @@ -678,7 +598,6 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session, { session->is_single_pass = 1; session->is_auth = 1; - session->min_qat_dev_gen = QAT_GEN3; session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER; /* Chacha-Poly is special case that use QAT CTR mode */ if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) { @@ -1205,9 +1124,10 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out) return 0; } -static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg, - uint8_t *data_in, - uint8_t *data_out) +static int +partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg, + uint8_t *data_in, + uint8_t *data_out) { int digest_size; uint8_t digest[qat_hash_get_digest_size( @@ -1654,7 +1574,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc, cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC; - cdesc->min_qat_dev_gen = QAT_GEN2; } else { total_key_size = cipherkeylen; cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3; @@ -2002,7 +1921,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc, 
memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; - cdesc->min_qat_dev_gen = QAT_GEN2; break; case ICP_QAT_HW_AUTH_ALGO_MD5: @@ -2263,8 +2181,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev, session->cd_paddr = session_paddr + offsetof(struct qat_sym_session, cd); - session->min_qat_dev_gen = QAT_GEN1; - /* Get requested QAT command id - should be cipher */ qat_cmd_id = qat_get_cmd_id(xform); if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) { @@ -2289,6 +2205,9 @@ qat_security_session_create(void *dev, { void *sess_private_data; struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; + struct qat_cryptodev_private *internals = cdev->data->dev_private; + enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen; + struct qat_sym_session *sym_session = NULL; int ret; if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL || @@ -2312,8 +2231,11 @@ qat_security_session_create(void *dev, } set_sec_session_private_data(sess, sess_private_data); + sym_session = (struct qat_sym_session *)sess_private_data; + sym_session->dev_id = internals->dev_id; - return ret; + return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev, + sess_private_data); } int diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h index 73493ec864..e1b5edb2f9 100644 --- a/drivers/crypto/qat/qat_sym_session.h +++ b/drivers/crypto/qat/qat_sym_session.h @@ -100,7 +100,7 @@ struct qat_sym_session { uint16_t auth_key_length; uint16_t digest_length; rte_spinlock_t lock; /* protects this struct */ - enum qat_device_gen min_qat_dev_gen; + uint16_t dev_id; uint8_t aes_cmac; uint8_t is_single_pass; uint8_t is_single_pass_gmac; From patchwork Tue Oct 26 17:25:16 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Ji, Kai" X-Patchwork-Id: 102988 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 94050A0547; Tue, 26 Oct 2021 19:26:10 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 0C0A6410EE; Tue, 26 Oct 2021 19:25:58 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by mails.dpdk.org (Postfix) with ESMTP id C8B86410DD for ; Tue, 26 Oct 2021 19:25:52 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10149"; a="228723419" X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="228723419" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 26 Oct 2021 10:25:28 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="494306277" Received: from silpixa00400272.ir.intel.com (HELO silpixa00400272.ger.corp.intel.com) ([10.237.223.111]) by fmsmga007.fm.intel.com with ESMTP; 26 Oct 2021 10:25:27 -0700 From: Kai Ji To: dev@dpdk.org Cc: Kai Ji Date: Tue, 26 Oct 2021 18:25:16 +0100 Message-Id: <20211026172518.20183-6-kai.ji@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20211026172518.20183-1-kai.ji@intel.com> References: <20211026172518.20183-1-kai.ji@intel.com> Subject: [dpdk-dev] [dpdk-dev v1 5/7] crypto/qat: qat driver datapath rework X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 
Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch introduce op_build_request func in qat enqueue op burst. Add-in qat_dequeue_process_response in qat dequeue op burst. Enable session build request op based on crypto operation Signed-off-by: Kai Ji Acked-by: Fan Zhang --- drivers/common/qat/meson.build | 4 +- drivers/common/qat/qat_qp.c | 60 +++++--------------- drivers/common/qat/qat_qp.h | 32 +++++++---- drivers/compress/qat/qat_comp_pmd.c | 12 +++- drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c | 4 +- drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 4 +- drivers/crypto/qat/dev/qat_sym_pmd_gen1.c | 2 +- drivers/crypto/qat/qat_asym_refactor.c | 4 +- drivers/crypto/qat/qat_asym_refactor.h | 2 +- drivers/crypto/qat/qat_sym_hw_dp.c | 2 +- drivers/crypto/qat/qat_sym_refactor.c | 51 +---------------- drivers/crypto/qat/qat_sym_refactor.h | 2 +- 12 files changed, 57 insertions(+), 122 deletions(-) diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build index ce9959d103..b7b18b6c0f 100644 --- a/drivers/common/qat/meson.build +++ b/drivers/common/qat/meson.build @@ -70,8 +70,8 @@ if qat_compress endif if qat_crypto - foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c', - 'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c', + foreach f: ['qat_sym_refactor.c', 'qat_sym_session.c', + 'qat_sym_hw_dp.c', 'qat_asym_refactor.c', 'qat_crypto.c', 'dev/qat_sym_pmd_gen1.c', 'dev/qat_asym_pmd_gen1.c', 'dev/qat_crypto_pmd_gen2.c', diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c index 0fda890075..82adda0698 100644 --- a/drivers/common/qat/qat_qp.c +++ b/drivers/common/qat/qat_qp.c @@ -15,8 +15,8 @@ #include "qat_logs.h" #include "qat_device.h" #include "qat_qp.h" -#include "qat_sym.h" -#include "qat_asym.h" +#include "qat_sym_refactor.h" +#include "qat_asym_refactor.h" #include "qat_comp.h" #define QAT_CQ_MAX_DEQ_RETRIES 10 @@ -550,23 +550,8 @@ adf_modulo(uint32_t data, uint32_t modulo_mask) } uint16_t -refactor_qat_enqueue_op_burst(__rte_unused void *qp, - __rte_unused qat_op_build_request_t op_build_request, - __rte_unused void **ops, __rte_unused uint16_t nb_ops) -{ - return 0; -} - -uint16_t -refactor_qat_dequeue_op_burst(__rte_unused void *qp, __rte_unused void **ops, - __rte_unused qat_op_dequeue_t qat_dequeue_process_response, - __rte_unused uint16_t nb_ops) -{ - return 0; -} - -uint16_t -qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops) +qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request, + void **ops, uint16_t nb_ops) { register struct qat_queue *queue; struct qat_qp *tmp_qp = (struct qat_qp *)qp; @@ -616,29 +601,18 @@ qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops) } } -#ifdef BUILD_QAT_SYM +#ifdef RTE_LIB_SECURITY if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) qat_sym_preprocess_requests(ops, nb_ops_possible); #endif + memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque)); + while (nb_ops_sent != nb_ops_possible) { - if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) { -#ifdef BUILD_QAT_SYM - ret = qat_sym_build_request(*ops, base_addr + tail, - tmp_qp->op_cookies[tail >> queue->trailz], - tmp_qp->qat_dev_gen); -#endif - } else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) { - ret = qat_comp_build_request(*ops, base_addr + tail, - tmp_qp->op_cookies[tail >> queue->trailz], - tmp_qp->qat_dev_gen); - } else if (tmp_qp->service_type == 
QAT_SERVICE_ASYMMETRIC) { -#ifdef BUILD_QAT_ASYM - ret = qat_asym_build_request(*ops, base_addr + tail, + ret = op_build_request(*ops, base_addr + tail, tmp_qp->op_cookies[tail >> queue->trailz], - tmp_qp->qat_dev_gen); -#endif - } + tmp_qp->opaque, tmp_qp->qat_dev_gen); + if (ret != 0) { tmp_qp->stats.enqueue_err_count++; /* This message cannot be enqueued */ @@ -833,7 +807,8 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops) } uint16_t -qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops) +qat_dequeue_op_burst(void *qp, void **ops, + qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops) { struct qat_queue *rx_queue; struct qat_qp *tmp_qp = (struct qat_qp *)qp; @@ -851,19 +826,10 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops) nb_fw_responses = 1; - if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) - qat_sym_process_response(ops, resp_msg, - tmp_qp->op_cookies[head >> rx_queue->trailz]); - else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) - nb_fw_responses = qat_comp_process_response( + nb_fw_responses = qat_dequeue_process_response( ops, resp_msg, tmp_qp->op_cookies[head >> rx_queue->trailz], &tmp_qp->stats.dequeue_err_count); -#ifdef BUILD_QAT_ASYM - else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) - qat_asym_process_response(ops, resp_msg, - tmp_qp->op_cookies[head >> rx_queue->trailz]); -#endif head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo_mask); diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h index c5f115310c..b4518df7d0 100644 --- a/drivers/common/qat/qat_qp.h +++ b/drivers/common/qat/qat_qp.h @@ -14,6 +14,24 @@ struct qat_pci_device; +/* Default qp configuration for GEN4 devices */ +#define QAT_GEN4_QP_DEFCON (QAT_SERVICE_SYMMETRIC | \ + QAT_SERVICE_SYMMETRIC << 8 | \ + QAT_SERVICE_SYMMETRIC << 16 | \ + QAT_SERVICE_SYMMETRIC << 24) + +/* QAT GEN 4 specific macros */ +#define QAT_GEN4_BUNDLE_NUM 4 +#define QAT_GEN4_QPS_PER_BUNDLE_NUM 1 + +/* Queue pair setup error codes */ +#define QAT_NOMEM 1 +#define QAT_QP_INVALID_DESC_NO 2 +#define QAT_QP_BUSY 3 +#define QAT_PCI_NO_RESOURCE 4 + +#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C + /** * Structure associated with each queue. 
*/ @@ -92,21 +110,15 @@ struct qat_qp_config { }; uint16_t -refactor_qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request, +qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request, void **ops, uint16_t nb_ops); -uint16_t -refactor_qat_dequeue_op_burst(void *qp, void **ops, - qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops); - -uint16_t -qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops); - uint16_t qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops); uint16_t -qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops); +qat_dequeue_op_burst(void *qp, void **ops, + qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops); int qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr); @@ -118,7 +130,7 @@ qat_qp_setup(struct qat_pci_device *qat_dev, int qat_qps_per_service(struct qat_pci_device *qat_dev, - enum qat_service_type service); + enum qat_service_type service); const struct qat_qp_hw_data * qat_qp_get_hw_data(struct qat_pci_device *qat_dev, diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c index 9b24d46e97..2f17f8825e 100644 --- a/drivers/compress/qat/qat_comp_pmd.c +++ b/drivers/compress/qat/qat_comp_pmd.c @@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = { .private_xform_free = qat_comp_private_xform_free }; +static uint16_t +qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops, uint16_t nb_ops) +{ + return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response, + nb_ops); +} + static uint16_t qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops, uint16_t nb_ops) { - uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops); + uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops); struct qat_qp *tmp_qp = (struct qat_qp *)qp; if (ret) { @@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops, } else { tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst = - (compressdev_dequeue_pkt_burst_t) - qat_dequeue_op_burst; + qat_comp_dequeue_burst; } } return ret; diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c index 72609703f9..86c124f6c8 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c @@ -5,8 +5,8 @@ #include #include #include "qat_sym_session.h" -#include "qat_sym.h" -#include "qat_asym.h" +#include "qat_sym_refactor.h" +#include "qat_asym_refactor.h" #include "qat_crypto.h" #include "qat_crypto_pmd_gens.h" diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c index 167c95abcf..108e07ee7f 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c @@ -5,8 +5,8 @@ #include #include #include "qat_sym_session.h" -#include "qat_sym.h" -#include "qat_asym.h" +#include "qat_sym_refactor.h" +#include "qat_asym_refactor.h" #include "qat_crypto.h" #include "qat_crypto_pmd_gens.h" diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c index e32bdf1c4b..aea56fba4c 100644 --- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c +++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c @@ -13,7 +13,7 @@ #include "icp_qat_fw_la.h" #include "qat_sym_session.h" -#include "qat_sym.h" +#include "qat_sym_refactor.h" #include "qat_sym_session.h" #include "qat_crypto.h" #include "qat_crypto_pmd_gens.h" diff --git 
a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c index 8e789920cb..3a9b1d4054 100644 --- a/drivers/crypto/qat/qat_asym_refactor.c +++ b/drivers/crypto/qat/qat_asym_refactor.c @@ -5,7 +5,7 @@ #include -#include +#include #include "icp_qat_fw_pke.h" #include "icp_qat_fw.h" @@ -14,7 +14,7 @@ #include "qat_device.h" #include "qat_logs.h" -#include "qat_asym.h" +#include "qat_asym_refactor.h" uint8_t qat_asym_driver_id; diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h index 9ecabdbe8f..6d3d991bc7 100644 --- a/drivers/crypto/qat/qat_asym_refactor.h +++ b/drivers/crypto/qat/qat_asym_refactor.h @@ -5,7 +5,7 @@ #ifndef _QAT_ASYM_H_ #define _QAT_ASYM_H_ -#include +#include #include #include "icp_qat_fw_pke.h" #include "qat_device.h" diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c index 94589458d0..af75ac2011 100644 --- a/drivers/crypto/qat/qat_sym_hw_dp.c +++ b/drivers/crypto/qat/qat_sym_hw_dp.c @@ -8,7 +8,7 @@ #include "icp_qat_fw.h" #include "icp_qat_fw_la.h" -#include "qat_sym.h" +#include "qat_sym_refactor.h" #include "qat_sym_pmd.h" #include "qat_sym_session.h" #include "qat_qp.h" diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c index 0412902e70..82f078ff1e 100644 --- a/drivers/crypto/qat/qat_sym_refactor.c +++ b/drivers/crypto/qat/qat_sym_refactor.c @@ -10,7 +10,7 @@ #include #include -#include "qat_sym.h" +#include "qat_sym_refactor.h" #include "qat_crypto.h" #include "qat_qp.h" @@ -354,55 +354,6 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev) return 0; } -int -qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id, - struct rte_crypto_raw_dp_ctx *raw_dp_ctx, - enum rte_crypto_op_sess_type sess_type, - union rte_cryptodev_session_ctx session_ctx, uint8_t is_update) -{ - struct qat_cryptodev_private *internals = dev->data->dev_private; - enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen; - struct qat_crypto_gen_dev_ops *gen_dev_ops = - &qat_sym_gen_dev_ops[qat_dev_gen]; - struct qat_qp *qp; - struct qat_sym_session *ctx; - struct qat_sym_dp_ctx *dp_ctx; - - if (!gen_dev_ops->set_raw_dp_ctx) { - QAT_LOG(ERR, "Device GEN %u does not support raw data path", - qat_dev_gen); - return -ENOTSUP; - } - - qp = dev->data->queue_pairs[qp_id]; - dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data; - - if (!is_update) { - memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) + - sizeof(struct qat_sym_dp_ctx)); - raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id]; - dp_ctx->tail = qp->tx_q.tail; - dp_ctx->head = qp->rx_q.head; - dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0; - } - - if (sess_type != RTE_CRYPTO_OP_WITH_SESSION) - return -EINVAL; - - ctx = (struct qat_sym_session *)get_sym_session_private_data( - session_ctx.crypto_sess, qat_sym_driver_id); - - dp_ctx->session = ctx; - - return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx); -} - -int -qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused) -{ - return sizeof(struct qat_sym_dp_ctx); -} - static struct cryptodev_driver qat_crypto_drv; RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, cryptodev_qat_sym_driver, diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h index d4bfe8f364..44feca8251 100644 --- a/drivers/crypto/qat/qat_sym_refactor.h +++ b/drivers/crypto/qat/qat_sym_refactor.h @@ -5,7 +5,7 @@ #ifndef _QAT_SYM_H_ #define _QAT_SYM_H_ -#include +#include #ifdef RTE_LIB_SECURITY #include #endif From 
patchwork Tue Oct 26 17:25:17 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Ji, Kai" X-Patchwork-Id: 102989 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id BBDDAA0547; Tue, 26 Oct 2021 19:26:16 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 101F54113B; Tue, 26 Oct 2021 19:25:59 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by mails.dpdk.org (Postfix) with ESMTP id 7216241101 for ; Tue, 26 Oct 2021 19:25:53 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10149"; a="228723421" X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="228723421" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 26 Oct 2021 10:25:29 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="494306309" Received: from silpixa00400272.ir.intel.com (HELO silpixa00400272.ger.corp.intel.com) ([10.237.223.111]) by fmsmga007.fm.intel.com with ESMTP; 26 Oct 2021 10:25:28 -0700 From: Kai Ji To: dev@dpdk.org Cc: Kai Ji , pablo.de.lara.guarch@intel.com, adamx.dybkowski@intel.com Date: Tue, 26 Oct 2021 18:25:17 +0100 Message-Id: <20211026172518.20183-7-kai.ji@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20211026172518.20183-1-kai.ji@intel.com> References: <20211026172518.20183-1-kai.ji@intel.com> Subject: [dpdk-dev] [dpdk-dev v1 6/7] app/test: cryptodev test fix X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" test_mixed_auth_cipher: ensure enough space is allocated in ibuf & obuf for the mbuf to vec conversion.
test_kasumi_decryption: cipher length update.
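For reference, a minimal, self-contained sketch of the sizing rule the mixed auth/cipher fix applies. This is not code from the patch: the helpers below are plain-C stand-ins for DPDK's ceil_byte_length()/RTE_ALIGN_CEIL()/RTE_MAX(), and the bit lengths are arbitrary example values rather than a real test vector. The point is that both ibuf and obuf get the larger of the two padded lengths, so the mbuf-to-vec conversion has room in either direction:

#include <stdint.h>
#include <stdio.h>

/* Plain-C stand-ins for the helpers the test uses. */
static uint32_t ceil_byte_length(uint32_t len_bits) { return (len_bits + 7) / 8; }
static uint32_t align_ceil_16(uint32_t v) { return ((v + 15) / 16) * 16; }
static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

int main(void)
{
	/* Arbitrary example bit lengths, not taken from any real vector. */
	uint32_t plaintext_len_bits = 600, ciphertext_len_bits = 632;

	uint32_t plaintext_pad_len = align_ceil_16(ceil_byte_length(plaintext_len_bits));
	uint32_t ciphertext_pad_len = align_ceil_16(ceil_byte_length(ciphertext_len_bits));

	/* The fix: append the larger of the two padded lengths to both buffers
	 * instead of only the direction-specific one. */
	uint32_t data_len = max_u32(ciphertext_pad_len, plaintext_pad_len);

	printf("rte_pktmbuf_append() would be asked for %u bytes\n", (unsigned)data_len);
	return 0;
}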
qat/dev: support sgl oop operation Fixes: 681f540da52b ("cryptodev: do not use AAD in wireless algorithms") Cc: pablo.de.lara.guarch@intel.com Fixes: e847fc512817 ("test/crypto: add encrypted digest case for AES-CTR-CMAC") Cc: adamx.dybkowski@intel.com Signed-off-by: Kai Ji --- app/test/test_cryptodev.c | 52 ++++++++++++++---- drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++-- drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++- drivers/crypto/qat/dev/qat_sym_pmd_gen1.c | 55 +++++++++++++++++--- 4 files changed, 124 insertions(+), 25 deletions(-) diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c index 814a0b401d..dd791a181a 100644 --- a/app/test/test_cryptodev.c +++ b/app/test/test_cryptodev.c @@ -179,6 +179,10 @@ post_process_raw_dp_op(void *user_data, uint32_t index __rte_unused, RTE_CRYPTO_OP_STATUS_ERROR; } +static struct crypto_testsuite_params testsuite_params = { NULL }; +struct crypto_testsuite_params *p_testsuite_params = &testsuite_params; +static struct crypto_unittest_params unittest_params; + void process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op, uint8_t is_cipher, uint8_t is_auth, @@ -193,6 +197,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_sgl sgl, dest_sgl; uint32_t max_len; union rte_cryptodev_session_ctx sess; + uint64_t auth_end_iova; uint32_t count = 0; struct rte_crypto_raw_dp_ctx *ctx; uint32_t cipher_offset = 0, cipher_len = 0, auth_offset = 0, @@ -202,6 +207,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, int ctx_service_size; int32_t status = 0; int enqueue_status, dequeue_status; + struct crypto_unittest_params *ut_params = &unittest_params; + /* oop is not supported in raw hw dp api*/ + int is_sgl = sop->m_src->nb_segs > 1; ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id); if (ctx_service_size < 0) { @@ -267,6 +275,30 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, digest.va = (void *)sop->auth.digest.data; digest.iova = sop->auth.digest.phys_addr; + if (is_sgl) { + uint32_t remaining_off = auth_offset + auth_len; + struct rte_mbuf *sgl_buf = sop->m_src; + + while (remaining_off >= rte_pktmbuf_data_len(sgl_buf) + && sgl_buf->next != NULL) { + remaining_off -= rte_pktmbuf_data_len(sgl_buf); + sgl_buf = sgl_buf->next; + } + + auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset( + sgl_buf, remaining_off); + } else { + /* oop is not supported in raw hw dp api */ + auth_end_iova = rte_pktmbuf_iova(op->sym->m_src) + + auth_offset + auth_len; + } + /* Then check if digest-encrypted conditions are met */ + if ((auth_offset + auth_len < cipher_offset + cipher_len) && + (digest.iova == auth_end_iova) && is_sgl) + max_len = RTE_MAX(max_len, + auth_offset + auth_len + + ut_params->auth_xform.auth.digest_length); + } else if (is_cipher) { cipher_offset = sop->cipher.data.offset; cipher_len = sop->cipher.data.length; @@ -503,10 +535,6 @@ process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op) return op; } -static struct crypto_testsuite_params testsuite_params = { NULL }; -struct crypto_testsuite_params *p_testsuite_params = &testsuite_params; -static struct crypto_unittest_params unittest_params; - static int testsuite_setup(void) { @@ -4077,9 +4105,9 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata) /* Create KASUMI operation */ retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data, - tdata->cipher_iv.len, - tdata->ciphertext.len, - tdata->validCipherOffsetInBits.len); + tdata->cipher_iv.len, + 
RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8), + tdata->validCipherOffsetInBits.len); if (retval < 0) return retval; @@ -7310,6 +7338,7 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, unsigned int plaintext_len; unsigned int ciphertext_pad_len; unsigned int ciphertext_len; + unsigned int data_len; struct rte_cryptodev_info dev_info; struct rte_crypto_op *op; @@ -7370,21 +7399,22 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, plaintext_len = ceil_byte_length(tdata->plaintext.len_bits); ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 16); plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16); + data_len = RTE_MAX(ciphertext_pad_len, plaintext_pad_len); if (verify) { ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf, - ciphertext_pad_len); + data_len); memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len); if (op_mode == OUT_OF_PLACE) - rte_pktmbuf_append(ut_params->obuf, ciphertext_pad_len); + rte_pktmbuf_append(ut_params->obuf, data_len); debug_hexdump(stdout, "ciphertext:", ciphertext, ciphertext_len); } else { plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf, - plaintext_pad_len); + data_len); memcpy(plaintext, tdata->plaintext.data, plaintext_len); if (op_mode == OUT_OF_PLACE) - rte_pktmbuf_append(ut_params->obuf, plaintext_pad_len); + rte_pktmbuf_append(ut_params->obuf, data_len); debug_hexdump(stdout, "plaintext:", plaintext, plaintext_len); } diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c index 6494019050..c59c25fe8f 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c @@ -467,8 +467,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx, (uint8_t *)tx_queue->base_addr + tail); rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - data_len = qat_sym_build_req_set_data(req, user_data[i], cookie, - vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (vec->dest_sgl) { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, + vec->dest_sgl[i].vec, vec->dest_sgl[i].num); + } else { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, + vec->src_sgl[i].num, NULL, 0); + } + if (unlikely(data_len < 0)) break; @@ -564,8 +574,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx, (uint8_t *)tx_queue->base_addr + tail); rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - data_len = qat_sym_build_req_set_data(req, user_data[i], cookie, - vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (vec->dest_sgl) { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, + vec->dest_sgl[i].vec, vec->dest_sgl[i].num); + } else { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, + vec->src_sgl[i].num, NULL, 0); + } + if (unlikely(data_len < 0)) break; enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i], diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c index 108e07ee7f..1b6cf10589 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c @@ -295,8 +295,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx, (uint8_t *)tx_queue->base_addr + tail); rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - data_len = 
qat_sym_build_req_set_data(req, user_data[i], cookie, - vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (vec->dest_sgl) { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, + vec->dest_sgl[i].vec, vec->dest_sgl[i].num); + } else { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, + vec->src_sgl[i].num, NULL, 0); + } + if (unlikely(data_len < 0)) break; diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c index aea56fba4c..85d5e45a42 100644 --- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c +++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c @@ -529,9 +529,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx, (uint8_t *)tx_queue->base_addr + tail); rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - data_len = qat_sym_build_req_set_data(req, user_data[i], - cookie, vec->src_sgl[i].vec, + if (vec->dest_sgl) { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, + vec->dest_sgl[i].vec, vec->dest_sgl[i].num); + } else { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + } + if (unlikely(data_len < 0)) break; enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs, @@ -628,8 +637,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx, (uint8_t *)tx_queue->base_addr + tail); rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - data_len = qat_sym_build_req_set_data(req, user_data[i], cookie, - vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (vec->dest_sgl) { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, + vec->dest_sgl[i].vec, vec->dest_sgl[i].num); + } else { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, + vec->src_sgl[i].num, NULL, 0); + } + if (unlikely(data_len < 0)) break; enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i], @@ -728,8 +747,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx, (uint8_t *)tx_queue->base_addr + tail); rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - data_len = qat_sym_build_req_set_data(req, user_data[i], cookie, - vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (vec->dest_sgl) { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, + vec->dest_sgl[i].vec, vec->dest_sgl[i].num); + } else { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, + vec->src_sgl[i].num, NULL, 0); + } + if (unlikely(data_len < 0)) break; @@ -833,8 +862,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx, (uint8_t *)tx_queue->base_addr + tail); rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - data_len = qat_sym_build_req_set_data(req, user_data[i], cookie, - vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0); + if (vec->dest_sgl) { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, vec->src_sgl[i].num, + vec->dest_sgl[i].vec, vec->dest_sgl[i].num); + } else { + data_len = qat_sym_build_req_set_data(req, + user_data[i], cookie, + vec->src_sgl[i].vec, + vec->src_sgl[i].num, NULL, 0); + } + if (unlikely(data_len < 0)) break; From patchwork Tue Oct 26 17:25:18 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 
1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Ji, Kai" X-Patchwork-Id: 102991 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D830BA0547; Tue, 26 Oct 2021 19:26:29 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 19C0041147; Tue, 26 Oct 2021 19:26:01 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by mails.dpdk.org (Postfix) with ESMTP id D976341102 for ; Tue, 26 Oct 2021 19:25:53 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10149"; a="228723424" X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="228723424" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 26 Oct 2021 10:25:31 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.87,184,1631602800"; d="scan'208";a="494306368" Received: from silpixa00400272.ir.intel.com (HELO silpixa00400272.ger.corp.intel.com) ([10.237.223.111]) by fmsmga007.fm.intel.com with ESMTP; 26 Oct 2021 10:25:30 -0700 From: Kai Ji To: dev@dpdk.org Cc: Kai Ji Date: Tue, 26 Oct 2021 18:25:18 +0100 Message-Id: <20211026172518.20183-8-kai.ji@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20211026172518.20183-1-kai.ji@intel.com> References: <20211026172518.20183-1-kai.ji@intel.com> Subject: [dpdk-dev] [dpdk-dev v1 7/7] crypto/qat: qat driver rework clean up X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch renames the refactored code back to qat_sym.c and qat_asym.c; the meson build file is updated accordingly.
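For orientation only: after this clean-up, the consolidated qat_sym.c/qat_asym.c sit on top of the callback-based queue-pair API introduced earlier in the series (qat_op_build_request_t / qat_op_dequeue_t): qat_qp.c keeps the generic ring handling and each service supplies its own request builder and response handler. Below is a self-contained toy model of that pattern; every toy_* name is invented for illustration and none of it is driver code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for the queue pair; the real driver uses struct qat_qp. */
struct toy_qp {
	uint8_t ring[4][64];	/* fake descriptor ring: 4 slots of 64 bytes */
	uint16_t tail;
	uint64_t opaque[2];	/* mirrors qp->opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE] */
};

/* Same shape of contract as qat_op_build_request_t: one op in, one descriptor out. */
typedef int (*toy_build_request_t)(void *op, uint8_t *out_msg, uint64_t *opaque);

/* Generic enqueue: only ring bookkeeping lives here, request layout is delegated. */
static uint16_t
toy_enqueue_op_burst(struct toy_qp *qp, toy_build_request_t build,
		void **ops, uint16_t nb_ops)
{
	uint16_t sent = 0;

	memset(qp->opaque, 0xff, sizeof(qp->opaque));	/* as the reworked enqueue does */
	while (sent < nb_ops && qp->tail < 4) {
		if (build(ops[sent], qp->ring[qp->tail], qp->opaque) != 0)
			break;	/* this op could not be translated into a request */
		qp->tail++;
		sent++;
	}
	return sent;
}

/* A "service" plugs in its own builder, the way qat_sym/qat_asym/qat_comp do. */
static int
toy_sym_build_request(void *op, uint8_t *out_msg, uint64_t *opaque)
{
	(void)opaque;
	snprintf((char *)out_msg, 64, "sym request for op %p", op);
	return 0;
}

int main(void)
{
	struct toy_qp qp = { .tail = 0 };
	int dummy_ops[2];
	void *ops[2] = { &dummy_ops[0], &dummy_ops[1] };
	uint16_t n = toy_enqueue_op_burst(&qp, toy_sym_build_request, ops, 2);

	printf("enqueued %u ops; first descriptor: %s\n", (unsigned)n, (char *)qp.ring[0]);
	return 0;
}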
Integrate qat hw dp apis and qat pmd function to qat_sym.c and qat_asym.c Signed-off-by: Kai Ji Acked-by: Fan Zhang --- drivers/common/qat/meson.build | 4 +- drivers/common/qat/qat_device.c | 2 +- drivers/common/qat/qat_qp.c | 4 +- drivers/common/qat/qat_qp.h | 2 +- drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c | 4 +- drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 4 +- drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 164 +-- drivers/crypto/qat/dev/qat_sym_pmd_gen1.c | 2 +- drivers/crypto/qat/qat_asym.c | 803 +++++++++------ drivers/crypto/qat/qat_asym.h | 90 +- drivers/crypto/qat/qat_asym_pmd.c | 231 ----- drivers/crypto/qat/qat_asym_pmd.h | 54 - drivers/crypto/qat/qat_asym_refactor.c | 994 ------------------ drivers/crypto/qat/qat_asym_refactor.h | 125 --- drivers/crypto/qat/qat_crypto.h | 5 +- drivers/crypto/qat/qat_sym.c | 997 ++++++------------- drivers/crypto/qat/qat_sym.h | 141 ++- drivers/crypto/qat/qat_sym_hw_dp.c | 975 ------------------ drivers/crypto/qat/qat_sym_pmd.c | 251 ----- drivers/crypto/qat/qat_sym_pmd.h | 95 -- drivers/crypto/qat/qat_sym_refactor.c | 360 ------- drivers/crypto/qat/qat_sym_refactor.h | 402 -------- drivers/crypto/qat/qat_sym_session.c | 8 +- 23 files changed, 997 insertions(+), 4720 deletions(-) delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h delete mode 100644 drivers/crypto/qat/qat_asym_refactor.c delete mode 100644 drivers/crypto/qat/qat_asym_refactor.h delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h delete mode 100644 drivers/crypto/qat/qat_sym_refactor.c delete mode 100644 drivers/crypto/qat/qat_sym_refactor.h diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build index b7b18b6c0f..e53b51dcac 100644 --- a/drivers/common/qat/meson.build +++ b/drivers/common/qat/meson.build @@ -70,8 +70,8 @@ if qat_compress endif if qat_crypto - foreach f: ['qat_sym_refactor.c', 'qat_sym_session.c', - 'qat_sym_hw_dp.c', 'qat_asym_refactor.c', 'qat_crypto.c', + foreach f: ['qat_sym.c', 'qat_sym_session.c', + 'qat_asym.c', 'qat_crypto.c', 'dev/qat_sym_pmd_gen1.c', 'dev/qat_asym_pmd_gen1.c', 'dev/qat_crypto_pmd_gen2.c', diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c index 437996f2e8..e1fc1c37ec 100644 --- a/drivers/common/qat/qat_device.c +++ b/drivers/common/qat/qat_device.c @@ -8,7 +8,7 @@ #include "qat_device.h" #include "adf_transport_access_macros.h" -#include "qat_sym_pmd.h" +#include "qat_sym.h" #include "qat_comp_pmd.h" #include "adf_pf2vf_msg.h" #include "qat_pf2vf.h" diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c index 82adda0698..56facf0fbe 100644 --- a/drivers/common/qat/qat_qp.c +++ b/drivers/common/qat/qat_qp.c @@ -15,8 +15,8 @@ #include "qat_logs.h" #include "qat_device.h" #include "qat_qp.h" -#include "qat_sym_refactor.h" -#include "qat_asym_refactor.h" +#include "qat_sym.h" +#include "qat_asym.h" #include "qat_comp.h" #define QAT_CQ_MAX_DEQ_RETRIES 10 diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h index b4518df7d0..fc77173fc1 100644 --- a/drivers/common/qat/qat_qp.h +++ b/drivers/common/qat/qat_qp.h @@ -130,7 +130,7 @@ qat_qp_setup(struct qat_pci_device *qat_dev, int qat_qps_per_service(struct qat_pci_device *qat_dev, - enum qat_service_type service); + enum qat_service_type service); const struct qat_qp_hw_data * qat_qp_get_hw_data(struct qat_pci_device *qat_dev, diff --git 
a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c index 86c124f6c8..72609703f9 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c @@ -5,8 +5,8 @@ #include #include #include "qat_sym_session.h" -#include "qat_sym_refactor.h" -#include "qat_asym_refactor.h" +#include "qat_sym.h" +#include "qat_asym.h" #include "qat_crypto.h" #include "qat_crypto_pmd_gens.h" diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c index 1b6cf10589..529878e3e7 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c @@ -5,8 +5,8 @@ #include #include #include "qat_sym_session.h" -#include "qat_sym_refactor.h" -#include "qat_asym_refactor.h" +#include "qat_sym.h" +#include "qat_asym.h" #include "qat_crypto.h" #include "qat_crypto_pmd_gens.h" diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h index 07020741bd..b69d4d9091 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h @@ -10,166 +10,6 @@ #include "qat_sym_session.h" #include "qat_sym.h" -#define QAT_BASE_GEN1_SYM_CAPABILITIES \ - QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \ - CAP_RNG(digest_size, 1, 20, 1)), \ - QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \ - CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \ - QAT_SYM_AEAD_CAP(AES_CCM, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \ - CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \ - QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \ - CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)), \ - QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \ - CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \ - CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \ - CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \ - CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \ - CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \ - CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \ - CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \ - CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \ - CAP_RNG(key_size, 
1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(MD5_HMAC, CAP_SET(block_size, 64), \ - CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SNOW3G_UIA2, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \ - CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)), \ - QAT_SYM_AUTH_CAP(KASUMI_F9, CAP_SET(block_size, 8), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \ - CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \ - QAT_SYM_CIPHER_CAP(AES_CTR, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \ - QAT_SYM_CIPHER_CAP(AES_XTS, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)), \ - QAT_SYM_CIPHER_CAP(AES_DOCSISBPI, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \ - QAT_SYM_CIPHER_CAP(SNOW3G_UEA2, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \ - QAT_SYM_CIPHER_CAP(KASUMI_F8, CAP_SET(block_size, 8), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)), \ - QAT_SYM_CIPHER_CAP(NULL, CAP_SET(block_size, 1), \ - CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_CIPHER_CAP(3DES_CBC, CAP_SET(block_size, 8), \ - CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \ - QAT_SYM_CIPHER_CAP(3DES_CTR, CAP_SET(block_size, 8), \ - CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \ - QAT_SYM_CIPHER_CAP(DES_CBC, CAP_SET(block_size, 8), \ - CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \ - QAT_SYM_CIPHER_CAP(DES_DOCSISBPI, CAP_SET(block_size, 8), \ - CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)) - -#define QAT_BASE_GEN1_ASYM_CAPABILITIES \ - QAT_ASYM_CAP(MODEX, 0, 1, 512, 1), \ - QAT_ASYM_CAP(MODINV, 0, 1, 512, 1), \ - QAT_ASYM_CAP(RSA, \ - ((1 << RTE_CRYPTO_ASYM_OP_SIGN) | \ - (1 << RTE_CRYPTO_ASYM_OP_VERIFY) | \ - (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) | \ - (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)), \ - 64, 512, 64) - -#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \ - QAT_SYM_CIPHER_CAP(ZUC_EEA3, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \ - QAT_SYM_AUTH_CAP(ZUC_EIA3, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \ - CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)) \ - -#define QAT_EXTRA_GEN3_SYM_CAPABILITIES \ - QAT_SYM_AEAD_CAP(CHACHA20_POLY1305, CAP_SET(block_size, 64), \ - CAP_RNG(key_size, 32, 32, 0), \ - CAP_RNG(digest_size, 16, 16, 0), \ - CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)) - -#define QAT_BASE_GEN4_SYM_CAPABILITIES \ - QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \ - QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \ - CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA224_HMAC, 
CAP_SET(block_size, 64), \ - CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \ - CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \ - CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \ - CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_CIPHER_CAP(AES_DOCSISBPI, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \ - QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \ - CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_CIPHER_CAP(NULL, CAP_SET(block_size, 1), \ - CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \ - CAP_RNG(digest_size, 1, 20, 1)), \ - QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \ - CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \ - CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \ - CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \ - CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \ - CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \ - QAT_SYM_CIPHER_CAP(AES_CTR, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \ - QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \ - CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \ - QAT_SYM_AEAD_CAP(AES_CCM, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \ - CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \ - QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \ - CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \ - CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)) \ - #define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \ RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n) @@ -432,7 +272,7 @@ qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op, cipher_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); - if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) { + if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) { op->status = RTE_CRYPTO_OP_STATUS_ERROR; return UINT64_MAX; } @@ -506,7 +346,7 @@ qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op, auth_ofs + auth_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); - if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) { + if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) { op->status = RTE_CRYPTO_OP_STATUS_ERROR; 
return UINT64_MAX; } diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c index 85d5e45a42..8b88443a40 100644 --- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c +++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c @@ -13,7 +13,7 @@ #include "icp_qat_fw_la.h" #include "qat_sym_session.h" -#include "qat_sym_refactor.h" +#include "qat_sym.h" #include "qat_sym_session.h" #include "qat_crypto.h" #include "qat_crypto_pmd_gens.h" diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c index d5b4c66d68..b80b81e8da 100644 --- a/drivers/crypto/qat/qat_asym.c +++ b/drivers/crypto/qat/qat_asym.c @@ -1,70 +1,147 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2021 Intel Corporation */ #include -#include "qat_asym.h" +#include + #include "icp_qat_fw_pke.h" #include "icp_qat_fw.h" #include "qat_pke_functionality_arrays.h" -#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg)) +#include "qat_device.h" -static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2], - size_t arr_sz, size_t *size, uint32_t *func_id) +#include "qat_logs.h" +#include "qat_asym.h" + +uint8_t qat_asym_driver_id; + +struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS]; + +void +qat_asym_init_op_cookie(void *op_cookie) { - size_t i; + int j; + struct qat_asym_op_cookie *cookie = op_cookie; - for (i = 0; i < arr_sz; i++) { - if (*size <= arr[i][0]) { - *size = arr[i][0]; - *func_id = arr[i][1]; - return 0; - } + cookie->input_addr = rte_mempool_virt2iova(cookie) + + offsetof(struct qat_asym_op_cookie, + input_params_ptrs); + + cookie->output_addr = rte_mempool_virt2iova(cookie) + + offsetof(struct qat_asym_op_cookie, + output_params_ptrs); + + for (j = 0; j < 8; j++) { + cookie->input_params_ptrs[j] = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_asym_op_cookie, + input_array[j]); + cookie->output_params_ptrs[j] = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_asym_op_cookie, + output_array[j]); } - return -1; } -static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req) +int +qat_asym_session_configure(struct rte_cryptodev *dev, + struct rte_crypto_asym_xform *xform, + struct rte_cryptodev_asym_session *sess, + struct rte_mempool *mempool) { - memset(qat_req, 0, sizeof(*qat_req)); - qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; + int err = 0; + void *sess_private_data; + struct qat_asym_session *session; + + if (rte_mempool_get(mempool, &sess_private_data)) { + QAT_LOG(ERR, + "Couldn't get object from session mempool"); + return -ENOMEM; + } - qat_req->pke_hdr.hdr_flags = - ICP_QAT_FW_COMN_HDR_FLAGS_BUILD - (ICP_QAT_FW_COMN_REQ_FLAG_SET); + session = sess_private_data; + if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { + if (xform->modex.exponent.length == 0 || + xform->modex.modulus.length == 0) { + QAT_LOG(ERR, "Invalid mod exp input parameter"); + err = -EINVAL; + goto error; + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { + if (xform->modinv.modulus.length == 0) { + QAT_LOG(ERR, "Invalid mod inv input parameter"); + err = -EINVAL; + goto error; + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { + if (xform->rsa.n.length == 0) { + QAT_LOG(ERR, "Invalid rsa input parameter"); + err = -EINVAL; + goto error; + } + } else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END + || xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) { + QAT_LOG(ERR, "Invalid asymmetric crypto xform"); + err = 
-EINVAL; + goto error; + } else { + QAT_LOG(ERR, "Asymmetric crypto xform not implemented"); + err = -EINVAL; + goto error; + } + + session->xform = xform; + qat_asym_build_req_tmpl(sess_private_data); + set_asym_session_private_data(sess, dev->driver_id, + sess_private_data); + + return 0; +error: + rte_mempool_put(mempool, sess_private_data); + return err; } -static inline void qat_asym_build_req_tmpl(void *sess_private_data) +unsigned int +qat_asym_session_get_private_size( + struct rte_cryptodev *dev __rte_unused) { - struct icp_qat_fw_pke_request *qat_req; - struct qat_asym_session *session = sess_private_data; - - qat_req = &session->req_tmpl; - qat_fill_req_tmpl(qat_req); + return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8); } -static size_t max_of(int n, ...) +void +qat_asym_session_clear(struct rte_cryptodev *dev, + struct rte_cryptodev_asym_session *sess) { - va_list args; - size_t len = 0, num; - int i; + uint8_t index = dev->driver_id; + void *sess_priv = get_asym_session_private_data(sess, index); + struct qat_asym_session *s = (struct qat_asym_session *)sess_priv; - va_start(args, n); - len = va_arg(args, size_t); + if (sess_priv) { + memset(s, 0, qat_asym_session_get_private_size(dev)); + struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - for (i = 0; i < n - 1; i++) { - num = va_arg(args, size_t); - if (num > len) - len = num; + set_asym_session_private_data(sess, index, NULL); + rte_mempool_put(sess_mp, sess_priv); } - va_end(args); - - return len; } -static void qat_clear_arrays(struct qat_asym_op_cookie *cookie, +/* An rte_driver is needed in the registration of both the device and the driver + * with cryptodev. + * The actual qat pci's rte_driver can't be used as its name represents + * the whole pci device with all services. Think of this as a holder for a name + * for the crypto part of the pci device. 
+ */ +static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD); +static const struct rte_driver cryptodev_qat_asym_driver = { + .name = qat_asym_drv_name, + .alias = qat_asym_drv_name +}; + + +static void +qat_clear_arrays(struct qat_asym_op_cookie *cookie, int in_count, int out_count, int in_size, int out_size) { int i; @@ -75,7 +152,8 @@ static void qat_clear_arrays(struct qat_asym_op_cookie *cookie, memset(cookie->output_array[i], 0x0, out_size); } -static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie, +static void +qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie, enum rte_crypto_asym_xform_type alg, int in_size, int out_size) { if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX) @@ -88,7 +166,229 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie, out_size); } -static int qat_asym_check_nonzero(rte_crypto_param n) +static void +qat_asym_collect_response(struct rte_crypto_op *rx_op, + struct qat_asym_op_cookie *cookie, + struct rte_crypto_asym_xform *xform) +{ + size_t alg_size, alg_size_in_bytes = 0; + struct rte_crypto_asym_op *asym_op = rx_op->asym; + + if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { + rte_crypto_param n = xform->modex.modulus; + + alg_size = cookie->alg_size; + alg_size_in_bytes = alg_size >> 3; + uint8_t *modexp_result = asym_op->modex.result.data; + + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { + rte_memcpy(modexp_result + + (asym_op->modex.result.length - + n.length), + cookie->output_array[0] + alg_size_in_bytes + - n.length, n.length + ); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result", + cookie->output_array[0], + alg_size_in_bytes); + +#endif + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { + rte_crypto_param n = xform->modinv.modulus; + + alg_size = cookie->alg_size; + alg_size_in_bytes = alg_size >> 3; + uint8_t *modinv_result = asym_op->modinv.result.data; + + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { + rte_memcpy(modinv_result + + (asym_op->modinv.result.length - n.length), + cookie->output_array[0] + + alg_size_in_bytes - n.length, n.length); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result", + cookie->output_array[0], + alg_size_in_bytes); +#endif + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { + + alg_size = cookie->alg_size; + alg_size_in_bytes = alg_size >> 3; + if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT || + asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_VERIFY) { + if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_ENCRYPT) { + uint8_t *rsa_result = asym_op->rsa.cipher.data; + + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data", + cookie->output_array[0], + alg_size_in_bytes); +#endif + } else if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_VERIFY) { + uint8_t *rsa_result = asym_op->rsa.cipher.data; + + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + rx_op->status = + RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", + cookie->output_array[0], + alg_size_in_bytes); +#endif + break; + default: + QAT_LOG(ERR, "Padding not supported"); + 
rx_op->status = + RTE_CRYPTO_OP_STATUS_ERROR; + break; + } + } + } else { + if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_DECRYPT) { + uint8_t *rsa_result = asym_op->rsa.message.data; + + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + break; + default: + QAT_LOG(ERR, "Padding not supported"); + rx_op->status = + RTE_CRYPTO_OP_STATUS_ERROR; + break; + } +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message", + rsa_result, alg_size_in_bytes); +#endif + } else if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_SIGN) { + uint8_t *rsa_result = asym_op->rsa.sign.data; + + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", + cookie->output_array[0], + alg_size_in_bytes); +#endif + } + } + } + qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes, + alg_size_in_bytes); +} + +int +qat_asym_process_response(void __rte_unused * *op, uint8_t *resp, + void *op_cookie, __rte_unused uint64_t *dequeue_err_count) +{ + struct qat_asym_session *ctx; + struct icp_qat_fw_pke_resp *resp_msg = + (struct icp_qat_fw_pke_resp *)resp; + struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) + (resp_msg->opaque); + struct qat_asym_op_cookie *cookie = op_cookie; + + if (cookie->error) { + cookie->error = 0; + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; + QAT_DP_LOG(ERR, "Cookie status returned error"); + } else { + if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( + resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) { + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; + QAT_DP_LOG(ERR, "Asymmetric response status" + " returned error"); + } + if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) { + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; + QAT_DP_LOG(ERR, "Asymmetric common status" + " returned error"); + } + } + + if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { + ctx = (struct qat_asym_session *)get_asym_session_private_data( + rx_op->asym->session, qat_asym_driver_id); + qat_asym_collect_response(rx_op, cookie, ctx->xform); + } else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { + qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform); + } + *op = rx_op; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg, + sizeof(struct icp_qat_fw_pke_resp)); +#endif + + return 1; +} + +#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg)) + +static int +qat_asym_get_sz_and_func_id(const uint32_t arr[][2], + size_t arr_sz, size_t *size, uint32_t *func_id) +{ + size_t i; + + for (i = 0; i < arr_sz; i++) { + if (*size <= arr[i][0]) { + *size = arr[i][0]; + *func_id = arr[i][1]; + return 0; + } + } + return -1; +} + +static size_t +max_of(int n, ...) 
+{ + va_list args; + size_t len = 0, num; + int i; + + va_start(args, n); + len = va_arg(args, size_t); + + for (i = 0; i < n - 1; i++) { + num = va_arg(args, size_t); + if (num > len) + len = num; + } + va_end(args); + + return len; +} + +static int +qat_asym_check_nonzero(rte_crypto_param n) { if (n.length < 8) { /* Not a case for any cryptograpic function except for DH @@ -452,50 +752,14 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op, } else { QAT_LOG(ERR, "Invalid asymmetric crypto xform"); return -(EINVAL); - } - return 0; -} - -static __rte_always_inline int -refactor_qat_asym_build_request(__rte_unused void *in_op, - __rte_unused uint8_t *out_msg, __rte_unused void *op_cookie, - __rte_unused uint64_t *opaque, - __rte_unused enum qat_device_gen dev_gen) -{ - return 0; -} - -int -refactor_qat_asym_process_response(__rte_unused void **op, - __rte_unused uint8_t *resp, - __rte_unused void *op_cookie, - __rte_unused uint64_t *dequeue_err_count) -{ + } return 0; } -uint16_t -qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return refactor_qat_enqueue_op_burst(qp, - refactor_qat_asym_build_request, - (void **)ops, nb_ops); -} - -uint16_t -qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return refactor_qat_dequeue_op_burst(qp, (void **)ops, - refactor_qat_asym_process_response, nb_ops); -} - -int -qat_asym_build_request(void *in_op, - uint8_t *out_msg, - void *op_cookie, - __rte_unused enum qat_device_gen qat_dev_gen) +static __rte_always_inline int +qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie, + __rte_unused uint64_t *opaque, + __rte_unused enum qat_device_gen dev_gen) { struct qat_asym_session *ctx; struct rte_crypto_op *op = (struct rte_crypto_op *)in_op; @@ -562,262 +826,161 @@ qat_asym_build_request(void *in_op, return 0; } -static void qat_asym_collect_response(struct rte_crypto_op *rx_op, - struct qat_asym_op_cookie *cookie, - struct rte_crypto_asym_xform *xform) +static uint16_t +qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) { - size_t alg_size, alg_size_in_bytes = 0; - struct rte_crypto_asym_op *asym_op = rx_op->asym; - - if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { - rte_crypto_param n = xform->modex.modulus; - - alg_size = cookie->alg_size; - alg_size_in_bytes = alg_size >> 3; - uint8_t *modexp_result = asym_op->modex.result.data; - - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { - rte_memcpy(modexp_result + - (asym_op->modex.result.length - - n.length), - cookie->output_array[0] + alg_size_in_bytes - - n.length, n.length - ); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result", - cookie->output_array[0], - alg_size_in_bytes); - -#endif - } - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { - rte_crypto_param n = xform->modinv.modulus; - - alg_size = cookie->alg_size; - alg_size_in_bytes = alg_size >> 3; - uint8_t *modinv_result = asym_op->modinv.result.data; - - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { - rte_memcpy(modinv_result + (asym_op->modinv.result.length - - n.length), - cookie->output_array[0] + alg_size_in_bytes - - n.length, n.length); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result", - cookie->output_array[0], - alg_size_in_bytes); -#endif - } - } else if (xform->xform_type == 
RTE_CRYPTO_ASYM_XFORM_RSA) { - - alg_size = cookie->alg_size; - alg_size_in_bytes = alg_size >> 3; - if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT || - asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_VERIFY) { - if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_ENCRYPT) { - uint8_t *rsa_result = asym_op->rsa.cipher.data; - - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data", - cookie->output_array[0], - alg_size_in_bytes); -#endif - } else if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_VERIFY) { - uint8_t *rsa_result = asym_op->rsa.cipher.data; - - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - rx_op->status = - RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", - cookie->output_array[0], - alg_size_in_bytes); -#endif - break; - default: - QAT_LOG(ERR, "Padding not supported"); - rx_op->status = - RTE_CRYPTO_OP_STATUS_ERROR; - break; - } - } - } else { - if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_DECRYPT) { - uint8_t *rsa_result = asym_op->rsa.message.data; - - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - break; - default: - QAT_LOG(ERR, "Padding not supported"); - rx_op->status = - RTE_CRYPTO_OP_STATUS_ERROR; - break; - } -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message", - rsa_result, alg_size_in_bytes); -#endif - } else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) { - uint8_t *rsa_result = asym_op->rsa.sign.data; - - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", - cookie->output_array[0], - alg_size_in_bytes); -#endif - } - } - } - qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes, - alg_size_in_bytes); + return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops, + nb_ops); } -void -qat_asym_process_response(void **op, uint8_t *resp, - void *op_cookie) +static uint16_t +qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) { - struct qat_asym_session *ctx; - struct icp_qat_fw_pke_resp *resp_msg = - (struct icp_qat_fw_pke_resp *)resp; - struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) - (resp_msg->opaque); - struct qat_asym_op_cookie *cookie = op_cookie; - - if (cookie->error) { - cookie->error = 0; - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) - rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; - QAT_DP_LOG(ERR, "Cookie status returned error"); - } else { - if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( - resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) { - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) - rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; - QAT_DP_LOG(ERR, "Asymmetric response status" - " returned error"); - } - if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) { - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) - rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; - QAT_DP_LOG(ERR, "Asymmetric common status" - " returned error"); - } - } - - if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - ctx = (struct qat_asym_session *)get_asym_session_private_data( - 
rx_op->asym->session, qat_asym_driver_id); - qat_asym_collect_response(rx_op, cookie, ctx->xform); - } else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { - qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform); - } - *op = rx_op; - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg, - sizeof(struct icp_qat_fw_pke_resp)); -#endif + return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response, + nb_ops); } int -qat_asym_session_configure(struct rte_cryptodev *dev, - struct rte_crypto_asym_xform *xform, - struct rte_cryptodev_asym_session *sess, - struct rte_mempool *mempool) +qat_asym_dev_create(struct qat_pci_device *qat_pci_dev, + struct qat_dev_cmd_param *qat_dev_cmd_param) { - int err = 0; - void *sess_private_data; - struct qat_asym_session *session; - - if (rte_mempool_get(mempool, &sess_private_data)) { - QAT_LOG(ERR, - "Couldn't get object from session mempool"); - return -ENOMEM; + struct qat_cryptodev_private *internals; + struct rte_cryptodev *cryptodev; + struct qat_device_info *qat_dev_instance = + &qat_pci_devs[qat_pci_dev->qat_dev_id]; + struct rte_cryptodev_pmd_init_params init_params = { + .name = "", + .socket_id = qat_dev_instance->pci_dev->device.numa_node, + .private_data_size = sizeof(struct qat_cryptodev_private) + }; + struct qat_capabilities_info capa_info; + const struct rte_cryptodev_capabilities *capabilities; + const struct qat_crypto_gen_dev_ops *gen_dev_ops = + &qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen]; + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + uint64_t capa_size; + int i = 0; + + snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", + qat_pci_dev->name, "asym"); + QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name); + + if (gen_dev_ops->cryptodev_ops == NULL) { + QAT_LOG(ERR, "Device %s does not support asymmetric crypto", + name); + return -(EFAULT); } - session = sess_private_data; - if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { - if (xform->modex.exponent.length == 0 || - xform->modex.modulus.length == 0) { - QAT_LOG(ERR, "Invalid mod exp input parameter"); - err = -EINVAL; - goto error; - } - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { - if (xform->modinv.modulus.length == 0) { - QAT_LOG(ERR, "Invalid mod inv input parameter"); - err = -EINVAL; - goto error; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + qat_pci_dev->qat_asym_driver_id = + qat_asym_driver_id; + } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + if (qat_pci_dev->qat_asym_driver_id != + qat_asym_driver_id) { + QAT_LOG(ERR, + "Device %s have different driver id than corresponding device in primary process", + name); + return -(EFAULT); } - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { - if (xform->rsa.n.length == 0) { - QAT_LOG(ERR, "Invalid rsa input parameter"); - err = -EINVAL; - goto error; + } + + /* Populate subset device to use in cryptodev device creation */ + qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver; + qat_dev_instance->asym_rte_dev.numa_node = + qat_dev_instance->pci_dev->device.numa_node; + qat_dev_instance->asym_rte_dev.devargs = NULL; + + cryptodev = rte_cryptodev_pmd_create(name, + &(qat_dev_instance->asym_rte_dev), &init_params); + + if (cryptodev == NULL) + return -ENODEV; + + qat_dev_instance->asym_rte_dev.name = cryptodev->data->name; + cryptodev->driver_id = qat_asym_driver_id; + cryptodev->dev_ops = gen_dev_ops->cryptodev_ops; + + cryptodev->enqueue_burst = 
qat_asym_crypto_enqueue_op_burst; + cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst; + + cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, + "QAT_ASYM_CAPA_GEN_%d", + qat_pci_dev->qat_dev_gen); + + internals = cryptodev->data->dev_private; + internals->qat_dev = qat_pci_dev; + internals->dev_id = cryptodev->data->dev_id; + + capa_info = gen_dev_ops->get_capabilities(qat_pci_dev); + capabilities = capa_info.data; + capa_size = capa_info.size; + + internals->capa_mz = rte_memzone_lookup(capa_memz_name); + if (internals->capa_mz == NULL) { + internals->capa_mz = rte_memzone_reserve(capa_memz_name, + capa_size, rte_socket_id(), 0); + if (internals->capa_mz == NULL) { + QAT_LOG(DEBUG, + "Error allocating memzone for capabilities, " + "destroying PMD for %s", + name); + rte_cryptodev_pmd_destroy(cryptodev); + memset(&qat_dev_instance->asym_rte_dev, 0, + sizeof(qat_dev_instance->asym_rte_dev)); + return -EFAULT; } - } else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END - || xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) { - QAT_LOG(ERR, "Invalid asymmetric crypto xform"); - err = -EINVAL; - goto error; - } else { - QAT_LOG(ERR, "Asymmetric crypto xform not implemented"); - err = -EINVAL; - goto error; } - session->xform = xform; - qat_asym_build_req_tmpl(sess_private_data); - set_asym_session_private_data(sess, dev->driver_id, - sess_private_data); + memcpy(internals->capa_mz->addr, capabilities, capa_size); + internals->qat_dev_capabilities = internals->capa_mz->addr; - return 0; -error: - rte_mempool_put(mempool, sess_private_data); - return err; -} + while (1) { + if (qat_dev_cmd_param[i].name == NULL) + break; + if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME)) + internals->min_enq_burst_threshold = + qat_dev_cmd_param[i].val; + i++; + } -unsigned int qat_asym_session_get_private_size( - struct rte_cryptodev *dev __rte_unused) -{ - return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8); + qat_pci_dev->asym_dev = internals; + internals->service_type = QAT_SERVICE_ASYMMETRIC; + QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d", + cryptodev->data->name, internals->dev_id); + return 0; } -void -qat_asym_session_clear(struct rte_cryptodev *dev, - struct rte_cryptodev_asym_session *sess) +int +qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev) { - uint8_t index = dev->driver_id; - void *sess_priv = get_asym_session_private_data(sess, index); - struct qat_asym_session *s = (struct qat_asym_session *)sess_priv; - - if (sess_priv) { - memset(s, 0, qat_asym_session_get_private_size(dev)); - struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); + struct rte_cryptodev *cryptodev; + + if (qat_pci_dev == NULL) + return -ENODEV; + if (qat_pci_dev->asym_dev == NULL) + return 0; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_memzone_free(qat_pci_dev->asym_dev->capa_mz); + + /* free crypto device */ + cryptodev = rte_cryptodev_pmd_get_dev( + qat_pci_dev->asym_dev->dev_id); + rte_cryptodev_pmd_destroy(cryptodev); + qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL; + qat_pci_dev->asym_dev = NULL; - set_asym_session_private_data(sess, index, NULL); - rte_mempool_put(sess_mp, sess_priv); - } + return 0; } + +static struct cryptodev_driver qat_crypto_drv; +RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, + cryptodev_qat_asym_driver, + qat_asym_driver_id); diff --git 
a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h index 50c2641eba..55b1e1b2cc 100644 --- a/drivers/crypto/qat/qat_asym.h +++ b/drivers/crypto/qat/qat_asym.h @@ -8,10 +8,13 @@ #include #include #include "icp_qat_fw_pke.h" -#include "qat_common.h" -#include "qat_asym_pmd.h" +#include "qat_device.h" +#include "qat_crypto.h" #include "icp_qat_fw.h" +/** Intel(R) QAT Asymmetric Crypto PMD driver name */ +#define CRYPTODEV_NAME_QAT_ASYM_PMD crypto_qat_asym + typedef uint64_t large_int_ptr; #define MAX_PKE_PARAMS 8 #define QAT_PKE_MAX_LN_SIZE 512 @@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr; #define QAT_ASYM_RSA_NUM_OUT_PARAMS 1 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS 6 +/** + * helper function to add an asym capability + * + **/ +#define QAT_ASYM_CAP(n, o, l, r, i) \ + { \ + .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, \ + {.asym = { \ + .xform_capa = { \ + .xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\ + .op_types = o, \ + { \ + .modlen = { \ + .min = l, \ + .max = r, \ + .increment = i \ + }, } \ + } \ + }, \ + } \ + } + struct qat_asym_op_cookie { size_t alg_size; uint64_t error; @@ -45,6 +70,27 @@ struct qat_asym_session { struct rte_crypto_asym_xform *xform; }; +static inline void +qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req) +{ + memset(qat_req, 0, sizeof(*qat_req)); + qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; + + qat_req->pke_hdr.hdr_flags = + ICP_QAT_FW_COMN_HDR_FLAGS_BUILD + (ICP_QAT_FW_COMN_REQ_FLAG_SET); +} + +static inline void +qat_asym_build_req_tmpl(void *sess_private_data) +{ + struct icp_qat_fw_pke_request *qat_req; + struct qat_asym_session *session = sess_private_data; + + qat_req = &session->req_tmpl; + qat_fill_req_tmpl(qat_req); +} + int qat_asym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_asym_xform *xform, @@ -58,26 +104,6 @@ void qat_asym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_asym_session *sess); -/* - * Build PKE request to be sent to the fw, partially uses template - * request generated during session creation. - * - * @param in_op Pointer to the crypto operation, for every - * service it points to service specific struct. - * @param out_msg Message to be returned to enqueue function - * @param op_cookie Cookie pointer that holds private metadata - * @param qat_dev_gen Generation of QAT hardware - * - * @return - * This function always returns zero, - * it is because of backward compatibility. 
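/*
 * Illustrative sketch, not part of the patch: the QAT_ASYM_CAP helper added
 * to this header above is intended for capability tables along the lines of
 * the one below. The array name and the 1..512 byte modlen range are
 * assumptions made for the example; rte_cryptodev.h and rte_crypto_asym.h
 * are assumed to be included.
 */
static const struct rte_cryptodev_capabilities example_qat_asym_capas[] = {
	/* Modular exponentiation and inversion carry no per-op bitmask. */
	QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),
	QAT_ASYM_CAP(MODINV, 0, 1, 512, 1),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};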
- * - 0: Always returned - * - */ -int -qat_asym_build_request(void *in_op, uint8_t *out_msg, - void *op_cookie, enum qat_device_gen qat_dev_gen); - /* * Process PKE response received from outgoing queue of QAT * @@ -88,23 +114,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg, * @param op_cookie Cookie pointer that holds private metadata * */ -void -qat_asym_process_response(void __rte_unused **op, uint8_t *resp, - void *op_cookie); - int -refactor_qat_asym_process_response(__rte_unused void **op, - __rte_unused uint8_t *resp, - __rte_unused void *op_cookie, - __rte_unused uint64_t *dequeue_err_count); - -uint16_t -qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops); +qat_asym_process_response(void __rte_unused * *op, uint8_t *resp, + void *op_cookie, __rte_unused uint64_t *dequeue_err_count); - -uint16_t -qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops); +void +qat_asym_init_op_cookie(void *cookie); #endif /* _QAT_ASYM_H_ */ diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c deleted file mode 100644 index 284b8096fe..0000000000 --- a/drivers/crypto/qat/qat_asym_pmd.c +++ /dev/null @@ -1,231 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 Intel Corporation - */ - -#include - -#include "qat_logs.h" - -#include "qat_crypto.h" -#include "qat_asym.h" -#include "qat_asym_pmd.h" - -uint8_t qat_asym_driver_id; -struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS]; - -void -qat_asym_init_op_cookie(void *op_cookie) -{ - int j; - struct qat_asym_op_cookie *cookie = op_cookie; - - cookie->input_addr = rte_mempool_virt2iova(cookie) + - offsetof(struct qat_asym_op_cookie, - input_params_ptrs); - - cookie->output_addr = rte_mempool_virt2iova(cookie) + - offsetof(struct qat_asym_op_cookie, - output_params_ptrs); - - for (j = 0; j < 8; j++) { - cookie->input_params_ptrs[j] = - rte_mempool_virt2iova(cookie) + - offsetof(struct qat_asym_op_cookie, - input_array[j]); - cookie->output_params_ptrs[j] = - rte_mempool_virt2iova(cookie) + - offsetof(struct qat_asym_op_cookie, - output_array[j]); - } -} - -static struct rte_cryptodev_ops crypto_qat_ops = { - - /* Device related operations */ - .dev_configure = qat_cryptodev_config, - .dev_start = qat_cryptodev_start, - .dev_stop = qat_cryptodev_stop, - .dev_close = qat_cryptodev_close, - .dev_infos_get = qat_cryptodev_info_get, - - .stats_get = qat_cryptodev_stats_get, - .stats_reset = qat_cryptodev_stats_reset, - .queue_pair_setup = qat_cryptodev_qp_setup, - .queue_pair_release = qat_cryptodev_qp_release, - - /* Crypto related operations */ - .asym_session_get_size = qat_asym_session_get_private_size, - .asym_session_configure = qat_asym_session_configure, - .asym_session_clear = qat_asym_session_clear -}; - -uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return qat_enqueue_op_burst(qp, (void **)ops, nb_ops); -} - -uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return qat_dequeue_op_burst(qp, (void **)ops, nb_ops); -} - -/* An rte_driver is needed in the registration of both the device and the driver - * with cryptodev. - * The actual qat pci's rte_driver can't be used as its name represents - * the whole pci device with all services. Think of this as a holder for a name - * for the crypto part of the pci device. 
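/*
 * Illustrative sketch, not part of the patch: qat_asym_init_op_cookie()
 * above relies on a mempool object's IOVA plus a member offset being that
 * member's IOVA. A minimal, standalone version of the same arithmetic,
 * using an invented cookie layout rather than the driver's, is:
 */
#include <stddef.h>
#include <rte_mempool.h>

struct example_cookie {
	uint64_t seq;
	uint8_t in_buf[64];	/* device-visible input buffer */
	uint8_t out_buf[64];	/* device-visible output buffer */
};

static void
example_cookie_init(void *obj, rte_iova_t *in_iova, rte_iova_t *out_iova)
{
	rte_iova_t base = rte_mempool_virt2iova(obj);

	/* Same pattern as the driver: object base IOVA + offsetof(member). */
	*in_iova = base + offsetof(struct example_cookie, in_buf);
	*out_iova = base + offsetof(struct example_cookie, out_buf);
}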
- */ -static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD); -static const struct rte_driver cryptodev_qat_asym_driver = { - .name = qat_asym_drv_name, - .alias = qat_asym_drv_name -}; - -int -qat_asym_dev_create(struct qat_pci_device *qat_pci_dev, - struct qat_dev_cmd_param *qat_dev_cmd_param) -{ - int i = 0; - struct qat_device_info *qat_dev_instance = - &qat_pci_devs[qat_pci_dev->qat_dev_id]; - struct rte_cryptodev_pmd_init_params init_params = { - .name = "", - .socket_id = qat_dev_instance->pci_dev->device.numa_node, - .private_data_size = sizeof(struct qat_cryptodev_private) - }; - struct qat_capabilities_info capa_info; - const struct rte_cryptodev_capabilities *capabilities; - const struct qat_crypto_gen_dev_ops *gen_dev_ops = - &qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen]; - char name[RTE_CRYPTODEV_NAME_MAX_LEN]; - char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; - struct rte_cryptodev *cryptodev; - struct qat_cryptodev_private *internals; - uint64_t capa_size; - - if (gen_dev_ops->cryptodev_ops == NULL) { - QAT_LOG(ERR, "Device %s does not support asymmetric crypto", - name); - return -EFAULT; - } - - snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", - qat_pci_dev->name, "asym"); - QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name); - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - qat_pci_dev->qat_asym_driver_id = - qat_asym_driver_id; - } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) { - if (qat_pci_dev->qat_asym_driver_id != - qat_asym_driver_id) { - QAT_LOG(ERR, - "Device %s have different driver id than corresponding device in primary process", - name); - return -(EFAULT); - } - } - - /* Populate subset device to use in cryptodev device creation */ - qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver; - qat_dev_instance->asym_rte_dev.numa_node = - qat_dev_instance->pci_dev->device.numa_node; - qat_dev_instance->asym_rte_dev.devargs = NULL; - - cryptodev = rte_cryptodev_pmd_create(name, - &(qat_dev_instance->asym_rte_dev), &init_params); - - if (cryptodev == NULL) - return -ENODEV; - - qat_dev_instance->asym_rte_dev.name = cryptodev->data->name; - cryptodev->driver_id = qat_asym_driver_id; - cryptodev->dev_ops = &crypto_qat_ops; - - cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst; - cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst; - - - cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev); - - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; - - snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, - "QAT_ASYM_CAPA_GEN_%d", - qat_pci_dev->qat_dev_gen); - - internals = cryptodev->data->dev_private; - internals->qat_dev = qat_pci_dev; - internals->dev_id = cryptodev->data->dev_id; - internals->service_type = QAT_SERVICE_ASYMMETRIC; - - capa_info = gen_dev_ops->get_capabilities(qat_pci_dev); - capabilities = capa_info.data; - capa_size = capa_info.size; - - internals->capa_mz = rte_memzone_lookup(capa_memz_name); - if (internals->capa_mz == NULL) { - internals->capa_mz = rte_memzone_reserve(capa_memz_name, - capa_size, rte_socket_id(), 0); - if (internals->capa_mz == NULL) { - QAT_LOG(DEBUG, - "Error allocating memzone for capabilities, " - "destroying PMD for %s", - name); - rte_cryptodev_pmd_destroy(cryptodev); - memset(&qat_dev_instance->asym_rte_dev, 0, - sizeof(qat_dev_instance->asym_rte_dev)); - return -EFAULT; - } - } - - memcpy(internals->capa_mz->addr, capabilities, capa_size); - internals->qat_dev_capabilities = internals->capa_mz->addr; - - while (1) { - if 
(qat_dev_cmd_param[i].name == NULL) - break; - if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME)) - internals->min_enq_burst_threshold = - qat_dev_cmd_param[i].val; - i++; - } - - qat_pci_dev->asym_dev = internals; - - rte_cryptodev_pmd_probing_finish(cryptodev); - - QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d", - cryptodev->data->name, internals->dev_id); - return 0; -} - -int -qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev) -{ - struct rte_cryptodev *cryptodev; - - if (qat_pci_dev == NULL) - return -ENODEV; - if (qat_pci_dev->asym_dev == NULL) - return 0; - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_memzone_free(qat_pci_dev->asym_dev->capa_mz); - - /* free crypto device */ - cryptodev = rte_cryptodev_pmd_get_dev( - qat_pci_dev->asym_dev->dev_id); - rte_cryptodev_pmd_destroy(cryptodev); - qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL; - qat_pci_dev->asym_dev = NULL; - - return 0; -} - -static struct cryptodev_driver qat_crypto_drv; -RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, - cryptodev_qat_asym_driver, - qat_asym_driver_id); diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h deleted file mode 100644 index fd6b406248..0000000000 --- a/drivers/crypto/qat/qat_asym_pmd.h +++ /dev/null @@ -1,54 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 Intel Corporation - */ - - -#ifndef _QAT_ASYM_PMD_H_ -#define _QAT_ASYM_PMD_H_ - -#include -#include "qat_crypto.h" -#include "qat_device.h" - -/** Intel(R) QAT Asymmetric Crypto PMD driver name */ -#define CRYPTODEV_NAME_QAT_ASYM_PMD crypto_qat_asym - - -/** - * Helper function to add an asym capability - * - **/ -#define QAT_ASYM_CAP(n, o, l, r, i) \ - { \ - .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, \ - {.asym = { \ - .xform_capa = { \ - .xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\ - .op_types = o, \ - { \ - .modlen = { \ - .min = l, \ - .max = r, \ - .increment = i \ - }, } \ - } \ - }, \ - } \ - } - -extern uint8_t qat_asym_driver_id; - -extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[]; - -void -qat_asym_init_op_cookie(void *op_cookie); - -uint16_t -qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops); - -uint16_t -qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops); - -#endif /* _QAT_ASYM_PMD_H_ */ diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c deleted file mode 100644 index 3a9b1d4054..0000000000 --- a/drivers/crypto/qat/qat_asym_refactor.c +++ /dev/null @@ -1,994 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 - 2021 Intel Corporation - */ - - -#include - -#include - -#include "icp_qat_fw_pke.h" -#include "icp_qat_fw.h" -#include "qat_pke_functionality_arrays.h" - -#include "qat_device.h" - -#include "qat_logs.h" -#include "qat_asym_refactor.h" - -uint8_t qat_asym_driver_id; - -struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS]; - -void -qat_asym_init_op_cookie(void *op_cookie) -{ - int j; - struct qat_asym_op_cookie *cookie = op_cookie; - - cookie->input_addr = rte_mempool_virt2iova(cookie) + - offsetof(struct qat_asym_op_cookie, - input_params_ptrs); - - cookie->output_addr = rte_mempool_virt2iova(cookie) + - offsetof(struct qat_asym_op_cookie, - output_params_ptrs); - - for (j = 0; j < 8; j++) { - cookie->input_params_ptrs[j] = - rte_mempool_virt2iova(cookie) + - offsetof(struct qat_asym_op_cookie, - input_array[j]); - cookie->output_params_ptrs[j] = - 
rte_mempool_virt2iova(cookie) + - offsetof(struct qat_asym_op_cookie, - output_array[j]); - } -} - -int -qat_asym_session_configure(struct rte_cryptodev *dev, - struct rte_crypto_asym_xform *xform, - struct rte_cryptodev_asym_session *sess, - struct rte_mempool *mempool) -{ - int err = 0; - void *sess_private_data; - struct qat_asym_session *session; - - if (rte_mempool_get(mempool, &sess_private_data)) { - QAT_LOG(ERR, - "Couldn't get object from session mempool"); - return -ENOMEM; - } - - session = sess_private_data; - if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { - if (xform->modex.exponent.length == 0 || - xform->modex.modulus.length == 0) { - QAT_LOG(ERR, "Invalid mod exp input parameter"); - err = -EINVAL; - goto error; - } - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { - if (xform->modinv.modulus.length == 0) { - QAT_LOG(ERR, "Invalid mod inv input parameter"); - err = -EINVAL; - goto error; - } - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { - if (xform->rsa.n.length == 0) { - QAT_LOG(ERR, "Invalid rsa input parameter"); - err = -EINVAL; - goto error; - } - } else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END - || xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) { - QAT_LOG(ERR, "Invalid asymmetric crypto xform"); - err = -EINVAL; - goto error; - } else { - QAT_LOG(ERR, "Asymmetric crypto xform not implemented"); - err = -EINVAL; - goto error; - } - - session->xform = xform; - qat_asym_build_req_tmpl(sess_private_data); - set_asym_session_private_data(sess, dev->driver_id, - sess_private_data); - - return 0; -error: - rte_mempool_put(mempool, sess_private_data); - return err; -} - -unsigned int -qat_asym_session_get_private_size( - struct rte_cryptodev *dev __rte_unused) -{ - return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8); -} - -void -qat_asym_session_clear(struct rte_cryptodev *dev, - struct rte_cryptodev_asym_session *sess) -{ - uint8_t index = dev->driver_id; - void *sess_priv = get_asym_session_private_data(sess, index); - struct qat_asym_session *s = (struct qat_asym_session *)sess_priv; - - if (sess_priv) { - memset(s, 0, qat_asym_session_get_private_size(dev)); - struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - - set_asym_session_private_data(sess, index, NULL); - rte_mempool_put(sess_mp, sess_priv); - } -} - -/* An rte_driver is needed in the registration of both the device and the driver - * with cryptodev. - * The actual qat pci's rte_driver can't be used as its name represents - * the whole pci device with all services. Think of this as a holder for a name - * for the crypto part of the pci device. 
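/*
 * Illustrative sketch, not part of the patch: the xform validated by
 * qat_asym_session_configure() above is filled in by the application. A
 * minimal modular-exponentiation xform that satisfies the length checks
 * looks roughly as follows; the operand buffers are placeholders and must
 * hold big-endian byte strings in a real application.
 */
#include <rte_crypto_asym.h>

static uint8_t example_modulus[128];	/* placeholder modulus bytes */
static uint8_t example_exponent[3] = { 0x01, 0x00, 0x01 };	/* 65537 */

static struct rte_crypto_asym_xform example_modex_xform = {
	.next = NULL,
	.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
	.modex = {
		.modulus = {
			.data = example_modulus,
			.length = sizeof(example_modulus),
		},
		.exponent = {
			.data = example_exponent,
			.length = sizeof(example_exponent),
		},
	},
};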
- */ -static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD); -static const struct rte_driver cryptodev_qat_asym_driver = { - .name = qat_asym_drv_name, - .alias = qat_asym_drv_name -}; - - -static void -qat_clear_arrays(struct qat_asym_op_cookie *cookie, - int in_count, int out_count, int in_size, int out_size) -{ - int i; - - for (i = 0; i < in_count; i++) - memset(cookie->input_array[i], 0x0, in_size); - for (i = 0; i < out_count; i++) - memset(cookie->output_array[i], 0x0, out_size); -} - -static void -qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie, - enum rte_crypto_asym_xform_type alg, int in_size, int out_size) -{ - if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX) - qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS, - QAT_ASYM_MODEXP_NUM_OUT_PARAMS, in_size, - out_size); - else if (alg == RTE_CRYPTO_ASYM_XFORM_MODINV) - qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS, - QAT_ASYM_MODINV_NUM_OUT_PARAMS, in_size, - out_size); -} - -static void -qat_asym_collect_response(struct rte_crypto_op *rx_op, - struct qat_asym_op_cookie *cookie, - struct rte_crypto_asym_xform *xform) -{ - size_t alg_size, alg_size_in_bytes = 0; - struct rte_crypto_asym_op *asym_op = rx_op->asym; - - if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { - rte_crypto_param n = xform->modex.modulus; - - alg_size = cookie->alg_size; - alg_size_in_bytes = alg_size >> 3; - uint8_t *modexp_result = asym_op->modex.result.data; - - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { - rte_memcpy(modexp_result + - (asym_op->modex.result.length - - n.length), - cookie->output_array[0] + alg_size_in_bytes - - n.length, n.length - ); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result", - cookie->output_array[0], - alg_size_in_bytes); - -#endif - } - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { - rte_crypto_param n = xform->modinv.modulus; - - alg_size = cookie->alg_size; - alg_size_in_bytes = alg_size >> 3; - uint8_t *modinv_result = asym_op->modinv.result.data; - - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { - rte_memcpy(modinv_result + - (asym_op->modinv.result.length - n.length), - cookie->output_array[0] + alg_size_in_bytes - - n.length, n.length); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result", - cookie->output_array[0], - alg_size_in_bytes); -#endif - } - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { - - alg_size = cookie->alg_size; - alg_size_in_bytes = alg_size >> 3; - if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT || - asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_VERIFY) { - if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_ENCRYPT) { - uint8_t *rsa_result = asym_op->rsa.cipher.data; - - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data", - cookie->output_array[0], - alg_size_in_bytes); -#endif - } else if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_VERIFY) { - uint8_t *rsa_result = asym_op->rsa.cipher.data; - - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - rx_op->status = - RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", - 
cookie->output_array[0], - alg_size_in_bytes); -#endif - break; - default: - QAT_LOG(ERR, "Padding not supported"); - rx_op->status = - RTE_CRYPTO_OP_STATUS_ERROR; - break; - } - } - } else { - if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_DECRYPT) { - uint8_t *rsa_result = asym_op->rsa.message.data; - - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - break; - default: - QAT_LOG(ERR, "Padding not supported"); - rx_op->status = - RTE_CRYPTO_OP_STATUS_ERROR; - break; - } -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message", - rsa_result, alg_size_in_bytes); -#endif - } else if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_SIGN) { - uint8_t *rsa_result = asym_op->rsa.sign.data; - - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", - cookie->output_array[0], - alg_size_in_bytes); -#endif - } - } - } - qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes, - alg_size_in_bytes); -} - -int -qat_asym_process_response(void __rte_unused * *op, uint8_t *resp, - void *op_cookie, __rte_unused uint64_t *dequeue_err_count) -{ - struct qat_asym_session *ctx; - struct icp_qat_fw_pke_resp *resp_msg = - (struct icp_qat_fw_pke_resp *)resp; - struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) - (resp_msg->opaque); - struct qat_asym_op_cookie *cookie = op_cookie; - - if (cookie->error) { - cookie->error = 0; - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) - rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; - QAT_DP_LOG(ERR, "Cookie status returned error"); - } else { - if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( - resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) { - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) - rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; - QAT_DP_LOG(ERR, "Asymmetric response status" - " returned error"); - } - if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) { - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) - rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; - QAT_DP_LOG(ERR, "Asymmetric common status" - " returned error"); - } - } - - if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - ctx = (struct qat_asym_session *)get_asym_session_private_data( - rx_op->asym->session, qat_asym_driver_id); - qat_asym_collect_response(rx_op, cookie, ctx->xform); - } else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { - qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform); - } - *op = rx_op; - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg, - sizeof(struct icp_qat_fw_pke_resp)); -#endif - - return 1; -} - -#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg)) - -static int -qat_asym_get_sz_and_func_id(const uint32_t arr[][2], - size_t arr_sz, size_t *size, uint32_t *func_id) -{ - size_t i; - - for (i = 0; i < arr_sz; i++) { - if (*size <= arr[i][0]) { - *size = arr[i][0]; - *func_id = arr[i][1]; - return 0; - } - } - return -1; -} - -static size_t -max_of(int n, ...) 
-{ - va_list args; - size_t len = 0, num; - int i; - - va_start(args, n); - len = va_arg(args, size_t); - - for (i = 0; i < n - 1; i++) { - num = va_arg(args, size_t); - if (num > len) - len = num; - } - va_end(args); - - return len; -} - -static int -qat_asym_check_nonzero(rte_crypto_param n) -{ - if (n.length < 8) { - /* Not a case for any cryptograpic function except for DH - * generator which very often can be of one byte length - */ - size_t i; - - if (n.data[n.length - 1] == 0x0) { - for (i = 0; i < n.length - 1; i++) - if (n.data[i] != 0x0) - break; - if (i == n.length - 1) - return -(EINVAL); - } - } else if (*(uint64_t *)&n.data[ - n.length - 8] == 0) { - /* Very likely it is zeroed modulus */ - size_t i; - - for (i = 0; i < n.length - 8; i++) - if (n.data[i] != 0x0) - break; - if (i == n.length - 8) - return -(EINVAL); - } - - return 0; -} - -static int -qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op, - struct icp_qat_fw_pke_request *qat_req, - struct qat_asym_op_cookie *cookie, - struct rte_crypto_asym_xform *xform) -{ - int err = 0; - size_t alg_size; - size_t alg_size_in_bytes; - uint32_t func_id = 0; - - if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { - err = qat_asym_check_nonzero(xform->modex.modulus); - if (err) { - QAT_LOG(ERR, "Empty modulus in modular exponentiation," - " aborting this operation"); - return err; - } - - alg_size_in_bytes = max_of(3, asym_op->modex.base.length, - xform->modex.exponent.length, - xform->modex.modulus.length); - alg_size = alg_size_in_bytes << 3; - - if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE, - sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE), - &alg_size, &func_id)) { - return -(EINVAL); - } - - alg_size_in_bytes = alg_size >> 3; - rte_memcpy(cookie->input_array[0] + alg_size_in_bytes - - asym_op->modex.base.length - , asym_op->modex.base.data, - asym_op->modex.base.length); - rte_memcpy(cookie->input_array[1] + alg_size_in_bytes - - xform->modex.exponent.length - , xform->modex.exponent.data, - xform->modex.exponent.length); - rte_memcpy(cookie->input_array[2] + alg_size_in_bytes - - xform->modex.modulus.length, - xform->modex.modulus.data, - xform->modex.modulus.length); - cookie->alg_size = alg_size; - qat_req->pke_hdr.cd_pars.func_id = func_id; - qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS; - qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base", - cookie->input_array[0], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent", - cookie->input_array[1], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, " ModExpmodulus", - cookie->input_array[2], - alg_size_in_bytes); -#endif - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { - err = qat_asym_check_nonzero(xform->modinv.modulus); - if (err) { - QAT_LOG(ERR, "Empty modulus in modular multiplicative" - " inverse, aborting this operation"); - return err; - } - - alg_size_in_bytes = max_of(2, asym_op->modinv.base.length, - xform->modinv.modulus.length); - alg_size = alg_size_in_bytes << 3; - - if (xform->modinv.modulus.data[ - xform->modinv.modulus.length - 1] & 0x01) { - if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD, - sizeof(MOD_INV_IDS_ODD)/ - sizeof(*MOD_INV_IDS_ODD), - &alg_size, &func_id)) { - return -(EINVAL); - } - } else { - if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN, - sizeof(MOD_INV_IDS_EVEN)/ - sizeof(*MOD_INV_IDS_EVEN), - &alg_size, &func_id)) { - return -(EINVAL); - } - } - - alg_size_in_bytes = alg_size >> 3; - 
rte_memcpy(cookie->input_array[0] + alg_size_in_bytes - - asym_op->modinv.base.length - , asym_op->modinv.base.data, - asym_op->modinv.base.length); - rte_memcpy(cookie->input_array[1] + alg_size_in_bytes - - xform->modinv.modulus.length - , xform->modinv.modulus.data, - xform->modinv.modulus.length); - cookie->alg_size = alg_size; - qat_req->pke_hdr.cd_pars.func_id = func_id; - qat_req->input_param_count = - QAT_ASYM_MODINV_NUM_IN_PARAMS; - qat_req->output_param_count = - QAT_ASYM_MODINV_NUM_OUT_PARAMS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base", - cookie->input_array[0], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus", - cookie->input_array[1], - alg_size_in_bytes); -#endif - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { - err = qat_asym_check_nonzero(xform->rsa.n); - if (err) { - QAT_LOG(ERR, "Empty modulus in RSA" - " inverse, aborting this operation"); - return err; - } - - alg_size_in_bytes = xform->rsa.n.length; - alg_size = alg_size_in_bytes << 3; - - qat_req->input_param_count = - QAT_ASYM_RSA_NUM_IN_PARAMS; - qat_req->output_param_count = - QAT_ASYM_RSA_NUM_OUT_PARAMS; - - if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT || - asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_VERIFY) { - - if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS, - sizeof(RSA_ENC_IDS)/ - sizeof(*RSA_ENC_IDS), - &alg_size, &func_id)) { - err = -(EINVAL); - QAT_LOG(ERR, - "Not supported RSA parameter size (key)"); - return err; - } - alg_size_in_bytes = alg_size >> 3; - if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_ENCRYPT) { - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(cookie->input_array[0] + - alg_size_in_bytes - - asym_op->rsa.message.length - , asym_op->rsa.message.data, - asym_op->rsa.message.length); - break; - default: - err = -(EINVAL); - QAT_LOG(ERR, - "Invalid RSA padding (Encryption)"); - return err; - } -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message", - cookie->input_array[0], - alg_size_in_bytes); -#endif - } else { - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(cookie->input_array[0], - asym_op->rsa.sign.data, - alg_size_in_bytes); - break; - default: - err = -(EINVAL); - QAT_LOG(ERR, - "Invalid RSA padding (Verify)"); - return err; - } - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, " RSA Signature", - cookie->input_array[0], - alg_size_in_bytes); -#endif - - } - rte_memcpy(cookie->input_array[1] + - alg_size_in_bytes - - xform->rsa.e.length - , xform->rsa.e.data, - xform->rsa.e.length); - rte_memcpy(cookie->input_array[2] + - alg_size_in_bytes - - xform->rsa.n.length, - xform->rsa.n.data, - xform->rsa.n.length); - - cookie->alg_size = alg_size; - qat_req->pke_hdr.cd_pars.func_id = func_id; - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key", - cookie->input_array[1], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus", - cookie->input_array[2], - alg_size_in_bytes); -#endif - } else { - if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_DECRYPT) { - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(cookie->input_array[0] - + alg_size_in_bytes - - asym_op->rsa.cipher.length, - asym_op->rsa.cipher.data, - asym_op->rsa.cipher.length); - break; - default: - QAT_LOG(ERR, - "Invalid padding of RSA (Decrypt)"); - return -(EINVAL); - } - - } else if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_SIGN) { - switch (asym_op->rsa.pad) { - case 
RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(cookie->input_array[0] - + alg_size_in_bytes - - asym_op->rsa.message.length, - asym_op->rsa.message.data, - asym_op->rsa.message.length); - break; - default: - QAT_LOG(ERR, - "Invalid padding of RSA (Signature)"); - return -(EINVAL); - } - } - if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) { - - qat_req->input_param_count = - QAT_ASYM_RSA_QT_NUM_IN_PARAMS; - if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS, - sizeof(RSA_DEC_CRT_IDS)/ - sizeof(*RSA_DEC_CRT_IDS), - &alg_size, &func_id)) { - return -(EINVAL); - } - alg_size_in_bytes = alg_size >> 3; - - rte_memcpy(cookie->input_array[1] + - (alg_size_in_bytes >> 1) - - xform->rsa.qt.p.length - , xform->rsa.qt.p.data, - xform->rsa.qt.p.length); - rte_memcpy(cookie->input_array[2] + - (alg_size_in_bytes >> 1) - - xform->rsa.qt.q.length - , xform->rsa.qt.q.data, - xform->rsa.qt.q.length); - rte_memcpy(cookie->input_array[3] + - (alg_size_in_bytes >> 1) - - xform->rsa.qt.dP.length - , xform->rsa.qt.dP.data, - xform->rsa.qt.dP.length); - rte_memcpy(cookie->input_array[4] + - (alg_size_in_bytes >> 1) - - xform->rsa.qt.dQ.length - , xform->rsa.qt.dQ.data, - xform->rsa.qt.dQ.length); - rte_memcpy(cookie->input_array[5] + - (alg_size_in_bytes >> 1) - - xform->rsa.qt.qInv.length - , xform->rsa.qt.qInv.data, - xform->rsa.qt.qInv.length); - cookie->alg_size = alg_size; - qat_req->pke_hdr.cd_pars.func_id = func_id; - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "C", - cookie->input_array[0], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "p", - cookie->input_array[1], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "q", - cookie->input_array[2], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, - "dP", cookie->input_array[3], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, - "dQ", cookie->input_array[4], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, - "qInv", cookie->input_array[5], - alg_size_in_bytes); -#endif - } else if (xform->rsa.key_type == - RTE_RSA_KEY_TYPE_EXP) { - if (qat_asym_get_sz_and_func_id( - RSA_DEC_IDS, - sizeof(RSA_DEC_IDS)/ - sizeof(*RSA_DEC_IDS), - &alg_size, &func_id)) { - return -(EINVAL); - } - alg_size_in_bytes = alg_size >> 3; - rte_memcpy(cookie->input_array[1] + - alg_size_in_bytes - - xform->rsa.d.length, - xform->rsa.d.data, - xform->rsa.d.length); - rte_memcpy(cookie->input_array[2] + - alg_size_in_bytes - - xform->rsa.n.length, - xform->rsa.n.data, - xform->rsa.n.length); -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext", - cookie->input_array[0], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d", - cookie->input_array[1], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n", - cookie->input_array[2], - alg_size_in_bytes); -#endif - - cookie->alg_size = alg_size; - qat_req->pke_hdr.cd_pars.func_id = func_id; - } else { - QAT_LOG(ERR, "Invalid RSA key type"); - return -(EINVAL); - } - } - } else { - QAT_LOG(ERR, "Invalid asymmetric crypto xform"); - return -(EINVAL); - } - return 0; -} - -static __rte_always_inline int -qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie, - __rte_unused uint64_t *opaque, - __rte_unused enum qat_device_gen dev_gen) -{ - struct qat_asym_session *ctx; - struct rte_crypto_op *op = (struct rte_crypto_op *)in_op; - struct rte_crypto_asym_op *asym_op = op->asym; - struct icp_qat_fw_pke_request *qat_req = - (struct icp_qat_fw_pke_request *)out_msg; - struct qat_asym_op_cookie *cookie = - (struct qat_asym_op_cookie *)op_cookie; - int err = 0; - - op->status = 
RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - ctx = (struct qat_asym_session *) - get_asym_session_private_data( - op->asym->session, qat_asym_driver_id); - if (unlikely(ctx == NULL)) { - QAT_LOG(ERR, "Session has not been created for this device"); - goto error; - } - rte_mov64((uint8_t *)qat_req, - (const uint8_t *)&(ctx->req_tmpl)); - err = qat_asym_fill_arrays(asym_op, qat_req, - cookie, ctx->xform); - if (err) { - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - goto error; - } - } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { - qat_fill_req_tmpl(qat_req); - err = qat_asym_fill_arrays(asym_op, qat_req, cookie, - op->asym->xform); - if (err) { - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - goto error; - } - } else { - QAT_DP_LOG(ERR, "Invalid session/xform settings"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; - goto error; - } - - qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op; - qat_req->pke_mid.src_data_addr = cookie->input_addr; - qat_req->pke_mid.dest_data_addr = cookie->output_addr; - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, - sizeof(struct icp_qat_fw_pke_request)); -#endif - - return 0; -error: - - qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op; - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, - sizeof(struct icp_qat_fw_pke_request)); -#endif - - qat_req->output_param_count = 0; - qat_req->input_param_count = 0; - qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL; - cookie->error |= err; - - return 0; -} - -static uint16_t -qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops, - nb_ops); -} - -static uint16_t -qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response, - nb_ops); -} - -int -qat_asym_dev_create(struct qat_pci_device *qat_pci_dev, - struct qat_dev_cmd_param *qat_dev_cmd_param) -{ - struct qat_cryptodev_private *internals; - struct rte_cryptodev *cryptodev; - struct qat_device_info *qat_dev_instance = - &qat_pci_devs[qat_pci_dev->qat_dev_id]; - struct rte_cryptodev_pmd_init_params init_params = { - .name = "", - .socket_id = qat_dev_instance->pci_dev->device.numa_node, - .private_data_size = sizeof(struct qat_cryptodev_private) - }; - struct qat_capabilities_info capa_info; - const struct rte_cryptodev_capabilities *capabilities; - const struct qat_crypto_gen_dev_ops *gen_dev_ops = - &qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen]; - char name[RTE_CRYPTODEV_NAME_MAX_LEN]; - char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; - uint64_t capa_size; - int i = 0; - - snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", - qat_pci_dev->name, "asym"); - QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name); - - if (gen_dev_ops->cryptodev_ops == NULL) { - QAT_LOG(ERR, "Device %s does not support asymmetric crypto", - name); - return -(EFAULT); - } - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - qat_pci_dev->qat_asym_driver_id = - qat_asym_driver_id; - } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) { - if (qat_pci_dev->qat_asym_driver_id != - qat_asym_driver_id) { - QAT_LOG(ERR, - "Device %s have different driver id than corresponding device in primary process", - name); - return -(EFAULT); - } - } - - /* Populate subset device to use in cryptodev device creation */ - 
qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver; - qat_dev_instance->asym_rte_dev.numa_node = - qat_dev_instance->pci_dev->device.numa_node; - qat_dev_instance->asym_rte_dev.devargs = NULL; - - cryptodev = rte_cryptodev_pmd_create(name, - &(qat_dev_instance->asym_rte_dev), &init_params); - - if (cryptodev == NULL) - return -ENODEV; - - qat_dev_instance->asym_rte_dev.name = cryptodev->data->name; - cryptodev->driver_id = qat_asym_driver_id; - cryptodev->dev_ops = gen_dev_ops->cryptodev_ops; - - cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst; - cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst; - - cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev); - - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; - - snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, - "QAT_ASYM_CAPA_GEN_%d", - qat_pci_dev->qat_dev_gen); - - internals = cryptodev->data->dev_private; - internals->qat_dev = qat_pci_dev; - internals->dev_id = cryptodev->data->dev_id; - - capa_info = gen_dev_ops->get_capabilities(qat_pci_dev); - capabilities = capa_info.data; - capa_size = capa_info.size; - - internals->capa_mz = rte_memzone_lookup(capa_memz_name); - if (internals->capa_mz == NULL) { - internals->capa_mz = rte_memzone_reserve(capa_memz_name, - capa_size, rte_socket_id(), 0); - if (internals->capa_mz == NULL) { - QAT_LOG(DEBUG, - "Error allocating memzone for capabilities, " - "destroying PMD for %s", - name); - rte_cryptodev_pmd_destroy(cryptodev); - memset(&qat_dev_instance->asym_rte_dev, 0, - sizeof(qat_dev_instance->asym_rte_dev)); - return -EFAULT; - } - } - - memcpy(internals->capa_mz->addr, capabilities, capa_size); - internals->qat_dev_capabilities = internals->capa_mz->addr; - - while (1) { - if (qat_dev_cmd_param[i].name == NULL) - break; - if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME)) - internals->min_enq_burst_threshold = - qat_dev_cmd_param[i].val; - i++; - } - - qat_pci_dev->asym_dev = internals; - internals->service_type = QAT_SERVICE_ASYMMETRIC; - QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d", - cryptodev->data->name, internals->dev_id); - return 0; -} - -int -qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev) -{ - struct rte_cryptodev *cryptodev; - - if (qat_pci_dev == NULL) - return -ENODEV; - if (qat_pci_dev->asym_dev == NULL) - return 0; - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_memzone_free(qat_pci_dev->asym_dev->capa_mz); - - /* free crypto device */ - cryptodev = rte_cryptodev_pmd_get_dev( - qat_pci_dev->asym_dev->dev_id); - rte_cryptodev_pmd_destroy(cryptodev); - qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL; - qat_pci_dev->asym_dev = NULL; - - return 0; -} - -static struct cryptodev_driver qat_crypto_drv; -RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, - cryptodev_qat_asym_driver, - qat_asym_driver_id); diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h deleted file mode 100644 index 6d3d991bc7..0000000000 --- a/drivers/crypto/qat/qat_asym_refactor.h +++ /dev/null @@ -1,125 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 Intel Corporation - */ - -#ifndef _QAT_ASYM_H_ -#define _QAT_ASYM_H_ - -#include -#include -#include "icp_qat_fw_pke.h" -#include "qat_device.h" -#include "qat_crypto.h" -#include "icp_qat_fw.h" - -/** Intel(R) QAT Asymmetric Crypto PMD driver name */ -#define CRYPTODEV_NAME_QAT_ASYM_PMD crypto_qat_asym - -typedef uint64_t large_int_ptr; -#define MAX_PKE_PARAMS 8 
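/*
 * Illustrative sketch, not part of the patch: the capability handling in
 * qat_asym_dev_create() above uses the usual DPDK lookup-or-reserve idiom so
 * that secondary processes attach to the memzone created by the primary. A
 * condensed, standalone version of just that idiom, with an invented zone
 * name and helper, is:
 */
#include <string.h>
#include <rte_memzone.h>
#include <rte_lcore.h>

static const struct rte_memzone *
example_publish_capa(const char *mz_name, const void *capa, size_t capa_len)
{
	const struct rte_memzone *mz = rte_memzone_lookup(mz_name);

	if (mz != NULL)
		return mz;	/* already created, e.g. by the primary */

	mz = rte_memzone_reserve(mz_name, capa_len, rte_socket_id(), 0);
	if (mz == NULL)
		return NULL;	/* caller tears the device down, as above */

	/* Publish the capability table for all processes to share. */
	memcpy(mz->addr, capa, capa_len);
	return mz;
}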
-#define QAT_PKE_MAX_LN_SIZE 512 -#define _PKE_ALIGN_ __rte_aligned(8) - -#define QAT_ASYM_MAX_PARAMS 8 -#define QAT_ASYM_MODINV_NUM_IN_PARAMS 2 -#define QAT_ASYM_MODINV_NUM_OUT_PARAMS 1 -#define QAT_ASYM_MODEXP_NUM_IN_PARAMS 3 -#define QAT_ASYM_MODEXP_NUM_OUT_PARAMS 1 -#define QAT_ASYM_RSA_NUM_IN_PARAMS 3 -#define QAT_ASYM_RSA_NUM_OUT_PARAMS 1 -#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS 6 - -/** - * helper function to add an asym capability - * - **/ -#define QAT_ASYM_CAP(n, o, l, r, i) \ - { \ - .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, \ - {.asym = { \ - .xform_capa = { \ - .xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\ - .op_types = o, \ - { \ - .modlen = { \ - .min = l, \ - .max = r, \ - .increment = i \ - }, } \ - } \ - }, \ - } \ - } - -struct qat_asym_op_cookie { - size_t alg_size; - uint64_t error; - rte_iova_t input_addr; - rte_iova_t output_addr; - large_int_ptr input_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_; - large_int_ptr output_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_; - union { - uint8_t input_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE]; - uint8_t input_buffer[MAX_PKE_PARAMS * QAT_PKE_MAX_LN_SIZE]; - } _PKE_ALIGN_; - uint8_t output_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE] _PKE_ALIGN_; -} _PKE_ALIGN_; - -struct qat_asym_session { - struct icp_qat_fw_pke_request req_tmpl; - struct rte_crypto_asym_xform *xform; -}; - -static inline void -qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req) -{ - memset(qat_req, 0, sizeof(*qat_req)); - qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; - - qat_req->pke_hdr.hdr_flags = - ICP_QAT_FW_COMN_HDR_FLAGS_BUILD - (ICP_QAT_FW_COMN_REQ_FLAG_SET); -} - -static inline void -qat_asym_build_req_tmpl(void *sess_private_data) -{ - struct icp_qat_fw_pke_request *qat_req; - struct qat_asym_session *session = sess_private_data; - - qat_req = &session->req_tmpl; - qat_fill_req_tmpl(qat_req); -} - -int -qat_asym_session_configure(struct rte_cryptodev *dev, - struct rte_crypto_asym_xform *xform, - struct rte_cryptodev_asym_session *sess, - struct rte_mempool *mempool); - -unsigned int -qat_asym_session_get_private_size(struct rte_cryptodev *dev); - -void -qat_asym_session_clear(struct rte_cryptodev *dev, - struct rte_cryptodev_asym_session *sess); - -/* - * Process PKE response received from outgoing queue of QAT - * - * @param op a ptr to the rte_crypto_op referred to by - * the response message is returned in this param - * @param resp icp_qat_fw_pke_resp message received from - * outgoing fw message queue - * @param op_cookie Cookie pointer that holds private metadata - * @param dequeue_err_count Error count number pointer - * - */ -int -qat_asym_process_response(void __rte_unused * *op, uint8_t *resp, - void *op_cookie, __rte_unused uint64_t *dequeue_err_count); - -void -qat_asym_init_op_cookie(void *cookie); - -#endif /* _QAT_ASYM_H_ */ diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h index 8fe3f0b061..203f74f46f 100644 --- a/drivers/crypto/qat/qat_crypto.h +++ b/drivers/crypto/qat/qat_crypto.h @@ -12,7 +12,10 @@ extern uint8_t qat_sym_driver_id; extern uint8_t qat_asym_driver_id; -/** helper macro to set cryptodev capability range **/ +/** + * helper macro to set cryptodev capability range + * + **/ #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i} #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0} diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c index de687de857..b78e6a6ef3 100644 --- a/drivers/crypto/qat/qat_sym.c +++ b/drivers/crypto/qat/qat_sym.c @@ 
-11,273 +11,107 @@ #include #include "qat_sym.h" +#include "qat_crypto.h" +#include "qat_qp.h" +uint8_t qat_sym_driver_id; -/** Decrypt a single partial block - * Depends on openssl libcrypto - * Uses ECB+XOR to do CFB encryption, same result, more performant - */ -static inline int -bpi_cipher_decrypt(uint8_t *src, uint8_t *dst, - uint8_t *iv, int ivlen, int srclen, - void *bpi_ctx) -{ - EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; - int encrypted_ivlen; - uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN]; - uint8_t *encr = encrypted_iv; - - /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */ - if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) - <= 0) - goto cipher_decrypt_err; - - for (; srclen != 0; --srclen, ++dst, ++src, ++encr) - *dst = *src ^ *encr; - - return 0; - -cipher_decrypt_err: - QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed"); - return -EINVAL; -} - - -static inline uint32_t -qat_bpicipher_preprocess(struct qat_sym_session *ctx, - struct rte_crypto_op *op) -{ - int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); - struct rte_crypto_sym_op *sym_op = op->sym; - uint8_t last_block_len = block_len > 0 ? - sym_op->cipher.data.length % block_len : 0; - - if (last_block_len && - ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) { - - /* Decrypt last block */ - uint8_t *last_block, *dst, *iv; - uint32_t last_block_offset = sym_op->cipher.data.offset + - sym_op->cipher.data.length - last_block_len; - last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, - uint8_t *, last_block_offset); - - if (unlikely((sym_op->m_dst != NULL) - && (sym_op->m_dst != sym_op->m_src))) - /* out-of-place operation (OOP) */ - dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, - uint8_t *, last_block_offset); - else - dst = last_block; - - if (last_block_len < sym_op->cipher.data.length) - /* use previous block ciphertext as IV */ - iv = last_block - block_len; - else - /* runt block, i.e. 
less than one full block */ - iv = rte_crypto_op_ctod_offset(op, uint8_t *, - ctx->cipher_iv.offset); - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:", - last_block, last_block_len); - if (sym_op->m_dst != NULL) - QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:", - dst, last_block_len); -#endif - bpi_cipher_decrypt(last_block, dst, iv, block_len, - last_block_len, ctx->bpi_ctx); -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:", - last_block, last_block_len); - if (sym_op->m_dst != NULL) - QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:", - dst, last_block_len); -#endif - } - - return sym_op->cipher.data.length - last_block_len; -} - -static inline void -set_cipher_iv(uint16_t iv_length, uint16_t iv_offset, - struct icp_qat_fw_la_cipher_req_params *cipher_param, - struct rte_crypto_op *op, - struct icp_qat_fw_la_bulk_req *qat_req) -{ - /* copy IV into request if it fits */ - if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) { - rte_memcpy(cipher_param->u.cipher_IV_array, - rte_crypto_op_ctod_offset(op, uint8_t *, - iv_offset), - iv_length); - } else { - ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_CIPH_IV_64BIT_PTR); - cipher_param->u.s.cipher_IV_ptr = - rte_crypto_op_ctophys_offset(op, - iv_offset); - } -} +struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS]; -/** Set IV for CCM is special case, 0th byte is set to q-1 - * where q is padding of nonce in 16 byte block +/* An rte_driver is needed in the registration of both the device and the driver + * with cryptodev. + * The actual qat pci's rte_driver can't be used as its name represents + * the whole pci device with all services. Think of this as a holder for a name + * for the crypto part of the pci device. 
*/ -static inline void -set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset, - struct icp_qat_fw_la_cipher_req_params *cipher_param, - struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz) +static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD); +static const struct rte_driver cryptodev_qat_sym_driver = { + .name = qat_sym_drv_name, + .alias = qat_sym_drv_name +}; + +void +qat_sym_init_op_cookie(void *op_cookie) { - rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) + - ICP_QAT_HW_CCM_NONCE_OFFSET, - rte_crypto_op_ctod_offset(op, uint8_t *, - iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET, - iv_length); - *(uint8_t *)&cipher_param->u.cipher_IV_array[0] = - q - ICP_QAT_HW_CCM_NONCE_OFFSET; - - if (aad_len_field_sz) - rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET], - rte_crypto_op_ctod_offset(op, uint8_t *, - iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET, - iv_length); -} - -/** Handle Single-Pass AES-GMAC on QAT GEN3 */ -static inline void -handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op, - struct qat_sym_op_cookie *cookie, - struct icp_qat_fw_la_bulk_req *qat_req) -{ - static const uint32_t ver_key_offset = - sizeof(struct icp_qat_hw_auth_setup) + - ICP_QAT_HW_GALOIS_128_STATE1_SZ + - ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ + - ICP_QAT_HW_GALOIS_E_CTR0_SZ + - sizeof(struct icp_qat_hw_cipher_config); - struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = - (void *) &qat_req->cd_ctrl; - struct icp_qat_fw_la_cipher_req_params *cipher_param = - (void *) &qat_req->serv_specif_rqpars; - uint32_t data_length = op->sym->auth.data.length; - - /* Fill separate Content Descriptor for this op */ - rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key, - ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ? - ctx->cd.cipher.key : - RTE_PTR_ADD(&ctx->cd, ver_key_offset), - ctx->auth_key_length); - cookie->opt.spc_gmac.cd_cipher.cipher_config.val = - ICP_QAT_HW_CIPHER_CONFIG_BUILD( - ICP_QAT_HW_CIPHER_AEAD_MODE, - ctx->qat_cipher_alg, - ICP_QAT_HW_CIPHER_NO_CONVERT, - (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ? 
- ICP_QAT_HW_CIPHER_ENCRYPT : - ICP_QAT_HW_CIPHER_DECRYPT)); - QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val, - ctx->digest_length, - QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS, - QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK); - cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved = - ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length); - - /* Update the request */ - qat_req->cd_pars.u.s.content_desc_addr = - cookie->opt.spc_gmac.cd_phys_addr; - qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL( - sizeof(struct icp_qat_hw_cipher_config) + - ctx->auth_key_length, 8) >> 3; - qat_req->comn_mid.src_length = data_length; - qat_req->comn_mid.dst_length = 0; - - cipher_param->spc_aad_addr = 0; - cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr; - cipher_param->spc_aad_sz = data_length; - cipher_param->reserved = 0; - cipher_param->spc_auth_res_sz = ctx->digest_length; - - qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER; - cipher_cd_ctrl->cipher_cfg_offset = 0; - ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); - ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); - ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_LA_SINGLE_PASS_PROTO); - ICP_QAT_FW_LA_PROTO_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_LA_NO_PROTO); + struct qat_sym_op_cookie *cookie = op_cookie; + + cookie->qat_sgl_src_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_sym_op_cookie, + qat_sgl_src); + + cookie->qat_sgl_dst_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_sym_op_cookie, + qat_sgl_dst); + + cookie->opt.spc_gmac.cd_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_sym_op_cookie, + opt.spc_gmac.cd_cipher); } static __rte_always_inline int -refactor_qat_sym_build_request(__rte_unused void *in_op, - __rte_unused uint8_t *out_msg, __rte_unused void *op_cookie, - __rte_unused uint64_t *opaque, - __rte_unused enum qat_device_gen dev_gen) -{ - return 0; -} - -uint16_t -refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return refactor_qat_enqueue_op_burst(qp, refactor_qat_sym_build_request, - (void **)ops, nb_ops); -} - -uint16_t -refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return refactor_qat_dequeue_op_burst(qp, (void **)ops, - refactor_qat_sym_process_response, nb_ops); -} - -int qat_sym_build_request(void *in_op, uint8_t *out_msg, - void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen) + void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen) { - int ret = 0; - struct qat_sym_session *ctx = NULL; - struct icp_qat_fw_la_cipher_req_params *cipher_param; - struct icp_qat_fw_la_cipher_20_req_params *cipher_param20; - struct icp_qat_fw_la_auth_req_params *auth_param; - register struct icp_qat_fw_la_bulk_req *qat_req; - uint8_t do_auth = 0, do_cipher = 0, do_aead = 0; - uint32_t cipher_len = 0, cipher_ofs = 0; - uint32_t auth_len = 0, auth_ofs = 0; - uint32_t min_ofs = 0; - uint64_t src_buf_start = 0, dst_buf_start = 0; - uint64_t auth_data_end = 0; - uint8_t do_sgl = 0; - uint8_t in_place = 1; - int alignment_adjustment = 0; - int oop_shift = 0; struct rte_crypto_op *op = (struct rte_crypto_op *)in_op; - struct qat_sym_op_cookie *cookie = - (struct qat_sym_op_cookie *)op_cookie; + void *sess = (void *)opaque[0]; + qat_sym_build_request_t build_request = (void *)opaque[1]; + struct qat_sym_session *ctx = NULL; - if (unlikely(op->type 
!= RTE_CRYPTO_OP_TYPE_SYMMETRIC)) { - QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto " - "operation requests, op (%p) is not a " - "symmetric operation.", op); - return -EINVAL; + if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) { + ctx = get_sym_session_private_data(op->sym->session, + qat_sym_driver_id); + if (unlikely(!ctx)) { + QAT_DP_LOG(ERR, "No session for this device"); + return -EINVAL; + } + if (sess != ctx) { + struct rte_cryptodev *cdev; + struct qat_cryptodev_private *internals; + enum rte_proc_type_t proc_type; + + cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id); + internals = cdev->data->dev_private; + proc_type = rte_eal_process_type(); + + if (internals->qat_dev->qat_dev_gen != dev_gen) { + op->status = + RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return -EINVAL; + } + + if (unlikely(ctx->build_request[proc_type] == NULL)) { + int ret = + qat_sym_gen_dev_ops[dev_gen].set_session( + (void *)cdev, sess); + if (ret < 0) { + op->status = + RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return -EINVAL; + } + } + + build_request = ctx->build_request[proc_type]; + opaque[0] = (uintptr_t)ctx; + opaque[1] = (uintptr_t)build_request; + } } - if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) { - QAT_DP_LOG(ERR, "QAT PMD only supports session oriented" - " requests, op (%p) is sessionless.", op); - return -EINVAL; - } else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - ctx = (struct qat_sym_session *)get_sym_session_private_data( - op->sym->session, qat_sym_driver_id); #ifdef RTE_LIB_SECURITY - } else { - ctx = (struct qat_sym_session *)get_sec_session_private_data( - op->sym->sec_session); - if (likely(ctx)) { + else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { + if (sess != (void *)op->sym->sec_session) { + struct rte_cryptodev *cdev; + struct qat_cryptodev_private *internals; + enum rte_proc_type_t proc_type; + + ctx = get_sec_session_private_data( + op->sym->sec_session); + if (unlikely(!ctx)) { + QAT_DP_LOG(ERR, "No session for this device"); + return -EINVAL; + } if (unlikely(ctx->bpi_ctx == NULL)) { QAT_DP_LOG(ERR, "QAT PMD only supports security" " operation requests for" @@ -293,463 +127,284 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg, op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; return -EINVAL; } - } -#endif - } + cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id); + internals = cdev->data->dev_private; + proc_type = rte_eal_process_type(); - if (unlikely(ctx == NULL)) { - QAT_DP_LOG(ERR, "Session was not created for this device"); - return -EINVAL; - } + if (internals->qat_dev->qat_dev_gen != dev_gen) { + op->status = + RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return -EINVAL; + } - qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg; - rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req)); - qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op; - cipher_param = (void *)&qat_req->serv_specif_rqpars; - cipher_param20 = (void *)&qat_req->serv_specif_rqpars; - auth_param = (void *)((uint8_t *)cipher_param + - ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); - - if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || - ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) && - !ctx->is_gmac) { - /* AES-GCM or AES-CCM */ - if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || - (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 - && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE - && ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { - do_aead = 1; - } else { - do_auth = 1; 
- do_cipher = 1; + if (unlikely(ctx->build_request[proc_type] == NULL)) { + int ret = + qat_sym_gen_dev_ops[dev_gen].set_session( + (void *)cdev, sess); + if (ret < 0) { + op->status = + RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return -EINVAL; + } + } + + sess = (void *)op->sym->sec_session; + build_request = ctx->build_request[proc_type]; + opaque[0] = (uintptr_t)sess; + opaque[1] = (uintptr_t)build_request; } - } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) { - do_auth = 1; - do_cipher = 0; - } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { - do_auth = 0; - do_cipher = 1; + } +#endif + else { /* RTE_CRYPTO_OP_SESSIONLESS */ + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + QAT_LOG(DEBUG, "QAT does not support sessionless operation"); + return -1; } - if (do_cipher) { + return build_request(op, (void *)ctx, out_msg, op_cookie); +} - if (ctx->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || - ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI || - ctx->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { +uint16_t +qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return qat_enqueue_op_burst(qp, qat_sym_build_request, + (void **)ops, nb_ops); +} - if (unlikely( - (op->sym->cipher.data.length % BYTE_LENGTH != 0) || - (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) { - QAT_DP_LOG(ERR, - "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - return -EINVAL; - } - cipher_len = op->sym->cipher.data.length >> 3; - cipher_ofs = op->sym->cipher.data.offset >> 3; - - } else if (ctx->bpi_ctx) { - /* DOCSIS - only send complete blocks to device. - * Process any partial block using CFB mode. - * Even if 0 complete blocks, still send this to device - * to get into rx queue for post-process and dequeuing - */ - cipher_len = qat_bpicipher_preprocess(ctx, op); - cipher_ofs = op->sym->cipher.data.offset; - } else { - cipher_len = op->sym->cipher.data.length; - cipher_ofs = op->sym->cipher.data.offset; - } +uint16_t +qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return qat_dequeue_op_burst(qp, (void **)ops, + qat_sym_process_response, nb_ops); +} - set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset, - cipher_param, op, qat_req); - min_ofs = cipher_ofs; +int +qat_sym_dev_create(struct qat_pci_device *qat_pci_dev, + struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused) +{ + int i = 0, ret = 0; + struct qat_device_info *qat_dev_instance = + &qat_pci_devs[qat_pci_dev->qat_dev_id]; + struct rte_cryptodev_pmd_init_params init_params = { + .name = "", + .socket_id = qat_dev_instance->pci_dev->device.numa_node, + .private_data_size = sizeof(struct qat_cryptodev_private) + }; + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + struct rte_cryptodev *cryptodev; + struct qat_cryptodev_private *internals; + struct qat_capabilities_info capa_info; + const struct rte_cryptodev_capabilities *capabilities; + const struct qat_crypto_gen_dev_ops *gen_dev_ops = + &qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen]; + uint64_t capa_size; + + snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", + qat_pci_dev->name, "sym"); + QAT_LOG(DEBUG, "Creating QAT SYM device %s", name); + + if (gen_dev_ops->cryptodev_ops == NULL) { + QAT_LOG(ERR, "Device %s does not support symmetric crypto", + name); + return -(EFAULT); } - if (do_auth) { - - if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 || - 
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 || - ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) { - if (unlikely( - (op->sym->auth.data.offset % BYTE_LENGTH != 0) || - (op->sym->auth.data.length % BYTE_LENGTH != 0))) { - QAT_DP_LOG(ERR, - "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - return -EINVAL; - } - auth_ofs = op->sym->auth.data.offset >> 3; - auth_len = op->sym->auth.data.length >> 3; - - auth_param->u1.aad_adr = - rte_crypto_op_ctophys_offset(op, - ctx->auth_iv.offset); - - } else if (ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { - /* AES-GMAC */ - set_cipher_iv(ctx->auth_iv.length, - ctx->auth_iv.offset, - cipher_param, op, qat_req); - auth_ofs = op->sym->auth.data.offset; - auth_len = op->sym->auth.data.length; - - auth_param->u1.aad_adr = 0; - auth_param->u2.aad_sz = 0; - - } else { - auth_ofs = op->sym->auth.data.offset; - auth_len = op->sym->auth.data.length; - + /* + * All processes must use the same driver id so they can share sessions. + * Store driver_id so we can validate that all processes have the same + * value; typically they do, but they could differ if the binaries were + * built separately. + */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + qat_pci_dev->qat_sym_driver_id = + qat_sym_driver_id; + } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + if (qat_pci_dev->qat_sym_driver_id != + qat_sym_driver_id) { + QAT_LOG(ERR, + "Device %s has a different driver id than the corresponding device in the primary process", + name); + return -(EFAULT); } - min_ofs = auth_ofs; - - if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL || - ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY) - auth_param->auth_res_addr = - op->sym->auth.digest.phys_addr; - } - if (do_aead) { - /* - * This address may used for setting AAD physical pointer - * into IV offset from op - */ - rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr; - if (ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { - - set_cipher_iv(ctx->cipher_iv.length, - ctx->cipher_iv.offset, - cipher_param, op, qat_req); - - } else if (ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) { - - /* In case of AES-CCM this may point to user selected - * memory or iv offset in cypto_op - */ - uint8_t *aad_data = op->sym->aead.aad.data; - /* This is true AAD length, it not includes 18 bytes of - * preceding data - */ - uint8_t aad_ccm_real_len = 0; - uint8_t aad_len_field_sz = 0; - uint32_t msg_len_be = - rte_bswap32(op->sym->aead.data.length); - - if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) { - aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO; - aad_ccm_real_len = ctx->aad_len - - ICP_QAT_HW_CCM_AAD_B0_LEN - - ICP_QAT_HW_CCM_AAD_LEN_INFO; - } else { - /* - * aad_len not greater than 18, so no actual aad - * data, then use IV after op for B0 block - */ - aad_data = rte_crypto_op_ctod_offset(op, - uint8_t *, - ctx->cipher_iv.offset); - aad_phys_addr_aead = - rte_crypto_op_ctophys_offset(op, - ctx->cipher_iv.offset); - } + /* Populate subset device to use in cryptodev device creation */ + qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver; + qat_dev_instance->sym_rte_dev.numa_node = + qat_dev_instance->pci_dev->device.numa_node; + qat_dev_instance->sym_rte_dev.devargs = NULL; - uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - - ctx->cipher_iv.length; - - aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
aad_len_field_sz, - ctx->digest_length, q); - - if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) { - memcpy(aad_data + ctx->cipher_iv.length + - ICP_QAT_HW_CCM_NONCE_OFFSET + - (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE), - (uint8_t *)&msg_len_be, - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE); - } else { - memcpy(aad_data + ctx->cipher_iv.length + - ICP_QAT_HW_CCM_NONCE_OFFSET, - (uint8_t *)&msg_len_be - + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE - - q), q); - } + cryptodev = rte_cryptodev_pmd_create(name, + &(qat_dev_instance->sym_rte_dev), &init_params); - if (aad_len_field_sz > 0) { - *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] - = rte_bswap16(aad_ccm_real_len); - - if ((aad_ccm_real_len + aad_len_field_sz) - % ICP_QAT_HW_CCM_AAD_B0_LEN) { - uint8_t pad_len = 0; - uint8_t pad_idx = 0; - - pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN - - ((aad_ccm_real_len + aad_len_field_sz) % - ICP_QAT_HW_CCM_AAD_B0_LEN); - pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN + - aad_ccm_real_len + aad_len_field_sz; - memset(&aad_data[pad_idx], - 0, pad_len); - } + if (cryptodev == NULL) + return -ENODEV; - } + qat_dev_instance->sym_rte_dev.name = cryptodev->data->name; + cryptodev->driver_id = qat_sym_driver_id; + cryptodev->dev_ops = gen_dev_ops->cryptodev_ops; - set_cipher_iv_ccm(ctx->cipher_iv.length, - ctx->cipher_iv.offset, - cipher_param, op, q, - aad_len_field_sz); + cryptodev->enqueue_burst = qat_sym_enqueue_burst; + cryptodev->dequeue_burst = qat_sym_dequeue_burst; - } + cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev); - cipher_len = op->sym->aead.data.length; - cipher_ofs = op->sym->aead.data.offset; - auth_len = op->sym->aead.data.length; - auth_ofs = op->sym->aead.data.offset; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; - auth_param->u1.aad_adr = aad_phys_addr_aead; - auth_param->auth_res_addr = op->sym->aead.digest.phys_addr; - min_ofs = op->sym->aead.data.offset; - } - - if (op->sym->m_src->nb_segs > 1 || - (op->sym->m_dst && op->sym->m_dst->nb_segs > 1)) - do_sgl = 1; - - /* adjust for chain case */ - if (do_cipher && do_auth) - min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs; - - if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl)) - min_ofs = 0; - - if (unlikely((op->sym->m_dst != NULL) && - (op->sym->m_dst != op->sym->m_src))) { - /* Out-of-place operation (OOP) - * Don't align DMA start. 
DMA the minimum data-set - * so as not to overwrite data in dest buffer - */ - in_place = 0; - src_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs); - dst_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs); - oop_shift = min_ofs; - - } else { - /* In-place operation - * Start DMA at nearest aligned address below min_ofs - */ - src_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs) - & QAT_64_BTYE_ALIGN_MASK; - - if (unlikely((rte_pktmbuf_iova(op->sym->m_src) - - rte_pktmbuf_headroom(op->sym->m_src)) - > src_buf_start)) { - /* alignment has pushed addr ahead of start of mbuf - * so revert and take the performance hit - */ - src_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_src, - min_ofs); +#ifdef RTE_LIB_SECURITY + if (gen_dev_ops->create_security_ctx) { + cryptodev->security_ctx = + gen_dev_ops->create_security_ctx((void *)cryptodev); + if (cryptodev->security_ctx == NULL) { + QAT_LOG(ERR, "rte_security_ctx memory alloc failed"); + ret = -ENOMEM; + goto error; } - dst_buf_start = src_buf_start; - - /* remember any adjustment for later, note, can be +/- */ - alignment_adjustment = src_buf_start - - rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs); - } - if (do_cipher || do_aead) { - cipher_param->cipher_offset = - (uint32_t)rte_pktmbuf_iova_offset( - op->sym->m_src, cipher_ofs) - src_buf_start; - cipher_param->cipher_length = cipher_len; + cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY; + QAT_LOG(INFO, "Device %s rte_security support enabled", name); } else { - cipher_param->cipher_offset = 0; - cipher_param->cipher_length = 0; + QAT_LOG(INFO, "Device %s rte_security support disabled", name); } - - if (!ctx->is_single_pass) { - /* Do not let to overwrite spc_aad len */ - if (do_auth || do_aead) { - auth_param->auth_off = - (uint32_t)rte_pktmbuf_iova_offset( - op->sym->m_src, auth_ofs) - src_buf_start; - auth_param->auth_len = auth_len; - } else { - auth_param->auth_off = 0; - auth_param->auth_len = 0; +#endif + snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, + "QAT_SYM_CAPA_GEN_%d", + qat_pci_dev->qat_dev_gen); + + internals = cryptodev->data->dev_private; + internals->qat_dev = qat_pci_dev; + + internals->dev_id = cryptodev->data->dev_id; + + capa_info = gen_dev_ops->get_capabilities(qat_pci_dev); + capabilities = capa_info.data; + capa_size = capa_info.size; + + internals->capa_mz = rte_memzone_lookup(capa_memz_name); + if (internals->capa_mz == NULL) { + internals->capa_mz = rte_memzone_reserve(capa_memz_name, + capa_size, rte_socket_id(), 0); + if (internals->capa_mz == NULL) { + QAT_LOG(DEBUG, + "Error allocating memzone for capabilities, " + "destroying PMD for %s", + name); + ret = -EFAULT; + goto error; } } - qat_req->comn_mid.dst_length = - qat_req->comn_mid.src_length = - (cipher_param->cipher_offset + cipher_param->cipher_length) - > (auth_param->auth_off + auth_param->auth_len) ? - (cipher_param->cipher_offset + cipher_param->cipher_length) - : (auth_param->auth_off + auth_param->auth_len); - - if (do_auth && do_cipher) { - /* Handle digest-encrypted cases, i.e. - * auth-gen-then-cipher-encrypt and - * cipher-decrypt-then-auth-verify - */ - /* First find the end of the data */ - if (do_sgl) { - uint32_t remaining_off = auth_param->auth_off + - auth_param->auth_len + alignment_adjustment + oop_shift; - struct rte_mbuf *sgl_buf = - (in_place ?
- op->sym->m_src : op->sym->m_dst); - - while (remaining_off >= rte_pktmbuf_data_len(sgl_buf) - && sgl_buf->next != NULL) { - remaining_off -= rte_pktmbuf_data_len(sgl_buf); - sgl_buf = sgl_buf->next; - } + memcpy(internals->capa_mz->addr, capabilities, capa_size); + internals->qat_dev_capabilities = internals->capa_mz->addr; - auth_data_end = (uint64_t)rte_pktmbuf_iova_offset( - sgl_buf, remaining_off); - } else { - auth_data_end = (in_place ? - src_buf_start : dst_buf_start) + - auth_param->auth_off + auth_param->auth_len; - } - /* Then check if digest-encrypted conditions are met */ - if ((auth_param->auth_off + auth_param->auth_len < - cipher_param->cipher_offset + - cipher_param->cipher_length) && - (op->sym->auth.digest.phys_addr == - auth_data_end)) { - /* Handle partial digest encryption */ - if (cipher_param->cipher_offset + - cipher_param->cipher_length < - auth_param->auth_off + - auth_param->auth_len + - ctx->digest_length) - qat_req->comn_mid.dst_length = - qat_req->comn_mid.src_length = - auth_param->auth_off + - auth_param->auth_len + - ctx->digest_length; - struct icp_qat_fw_comn_req_hdr *header = - &qat_req->comn_hdr; - ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET( - header->serv_specif_flags, - ICP_QAT_FW_LA_DIGEST_IN_BUFFER); - } + while (1) { + if (qat_dev_cmd_param[i].name == NULL) + break; + if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME)) + internals->min_enq_burst_threshold = + qat_dev_cmd_param[i].val; + i++; } - if (do_sgl) { + internals->service_type = QAT_SERVICE_SYMMETRIC; + qat_pci_dev->sym_dev = internals; + QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d", + cryptodev->data->name, internals->dev_id); - ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags, - QAT_COMN_PTR_TYPE_SGL); - ret = qat_sgl_fill_array(op->sym->m_src, - (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)), - &cookie->qat_sgl_src, - qat_req->comn_mid.src_length, - QAT_SYM_SGL_MAX_NUMBER); + return 0; - if (unlikely(ret)) { - QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array"); - return ret; - } +error: +#ifdef RTE_LIB_SECURITY + rte_free(cryptodev->security_ctx); + cryptodev->security_ctx = NULL; +#endif + rte_cryptodev_pmd_destroy(cryptodev); + memset(&qat_dev_instance->sym_rte_dev, 0, + sizeof(qat_dev_instance->sym_rte_dev)); - if (in_place) - qat_req->comn_mid.dest_data_addr = - qat_req->comn_mid.src_data_addr = - cookie->qat_sgl_src_phys_addr; - else { - ret = qat_sgl_fill_array(op->sym->m_dst, - (int64_t)(dst_buf_start - - rte_pktmbuf_iova(op->sym->m_dst)), - &cookie->qat_sgl_dst, - qat_req->comn_mid.dst_length, - QAT_SYM_SGL_MAX_NUMBER); - - if (unlikely(ret)) { - QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array"); - return ret; - } + return ret; +} - qat_req->comn_mid.src_data_addr = - cookie->qat_sgl_src_phys_addr; - qat_req->comn_mid.dest_data_addr = - cookie->qat_sgl_dst_phys_addr; - } - qat_req->comn_mid.src_length = 0; - qat_req->comn_mid.dst_length = 0; - } else { - qat_req->comn_mid.src_data_addr = src_buf_start; - qat_req->comn_mid.dest_data_addr = dst_buf_start; - } +int +qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev) +{ + struct rte_cryptodev *cryptodev; - if (ctx->is_single_pass) { - if (ctx->is_ucs) { - /* GEN 4 */ - cipher_param20->spc_aad_addr = - op->sym->aead.aad.phys_addr; - cipher_param20->spc_auth_res_addr = - op->sym->aead.digest.phys_addr; - } else { - cipher_param->spc_aad_addr = - op->sym->aead.aad.phys_addr; - cipher_param->spc_auth_res_addr = - op->sym->aead.digest.phys_addr; - } - } else if 
(ctx->is_single_pass_gmac && - op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) { - /* Handle Single-Pass AES-GMAC */ - handle_spc_gmac(ctx, op, cookie, qat_req); - } + if (qat_pci_dev == NULL) + return -ENODEV; + if (qat_pci_dev->sym_dev == NULL) + return 0; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_memzone_free(qat_pci_dev->sym_dev->capa_mz); -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, - sizeof(struct icp_qat_fw_la_bulk_req)); - QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", - rte_pktmbuf_mtod(op->sym->m_src, uint8_t*), - rte_pktmbuf_data_len(op->sym->m_src)); - if (do_cipher) { - uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op, - uint8_t *, - ctx->cipher_iv.offset); - QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr, - ctx->cipher_iv.length); - } + /* free crypto device */ + cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id); +#ifdef RTE_LIB_SECURITY + rte_free(cryptodev->security_ctx); + cryptodev->security_ctx = NULL; +#endif + rte_cryptodev_pmd_destroy(cryptodev); + qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL; + qat_pci_dev->sym_dev = NULL; - if (do_auth) { - if (ctx->auth_iv.length) { - uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op, - uint8_t *, - ctx->auth_iv.offset); - QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr, - ctx->auth_iv.length); - } - QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data, - ctx->digest_length); + return 0; +} + +int +qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id, + struct rte_crypto_raw_dp_ctx *raw_dp_ctx, + enum rte_crypto_op_sess_type sess_type, + union rte_cryptodev_session_ctx session_ctx, uint8_t is_update) +{ + struct qat_cryptodev_private *internals = dev->data->dev_private; + enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen; + struct qat_crypto_gen_dev_ops *gen_dev_ops = + &qat_sym_gen_dev_ops[qat_dev_gen]; + struct qat_qp *qp; + struct qat_sym_session *ctx; + struct qat_sym_dp_ctx *dp_ctx; + + if (!gen_dev_ops->set_raw_dp_ctx) { + QAT_LOG(ERR, "Device GEN %u does not support raw data path", + qat_dev_gen); + return -ENOTSUP; } - if (do_aead) { - QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data, - ctx->digest_length); - QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, - ctx->aad_len); + qp = dev->data->queue_pairs[qp_id]; + dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data; + + if (!is_update) { + memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) + + sizeof(struct qat_sym_dp_ctx)); + raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id]; + dp_ctx->tail = qp->tx_q.tail; + dp_ctx->head = qp->rx_q.head; + dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0; } -#endif - return 0; + + if (sess_type != RTE_CRYPTO_OP_WITH_SESSION) + return -EINVAL; + + ctx = (struct qat_sym_session *)get_sym_session_private_data( + session_ctx.crypto_sess, qat_sym_driver_id); + + dp_ctx->session = ctx; + + return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx); +} + + +int +qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused) +{ + return sizeof(struct qat_sym_dp_ctx); } + +static struct cryptodev_driver qat_crypto_drv; +RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, + cryptodev_qat_sym_driver, + qat_sym_driver_id); diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h index 4801bd50a7..278abbfd3a 100644 --- a/drivers/crypto/qat/qat_sym.h +++ b/drivers/crypto/qat/qat_sym.h @@ -15,15 +15,75 @@ #include "qat_common.h" #include "qat_sym_session.h" -#include 
"qat_sym_pmd.h" +#include "qat_crypto.h" #include "qat_logs.h" +#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat + #define BYTE_LENGTH 8 /* bpi is only used for partial blocks of DES and AES * so AES block len can be assumed as max len for iv, src and dst */ #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ +/* Internal capabilities */ +#define QAT_SYM_CAP_MIXED_CRYPTO (1 << 0) +#define QAT_SYM_CAP_VALID (1 << 31) + +/** + * Macro to add a sym capability + * helper function to add an sym capability + * + * + **/ +#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d) \ + { \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_##n, \ + b, d \ + }, } \ + }, } \ + } + +#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i) \ + { \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_##n, \ + b, k, d, a, i \ + }, } \ + }, } \ + } + +#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i) \ + { \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ + {.aead = { \ + .algo = RTE_CRYPTO_AEAD_##n, \ + b, k, d, a, i \ + }, } \ + }, } \ + } + +#define QAT_SYM_CIPHER_CAP(n, b, k, i) \ + { \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_##n, \ + b, k, i \ + }, } \ + }, } \ + } + /* * Maximum number of SGL entries */ @@ -62,27 +122,14 @@ struct qat_sym_dp_ctx { uint16_t cached_dequeue; }; -static __rte_always_inline int -refactor_qat_sym_process_response(__rte_unused void **op, - __rte_unused uint8_t *resp, __rte_unused void *op_cookie, - __rte_unused uint64_t *dequeue_err_count) -{ - return 0; -} - uint16_t -refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, +qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops); uint16_t -refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, +qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops); -int -qat_sym_build_request(void *in_op, uint8_t *out_msg, - void *op_cookie, enum qat_device_gen qat_dev_gen); - - /** Encrypt a single partial block * Depends on openssl libcrypto * Uses ECB+XOR to do CFB encryption, same result, more performant @@ -237,17 +284,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops) } } } -#else - -static inline void -qat_sym_preprocess_requests(void **ops __rte_unused, - uint16_t nb_ops __rte_unused) -{ -} #endif -static inline void -qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie) +static __rte_always_inline int +qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie, + uint64_t *dequeue_err_count __rte_unused) { struct icp_qat_fw_comn_resp *resp_msg = (struct icp_qat_fw_comn_resp *)resp; @@ -306,6 +347,8 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie) } *op = (void *)rx_op; + + return 1; } int @@ -317,6 +360,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id, int qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev); +void +qat_sym_init_op_cookie(void *cookie); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG +static __rte_always_inline void +qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req, + struct qat_sym_session *ctx, + struct rte_crypto_vec *vec, uint32_t vec_len, + struct rte_crypto_va_iova_ptr *cipher_iv, + struct rte_crypto_va_iova_ptr *auth_iv, + struct rte_crypto_va_iova_ptr *aad, + struct 
rte_crypto_va_iova_ptr *digest) +{ + uint32_t i; + + QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, + sizeof(struct icp_qat_fw_la_bulk_req)); + for (i = 0; i < vec_len; i++) + QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len); + if (cipher_iv && ctx->cipher_iv.length > 0) + QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va, + ctx->cipher_iv.length); + if (auth_iv && ctx->auth_iv.length > 0) + QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va, + ctx->auth_iv.length); + if (aad && ctx->aad_len > 0) + QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va, + ctx->aad_len); + if (digest && ctx->digest_length > 0) + QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va, + ctx->digest_length); +} +#else +static __rte_always_inline void +qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused, + struct qat_sym_session *ctx __rte_unused, + struct rte_crypto_vec *vec __rte_unused, + uint32_t vec_len __rte_unused, + struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused, + struct rte_crypto_va_iova_ptr *auth_iv __rte_unused, + struct rte_crypto_va_iova_ptr *aad __rte_unused, + struct rte_crypto_va_iova_ptr *digest __rte_unused) +{ +} +#endif + #else static inline void @@ -331,5 +420,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused, { } -#endif +#endif /* BUILD_QAT_SYM */ #endif /* _QAT_SYM_H_ */ diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c deleted file mode 100644 index af75ac2011..0000000000 --- a/drivers/crypto/qat/qat_sym_hw_dp.c +++ /dev/null @@ -1,975 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2020 Intel Corporation - */ - -#include - -#include "adf_transport_access_macros.h" -#include "icp_qat_fw.h" -#include "icp_qat_fw_la.h" - -#include "qat_sym_refactor.h" -#include "qat_sym_pmd.h" -#include "qat_sym_session.h" -#include "qat_qp.h" - -static __rte_always_inline int32_t -qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req, - struct rte_crypto_vec *data, uint16_t n_data_vecs) -{ - struct qat_queue *tx_queue; - struct qat_sym_op_cookie *cookie; - struct qat_sgl *list; - uint32_t i; - uint32_t total_len; - - if (likely(n_data_vecs == 1)) { - req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr = - data[0].iova; - req->comn_mid.src_length = req->comn_mid.dst_length = - data[0].len; - return data[0].len; - } - - if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER) - return -1; - - total_len = 0; - tx_queue = &qp->tx_q; - - ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags, - QAT_COMN_PTR_TYPE_SGL); - cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz]; - list = (struct qat_sgl *)&cookie->qat_sgl_src; - - for (i = 0; i < n_data_vecs; i++) { - list->buffers[i].len = data[i].len; - list->buffers[i].resrvd = 0; - list->buffers[i].addr = data[i].iova; - if (total_len + data[i].len > UINT32_MAX) { - QAT_DP_LOG(ERR, "Message too long"); - return -1; - } - total_len += data[i].len; - } - - list->num_bufs = i; - req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr = - cookie->qat_sgl_src_phys_addr; - req->comn_mid.src_length = req->comn_mid.dst_length = 0; - return total_len; -} - -static __rte_always_inline void -set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param, - struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len, - struct icp_qat_fw_la_bulk_req *qat_req) -{ - /* copy IV into request if it fits */ - if (iv_len <= sizeof(cipher_param->u.cipher_IV_array)) - rte_memcpy(cipher_param->u.cipher_IV_array, 
iv_ptr->va, - iv_len); - else { - ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_CIPH_IV_64BIT_PTR); - cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova; - } -} - -#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \ - (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \ - ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status)) - -static __rte_always_inline void -qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n) -{ - uint32_t i; - - for (i = 0; i < n; i++) - sta[i] = status; -} - -#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \ - RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n) - -static __rte_always_inline void -enqueue_one_cipher_job(struct qat_sym_session *ctx, - struct icp_qat_fw_la_bulk_req *req, - struct rte_crypto_va_iova_ptr *iv, - union rte_crypto_sym_ofs ofs, uint32_t data_len) -{ - struct icp_qat_fw_la_cipher_req_params *cipher_param; - - cipher_param = (void *)&req->serv_specif_rqpars; - - /* cipher IV */ - set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req); - cipher_param->cipher_offset = ofs.ofs.cipher.head; - cipher_param->cipher_length = data_len - ofs.ofs.cipher.head - - ofs.ofs.cipher.tail; -} - -static __rte_always_inline int -qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx, - struct rte_crypto_vec *data, uint16_t n_data_vecs, - union rte_crypto_sym_ofs ofs, - struct rte_crypto_va_iova_ptr *iv, - struct rte_crypto_va_iova_ptr *digest __rte_unused, - struct rte_crypto_va_iova_ptr *aad __rte_unused, - void *user_data) -{ - struct qat_qp *qp = qp_data; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - struct qat_queue *tx_queue = &qp->tx_q; - struct qat_sym_session *ctx = dp_ctx->session; - struct icp_qat_fw_la_bulk_req *req; - int32_t data_len; - uint32_t tail = dp_ctx->tail; - - req = (struct icp_qat_fw_la_bulk_req *)( - (uint8_t *)tx_queue->base_addr + tail); - tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; - rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); - data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs); - if (unlikely(data_len < 0)) - return -1; - req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data; - - enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len); - - dp_ctx->tail = tail; - dp_ctx->cached_enqueue++; - - return 0; -} - -static __rte_always_inline uint32_t -qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx, - struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, - void *user_data[], int *status) -{ - struct qat_qp *qp = qp_data; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - struct qat_queue *tx_queue = &qp->tx_q; - struct qat_sym_session *ctx = dp_ctx->session; - uint32_t i, n; - uint32_t tail; - struct icp_qat_fw_la_bulk_req *req; - int32_t data_len; - - n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); - if (unlikely(n == 0)) { - qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); - *status = 0; - return 0; - } - - tail = dp_ctx->tail; - - for (i = 0; i < n; i++) { - req = (struct icp_qat_fw_la_bulk_req *)( - (uint8_t *)tx_queue->base_addr + tail); - rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - - data_len = qat_sym_dp_parse_data_vec(qp, req, - vec->src_sgl[i].vec, - vec->src_sgl[i].num); - if (unlikely(data_len < 0)) - break; - req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i]; - enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs, - (uint32_t)data_len); - tail = (tail + tx_queue->msg_size) & 
tx_queue->modulo_mask; - } - - if (unlikely(i < n)) - qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); - - dp_ctx->tail = tail; - dp_ctx->cached_enqueue += i; - *status = 0; - return i; -} - -static __rte_always_inline void -enqueue_one_auth_job(struct qat_sym_session *ctx, - struct icp_qat_fw_la_bulk_req *req, - struct rte_crypto_va_iova_ptr *digest, - struct rte_crypto_va_iova_ptr *auth_iv, - union rte_crypto_sym_ofs ofs, uint32_t data_len) -{ - struct icp_qat_fw_la_cipher_req_params *cipher_param; - struct icp_qat_fw_la_auth_req_params *auth_param; - - cipher_param = (void *)&req->serv_specif_rqpars; - auth_param = (void *)((uint8_t *)cipher_param + - ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); - - auth_param->auth_off = ofs.ofs.auth.head; - auth_param->auth_len = data_len - ofs.ofs.auth.head - - ofs.ofs.auth.tail; - auth_param->auth_res_addr = digest->iova; - - switch (ctx->qat_hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: - case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: - case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: - auth_param->u1.aad_adr = auth_iv->iova; - break; - case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: - case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: - ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( - req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); - rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va, - ctx->auth_iv.length); - break; - default: - break; - } -} - -static __rte_always_inline int -qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx, - struct rte_crypto_vec *data, uint16_t n_data_vecs, - union rte_crypto_sym_ofs ofs, - struct rte_crypto_va_iova_ptr *iv __rte_unused, - struct rte_crypto_va_iova_ptr *digest, - struct rte_crypto_va_iova_ptr *auth_iv, - void *user_data) -{ - struct qat_qp *qp = qp_data; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - struct qat_queue *tx_queue = &qp->tx_q; - struct qat_sym_session *ctx = dp_ctx->session; - struct icp_qat_fw_la_bulk_req *req; - int32_t data_len; - uint32_t tail = dp_ctx->tail; - - req = (struct icp_qat_fw_la_bulk_req *)( - (uint8_t *)tx_queue->base_addr + tail); - tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; - rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); - data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs); - if (unlikely(data_len < 0)) - return -1; - req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data; - - enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs, - (uint32_t)data_len); - - dp_ctx->tail = tail; - dp_ctx->cached_enqueue++; - - return 0; -} - -static __rte_always_inline uint32_t -qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx, - struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, - void *user_data[], int *status) -{ - struct qat_qp *qp = qp_data; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - struct qat_queue *tx_queue = &qp->tx_q; - struct qat_sym_session *ctx = dp_ctx->session; - uint32_t i, n; - uint32_t tail; - struct icp_qat_fw_la_bulk_req *req; - int32_t data_len; - - n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); - if (unlikely(n == 0)) { - qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); - *status = 0; - return 0; - } - - tail = dp_ctx->tail; - - for (i = 0; i < n; i++) { - req = (struct icp_qat_fw_la_bulk_req *)( - (uint8_t *)tx_queue->base_addr + tail); - rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - - data_len = qat_sym_dp_parse_data_vec(qp, req, - vec->src_sgl[i].vec, - vec->src_sgl[i].num); - if 
(unlikely(data_len < 0)) - break; - req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i]; - enqueue_one_auth_job(ctx, req, &vec->digest[i], - &vec->auth_iv[i], ofs, (uint32_t)data_len); - tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; - } - - if (unlikely(i < n)) - qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); - - dp_ctx->tail = tail; - dp_ctx->cached_enqueue += i; - *status = 0; - return i; -} - -static __rte_always_inline int -enqueue_one_chain_job(struct qat_sym_session *ctx, - struct icp_qat_fw_la_bulk_req *req, - struct rte_crypto_vec *data, - uint16_t n_data_vecs, - struct rte_crypto_va_iova_ptr *cipher_iv, - struct rte_crypto_va_iova_ptr *digest, - struct rte_crypto_va_iova_ptr *auth_iv, - union rte_crypto_sym_ofs ofs, uint32_t data_len) -{ - struct icp_qat_fw_la_cipher_req_params *cipher_param; - struct icp_qat_fw_la_auth_req_params *auth_param; - rte_iova_t auth_iova_end; - int32_t cipher_len, auth_len; - - cipher_param = (void *)&req->serv_specif_rqpars; - auth_param = (void *)((uint8_t *)cipher_param + - ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); - - cipher_len = data_len - ofs.ofs.cipher.head - - ofs.ofs.cipher.tail; - auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail; - - if (unlikely(cipher_len < 0 || auth_len < 0)) - return -1; - - cipher_param->cipher_offset = ofs.ofs.cipher.head; - cipher_param->cipher_length = cipher_len; - set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req); - - auth_param->auth_off = ofs.ofs.auth.head; - auth_param->auth_len = auth_len; - auth_param->auth_res_addr = digest->iova; - - switch (ctx->qat_hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: - case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: - case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: - auth_param->u1.aad_adr = auth_iv->iova; - break; - case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: - case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: - break; - default: - break; - } - - if (unlikely(n_data_vecs > 1)) { - int auth_end_get = 0, i = n_data_vecs - 1; - struct rte_crypto_vec *cvec = &data[0]; - uint32_t len; - - len = data_len - ofs.ofs.auth.tail; - - while (i >= 0 && len > 0) { - if (cvec->len >= len) { - auth_iova_end = cvec->iova + len; - len = 0; - auth_end_get = 1; - break; - } - len -= cvec->len; - i--; - cvec++; - } - - if (unlikely(auth_end_get == 0)) - return -1; - } else - auth_iova_end = data[0].iova + auth_param->auth_off + - auth_param->auth_len; - - /* Then check if digest-encrypted conditions are met */ - if ((auth_param->auth_off + auth_param->auth_len < - cipher_param->cipher_offset + - cipher_param->cipher_length) && - (digest->iova == auth_iova_end)) { - /* Handle partial digest encryption */ - if (cipher_param->cipher_offset + - cipher_param->cipher_length < - auth_param->auth_off + - auth_param->auth_len + - ctx->digest_length) - req->comn_mid.dst_length = - req->comn_mid.src_length = - auth_param->auth_off + - auth_param->auth_len + - ctx->digest_length; - struct icp_qat_fw_comn_req_hdr *header = - &req->comn_hdr; - ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET( - header->serv_specif_flags, - ICP_QAT_FW_LA_DIGEST_IN_BUFFER); - } - - return 0; -} - -static __rte_always_inline int -qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx, - struct rte_crypto_vec *data, uint16_t n_data_vecs, - union rte_crypto_sym_ofs ofs, - struct rte_crypto_va_iova_ptr *cipher_iv, - struct rte_crypto_va_iova_ptr *digest, - struct rte_crypto_va_iova_ptr *auth_iv, - void *user_data) -{ - struct qat_qp *qp = qp_data; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - 
struct qat_queue *tx_queue = &qp->tx_q; - struct qat_sym_session *ctx = dp_ctx->session; - struct icp_qat_fw_la_bulk_req *req; - int32_t data_len; - uint32_t tail = dp_ctx->tail; - - req = (struct icp_qat_fw_la_bulk_req *)( - (uint8_t *)tx_queue->base_addr + tail); - tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; - rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); - data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs); - if (unlikely(data_len < 0)) - return -1; - req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data; - - if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs, - cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len))) - return -1; - - dp_ctx->tail = tail; - dp_ctx->cached_enqueue++; - - return 0; -} - -static __rte_always_inline uint32_t -qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx, - struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, - void *user_data[], int *status) -{ - struct qat_qp *qp = qp_data; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - struct qat_queue *tx_queue = &qp->tx_q; - struct qat_sym_session *ctx = dp_ctx->session; - uint32_t i, n; - uint32_t tail; - struct icp_qat_fw_la_bulk_req *req; - int32_t data_len; - - n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); - if (unlikely(n == 0)) { - qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); - *status = 0; - return 0; - } - - tail = dp_ctx->tail; - - for (i = 0; i < n; i++) { - req = (struct icp_qat_fw_la_bulk_req *)( - (uint8_t *)tx_queue->base_addr + tail); - rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - - data_len = qat_sym_dp_parse_data_vec(qp, req, - vec->src_sgl[i].vec, - vec->src_sgl[i].num); - if (unlikely(data_len < 0)) - break; - req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i]; - if (unlikely(enqueue_one_chain_job(ctx, req, - vec->src_sgl[i].vec, vec->src_sgl[i].num, - &vec->iv[i], &vec->digest[i], - &vec->auth_iv[i], ofs, (uint32_t)data_len))) - break; - - tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; - } - - if (unlikely(i < n)) - qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); - - dp_ctx->tail = tail; - dp_ctx->cached_enqueue += i; - *status = 0; - return i; -} - -static __rte_always_inline void -enqueue_one_aead_job(struct qat_sym_session *ctx, - struct icp_qat_fw_la_bulk_req *req, - struct rte_crypto_va_iova_ptr *iv, - struct rte_crypto_va_iova_ptr *digest, - struct rte_crypto_va_iova_ptr *aad, - union rte_crypto_sym_ofs ofs, uint32_t data_len) -{ - struct icp_qat_fw_la_cipher_req_params *cipher_param = - (void *)&req->serv_specif_rqpars; - struct icp_qat_fw_la_auth_req_params *auth_param = - (void *)((uint8_t *)&req->serv_specif_rqpars + - ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); - uint8_t *aad_data; - uint8_t aad_ccm_real_len; - uint8_t aad_len_field_sz; - uint32_t msg_len_be; - rte_iova_t aad_iova = 0; - uint8_t q; - - /* CPM 1.7 uses single pass to treat AEAD as cipher operation */ - if (ctx->is_single_pass) { - enqueue_one_cipher_job(ctx, req, iv, ofs, data_len); - cipher_param->spc_aad_addr = aad->iova; - cipher_param->spc_auth_res_addr = digest->iova; - return; - } - - switch (ctx->qat_hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: - case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: - ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( - req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); - rte_memcpy(cipher_param->u.cipher_IV_array, iv->va, - ctx->cipher_iv.length); - aad_iova = 
aad->iova; - break; - case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC: - aad_data = aad->va; - aad_iova = aad->iova; - aad_ccm_real_len = 0; - aad_len_field_sz = 0; - msg_len_be = rte_bswap32((uint32_t)data_len - - ofs.ofs.cipher.head); - - if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) { - aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO; - aad_ccm_real_len = ctx->aad_len - - ICP_QAT_HW_CCM_AAD_B0_LEN - - ICP_QAT_HW_CCM_AAD_LEN_INFO; - } else { - aad_data = iv->va; - aad_iova = iv->iova; - } - - q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length; - aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS( - aad_len_field_sz, ctx->digest_length, q); - if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) { - memcpy(aad_data + ctx->cipher_iv.length + - ICP_QAT_HW_CCM_NONCE_OFFSET + (q - - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE), - (uint8_t *)&msg_len_be, - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE); - } else { - memcpy(aad_data + ctx->cipher_iv.length + - ICP_QAT_HW_CCM_NONCE_OFFSET, - (uint8_t *)&msg_len_be + - (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE - - q), q); - } - - if (aad_len_field_sz > 0) { - *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] = - rte_bswap16(aad_ccm_real_len); - - if ((aad_ccm_real_len + aad_len_field_sz) - % ICP_QAT_HW_CCM_AAD_B0_LEN) { - uint8_t pad_len = 0; - uint8_t pad_idx = 0; - - pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN - - ((aad_ccm_real_len + - aad_len_field_sz) % - ICP_QAT_HW_CCM_AAD_B0_LEN); - pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN + - aad_ccm_real_len + - aad_len_field_sz; - memset(&aad_data[pad_idx], 0, pad_len); - } - } - - rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) - + ICP_QAT_HW_CCM_NONCE_OFFSET, - (uint8_t *)iv->va + - ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length); - *(uint8_t *)&cipher_param->u.cipher_IV_array[0] = - q - ICP_QAT_HW_CCM_NONCE_OFFSET; - - rte_memcpy((uint8_t *)aad->va + - ICP_QAT_HW_CCM_NONCE_OFFSET, - (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET, - ctx->cipher_iv.length); - break; - default: - break; - } - - cipher_param->cipher_offset = ofs.ofs.cipher.head; - cipher_param->cipher_length = data_len - ofs.ofs.cipher.head - - ofs.ofs.cipher.tail; - auth_param->auth_off = ofs.ofs.cipher.head; - auth_param->auth_len = cipher_param->cipher_length; - auth_param->auth_res_addr = digest->iova; - auth_param->u1.aad_adr = aad_iova; -} - -static __rte_always_inline int -qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx, - struct rte_crypto_vec *data, uint16_t n_data_vecs, - union rte_crypto_sym_ofs ofs, - struct rte_crypto_va_iova_ptr *iv, - struct rte_crypto_va_iova_ptr *digest, - struct rte_crypto_va_iova_ptr *aad, - void *user_data) -{ - struct qat_qp *qp = qp_data; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - struct qat_queue *tx_queue = &qp->tx_q; - struct qat_sym_session *ctx = dp_ctx->session; - struct icp_qat_fw_la_bulk_req *req; - int32_t data_len; - uint32_t tail = dp_ctx->tail; - - req = (struct icp_qat_fw_la_bulk_req *)( - (uint8_t *)tx_queue->base_addr + tail); - tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; - rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); - data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs); - if (unlikely(data_len < 0)) - return -1; - req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data; - - enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs, - (uint32_t)data_len); - - dp_ctx->tail = tail; - dp_ctx->cached_enqueue++; - - return 0; -} - -static __rte_always_inline uint32_t -qat_sym_dp_enqueue_aead_jobs(void 
*qp_data, uint8_t *drv_ctx, - struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, - void *user_data[], int *status) -{ - struct qat_qp *qp = qp_data; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - struct qat_queue *tx_queue = &qp->tx_q; - struct qat_sym_session *ctx = dp_ctx->session; - uint32_t i, n; - uint32_t tail; - struct icp_qat_fw_la_bulk_req *req; - int32_t data_len; - - n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); - if (unlikely(n == 0)) { - qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); - *status = 0; - return 0; - } - - tail = dp_ctx->tail; - - for (i = 0; i < n; i++) { - req = (struct icp_qat_fw_la_bulk_req *)( - (uint8_t *)tx_queue->base_addr + tail); - rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); - - data_len = qat_sym_dp_parse_data_vec(qp, req, - vec->src_sgl[i].vec, - vec->src_sgl[i].num); - if (unlikely(data_len < 0)) - break; - req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i]; - enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i], - &vec->aad[i], ofs, (uint32_t)data_len); - tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; - } - - if (unlikely(i < n)) - qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); - - dp_ctx->tail = tail; - dp_ctx->cached_enqueue += i; - *status = 0; - return i; -} - -static __rte_always_inline uint32_t -qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx, - rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, - uint32_t max_nb_to_dequeue, - rte_cryptodev_raw_post_dequeue_t post_dequeue, - void **out_user_data, uint8_t is_user_data_array, - uint32_t *n_success_jobs, int *return_status) -{ - struct qat_qp *qp = qp_data; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - struct qat_queue *rx_queue = &qp->rx_q; - struct icp_qat_fw_comn_resp *resp; - void *resp_opaque; - uint32_t i, n, inflight; - uint32_t head; - uint8_t status; - - *n_success_jobs = 0; - *return_status = 0; - head = dp_ctx->head; - - inflight = qp->enqueued - qp->dequeued; - if (unlikely(inflight == 0)) - return 0; - - resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr + - head); - /* no operation ready */ - if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) - return 0; - - resp_opaque = (void *)(uintptr_t)resp->opaque_data; - /* get the dequeue count */ - if (get_dequeue_count) { - n = get_dequeue_count(resp_opaque); - if (unlikely(n == 0)) - return 0; - } else { - if (unlikely(max_nb_to_dequeue == 0)) - return 0; - n = max_nb_to_dequeue; - } - - out_user_data[0] = resp_opaque; - status = QAT_SYM_DP_IS_RESP_SUCCESS(resp); - post_dequeue(resp_opaque, 0, status); - *n_success_jobs += status; - - head = (head + rx_queue->msg_size) & rx_queue->modulo_mask; - - /* we already finished dequeue when n == 1 */ - if (unlikely(n == 1)) { - i = 1; - goto end_deq; - } - - if (is_user_data_array) { - for (i = 1; i < n; i++) { - resp = (struct icp_qat_fw_comn_resp *)( - (uint8_t *)rx_queue->base_addr + head); - if (unlikely(*(uint32_t *)resp == - ADF_RING_EMPTY_SIG)) - goto end_deq; - out_user_data[i] = (void *)(uintptr_t)resp->opaque_data; - status = QAT_SYM_DP_IS_RESP_SUCCESS(resp); - *n_success_jobs += status; - post_dequeue(out_user_data[i], i, status); - head = (head + rx_queue->msg_size) & - rx_queue->modulo_mask; - } - - goto end_deq; - } - - /* opaque is not array */ - for (i = 1; i < n; i++) { - resp = (struct icp_qat_fw_comn_resp *)( - (uint8_t *)rx_queue->base_addr + head); - status = QAT_SYM_DP_IS_RESP_SUCCESS(resp); - if (unlikely(*(uint32_t *)resp == 
ADF_RING_EMPTY_SIG)) - goto end_deq; - head = (head + rx_queue->msg_size) & - rx_queue->modulo_mask; - post_dequeue(resp_opaque, i, status); - *n_success_jobs += status; - } - -end_deq: - dp_ctx->head = head; - dp_ctx->cached_dequeue += i; - return i; -} - -static __rte_always_inline void * -qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status, - enum rte_crypto_op_status *op_status) -{ - struct qat_qp *qp = qp_data; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - struct qat_queue *rx_queue = &qp->rx_q; - register struct icp_qat_fw_comn_resp *resp; - - resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr + - dp_ctx->head); - - if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) - return NULL; - - dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) & - rx_queue->modulo_mask; - dp_ctx->cached_dequeue++; - - *op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ? - RTE_CRYPTO_OP_STATUS_SUCCESS : - RTE_CRYPTO_OP_STATUS_AUTH_FAILED; - *dequeue_status = 0; - return (void *)(uintptr_t)resp->opaque_data; -} - -static __rte_always_inline int -qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n) -{ - struct qat_qp *qp = qp_data; - struct qat_queue *tx_queue = &qp->tx_q; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - - if (unlikely(dp_ctx->cached_enqueue != n)) - return -1; - - qp->enqueued += n; - qp->stats.enqueued_count += n; - - tx_queue->tail = dp_ctx->tail; - - WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, - tx_queue->hw_bundle_number, - tx_queue->hw_queue_number, tx_queue->tail); - tx_queue->csr_tail = tx_queue->tail; - dp_ctx->cached_enqueue = 0; - - return 0; -} - -static __rte_always_inline int -qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n) -{ - struct qat_qp *qp = qp_data; - struct qat_queue *rx_queue = &qp->rx_q; - struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; - - if (unlikely(dp_ctx->cached_dequeue != n)) - return -1; - - rx_queue->head = dp_ctx->head; - rx_queue->nb_processed_responses += n; - qp->dequeued += n; - qp->stats.dequeued_count += n; - if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) { - uint32_t old_head, new_head; - uint32_t max_head; - - old_head = rx_queue->csr_head; - new_head = rx_queue->head; - max_head = qp->nb_descriptors * rx_queue->msg_size; - - /* write out free descriptors */ - void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head; - - if (new_head < old_head) { - memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, - max_head - old_head); - memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE, - new_head); - } else { - memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - - old_head); - } - rx_queue->nb_processed_responses = 0; - rx_queue->csr_head = new_head; - - /* write current head to CSR */ - WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, - rx_queue->hw_bundle_number, rx_queue->hw_queue_number, - new_head); - } - - dp_ctx->cached_dequeue = 0; - return 0; -} - -int -qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id, - struct rte_crypto_raw_dp_ctx *raw_dp_ctx, - enum rte_crypto_op_sess_type sess_type, - union rte_cryptodev_session_ctx session_ctx, uint8_t is_update) -{ - struct qat_qp *qp; - struct qat_sym_session *ctx; - struct qat_sym_dp_ctx *dp_ctx; - - qp = dev->data->queue_pairs[qp_id]; - dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data; - - if (!is_update) { - memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) + - sizeof(struct qat_sym_dp_ctx)); - raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id]; - dp_ctx->tail = qp->tx_q.tail; - dp_ctx->head = qp->rx_q.head; - 
dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0; - } - - if (sess_type != RTE_CRYPTO_OP_WITH_SESSION) - return -EINVAL; - - ctx = (struct qat_sym_session *)get_sym_session_private_data( - session_ctx.crypto_sess, qat_sym_driver_id); - - dp_ctx->session = ctx; - - raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail; - raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst; - raw_dp_ctx->dequeue = qat_sym_dp_dequeue; - raw_dp_ctx->dequeue_done = qat_sym_dp_update_head; - - if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || - ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) && - !ctx->is_gmac) { - /* AES-GCM or AES-CCM */ - if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || - (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 - && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE - && ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { - raw_dp_ctx->enqueue_burst = - qat_sym_dp_enqueue_aead_jobs; - raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead; - } else { - raw_dp_ctx->enqueue_burst = - qat_sym_dp_enqueue_chain_jobs; - raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain; - } - } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) { - raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs; - raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth; - } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { - if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE || - ctx->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) { - raw_dp_ctx->enqueue_burst = - qat_sym_dp_enqueue_aead_jobs; - raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead; - } else { - raw_dp_ctx->enqueue_burst = - qat_sym_dp_enqueue_cipher_jobs; - raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher; - } - } else - return -1; - - return 0; -} - -int -qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev) -{ - return sizeof(struct qat_sym_dp_ctx); -} diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c deleted file mode 100644 index b835245f17..0000000000 --- a/drivers/crypto/qat/qat_sym_pmd.c +++ /dev/null @@ -1,251 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2018 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#ifdef RTE_LIB_SECURITY -#include -#endif - -#include "qat_logs.h" -#include "qat_crypto.h" -#include "qat_sym.h" -#include "qat_sym_session.h" -#include "qat_sym_pmd.h" - -#define MIXED_CRYPTO_MIN_FW_VER 0x04090000 - -uint8_t qat_sym_driver_id; - -struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS]; - -void -qat_sym_init_op_cookie(void *op_cookie) -{ - struct qat_sym_op_cookie *cookie = op_cookie; - - cookie->qat_sgl_src_phys_addr = - rte_mempool_virt2iova(cookie) + - offsetof(struct qat_sym_op_cookie, - qat_sgl_src); - - cookie->qat_sgl_dst_phys_addr = - rte_mempool_virt2iova(cookie) + - offsetof(struct qat_sym_op_cookie, - qat_sgl_dst); - - cookie->opt.spc_gmac.cd_phys_addr = - rte_mempool_virt2iova(cookie) + - offsetof(struct qat_sym_op_cookie, - opt.spc_gmac.cd_cipher); -} - -static uint16_t -qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return qat_enqueue_op_burst(qp, (void **)ops, nb_ops); -} - -static uint16_t -qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return qat_dequeue_op_burst(qp, (void **)ops, nb_ops); -} - -/* An rte_driver is needed in the registration of both the device and the driver - * with cryptodev. 
- * The actual qat pci's rte_driver can't be used as its name represents - * the whole pci device with all services. Think of this as a holder for a name - * for the crypto part of the pci device. - */ -static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD); -static const struct rte_driver cryptodev_qat_sym_driver = { - .name = qat_sym_drv_name, - .alias = qat_sym_drv_name -}; - -int -qat_sym_dev_create(struct qat_pci_device *qat_pci_dev, - struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused) -{ - int i = 0, ret = 0; - struct qat_device_info *qat_dev_instance = - &qat_pci_devs[qat_pci_dev->qat_dev_id]; - struct rte_cryptodev_pmd_init_params init_params = { - .name = "", - .socket_id = qat_dev_instance->pci_dev->device.numa_node, - .private_data_size = sizeof(struct qat_cryptodev_private) - }; - char name[RTE_CRYPTODEV_NAME_MAX_LEN]; - char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; - struct rte_cryptodev *cryptodev; - struct qat_cryptodev_private *internals; - struct qat_capabilities_info capa_info; - const struct rte_cryptodev_capabilities *capabilities; - const struct qat_crypto_gen_dev_ops *gen_dev_ops = - &qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen]; - uint64_t capa_size; - - snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", - qat_pci_dev->name, "sym"); - QAT_LOG(DEBUG, "Creating QAT SYM device %s", name); - - if (gen_dev_ops->cryptodev_ops == NULL) { - QAT_LOG(ERR, "Device %s does not support symmetric crypto", - name); - return -EFAULT; - } - - /* - * All processes must use same driver id so they can share sessions. - * Store driver_id so we can validate that all processes have the same - * value, typically they have, but could differ if binaries built - * separately. - */ - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - qat_pci_dev->qat_sym_driver_id = - qat_sym_driver_id; - } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) { - if (qat_pci_dev->qat_sym_driver_id != - qat_sym_driver_id) { - QAT_LOG(ERR, - "Device %s have different driver id than corresponding device in primary process", - name); - return -(EFAULT); - } - } - - /* Populate subset device to use in cryptodev device creation */ - qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver; - qat_dev_instance->sym_rte_dev.numa_node = - qat_dev_instance->pci_dev->device.numa_node; - qat_dev_instance->sym_rte_dev.devargs = NULL; - - cryptodev = rte_cryptodev_pmd_create(name, - &(qat_dev_instance->sym_rte_dev), &init_params); - - if (cryptodev == NULL) - return -ENODEV; - - qat_dev_instance->sym_rte_dev.name = cryptodev->data->name; - cryptodev->driver_id = qat_sym_driver_id; - cryptodev->dev_ops = gen_dev_ops->cryptodev_ops; - - cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst; - cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst; - - cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev); - - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; - -#ifdef RTE_LIB_SECURITY - if (gen_dev_ops->create_security_ctx) { - cryptodev->security_ctx = - gen_dev_ops->create_security_ctx((void *)cryptodev); - if (cryptodev->security_ctx == NULL) { - QAT_LOG(ERR, "rte_security_ctx memory alloc failed"); - ret = -ENOMEM; - goto error; - } - - cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY; - QAT_LOG(INFO, "Device %s rte_security support enabled", name); - } else - QAT_LOG(INFO, "Device %s rte_security support disabled", name); - -#endif - snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, - "QAT_SYM_CAPA_GEN_%d", - qat_pci_dev->qat_dev_gen); - - 
internals = cryptodev->data->dev_private; - internals->qat_dev = qat_pci_dev; - internals->service_type = QAT_SERVICE_SYMMETRIC; - internals->dev_id = cryptodev->data->dev_id; - - capa_info = gen_dev_ops->get_capabilities(qat_pci_dev); - capabilities = capa_info.data; - capa_size = capa_info.size; - - internals->capa_mz = rte_memzone_lookup(capa_memz_name); - if (internals->capa_mz == NULL) { - internals->capa_mz = rte_memzone_reserve(capa_memz_name, - capa_size, rte_socket_id(), 0); - if (internals->capa_mz == NULL) { - QAT_LOG(DEBUG, - "Error allocating capability memzon for %s", - name); - ret = -EFAULT; - goto error; - } - } - - memcpy(internals->capa_mz->addr, capabilities, capa_size); - internals->qat_dev_capabilities = internals->capa_mz->addr; - - while (1) { - if (qat_dev_cmd_param[i].name == NULL) - break; - if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME)) - internals->min_enq_burst_threshold = - qat_dev_cmd_param[i].val; - i++; - } - - qat_pci_dev->sym_dev = internals; - QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d", - cryptodev->data->name, internals->dev_id); - - rte_cryptodev_pmd_probing_finish(cryptodev); - - return 0; - -error: -#ifdef RTE_LIB_SECURITY - rte_free(cryptodev->security_ctx); - cryptodev->security_ctx = NULL; -#endif - rte_cryptodev_pmd_destroy(cryptodev); - memset(&qat_dev_instance->sym_rte_dev, 0, - sizeof(qat_dev_instance->sym_rte_dev)); - - return ret; -} - -int -qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev) -{ - struct rte_cryptodev *cryptodev; - - if (qat_pci_dev == NULL) - return -ENODEV; - if (qat_pci_dev->sym_dev == NULL) - return 0; - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_memzone_free(qat_pci_dev->sym_dev->capa_mz); - - /* free crypto device */ - cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id); -#ifdef RTE_LIB_SECURITY - rte_free(cryptodev->security_ctx); - cryptodev->security_ctx = NULL; -#endif - rte_cryptodev_pmd_destroy(cryptodev); - qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL; - qat_pci_dev->sym_dev = NULL; - - return 0; -} - -static struct cryptodev_driver qat_crypto_drv; -RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, - cryptodev_qat_sym_driver, - qat_sym_driver_id); diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h deleted file mode 100644 index 0dc0c6f0d9..0000000000 --- a/drivers/crypto/qat/qat_sym_pmd.h +++ /dev/null @@ -1,95 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2018 Intel Corporation - */ - -#ifndef _QAT_SYM_PMD_H_ -#define _QAT_SYM_PMD_H_ - -#ifdef BUILD_QAT_SYM - -#include -#include -#ifdef RTE_LIB_SECURITY -#include -#endif - -#include "qat_crypto.h" -#include "qat_device.h" - -/** Intel(R) QAT Symmetric Crypto PMD driver name */ -#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat - -/* Internal capabilities */ -#define QAT_SYM_CAP_MIXED_CRYPTO (1 << 0) -#define QAT_SYM_CAP_VALID (1 << 31) - -/** - * Macro to add a sym capability - * helper function to add an sym capability - * - * - **/ -#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d) \ - { \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_##n, \ - b, d \ - }, } \ - }, } \ - } - -#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i) \ - { \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_##n, \ - b, k, d, a, i \ - }, } \ - }, } \ - } - -#define QAT_SYM_AEAD_CAP(n, 
b, k, d, a, i) \ - { \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ - {.aead = { \ - .algo = RTE_CRYPTO_AEAD_##n, \ - b, k, d, a, i \ - }, } \ - }, } \ - } - -#define QAT_SYM_CIPHER_CAP(n, b, k, i) \ - { \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_##n, \ - b, k, i \ - }, } \ - }, } \ - } - -extern uint8_t qat_sym_driver_id; - -extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[]; - -int -qat_sym_dev_create(struct qat_pci_device *qat_pci_dev, - struct qat_dev_cmd_param *qat_dev_cmd_param); - -int -qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev); - -void -qat_sym_init_op_cookie(void *op_cookie); - -#endif -#endif /* _QAT_SYM_PMD_H_ */ diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c deleted file mode 100644 index 82f078ff1e..0000000000 --- a/drivers/crypto/qat/qat_sym_refactor.c +++ /dev/null @@ -1,360 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2019 Intel Corporation - */ - -#include - -#include -#include -#include -#include -#include - -#include "qat_sym_refactor.h" -#include "qat_crypto.h" -#include "qat_qp.h" - -uint8_t qat_sym_driver_id; - -struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS]; - -/* An rte_driver is needed in the registration of both the device and the driver - * with cryptodev. - * The actual qat pci's rte_driver can't be used as its name represents - * the whole pci device with all services. Think of this as a holder for a name - * for the crypto part of the pci device. - */ -static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD); -static const struct rte_driver cryptodev_qat_sym_driver = { - .name = qat_sym_drv_name, - .alias = qat_sym_drv_name -}; - -void -qat_sym_init_op_cookie(void *op_cookie) -{ - struct qat_sym_op_cookie *cookie = op_cookie; - - cookie->qat_sgl_src_phys_addr = - rte_mempool_virt2iova(cookie) + - offsetof(struct qat_sym_op_cookie, - qat_sgl_src); - - cookie->qat_sgl_dst_phys_addr = - rte_mempool_virt2iova(cookie) + - offsetof(struct qat_sym_op_cookie, - qat_sgl_dst); - - cookie->opt.spc_gmac.cd_phys_addr = - rte_mempool_virt2iova(cookie) + - offsetof(struct qat_sym_op_cookie, - opt.spc_gmac.cd_cipher); -} - -static __rte_always_inline int -qat_sym_build_request(void *in_op, uint8_t *out_msg, - void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen) -{ - struct rte_crypto_op *op = (struct rte_crypto_op *)in_op; - void *sess = (void *)opaque[0]; - qat_sym_build_request_t build_request = (void *)opaque[1]; - struct qat_sym_session *ctx = NULL; - - if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) { - ctx = get_sym_session_private_data(op->sym->session, - qat_sym_driver_id); - if (unlikely(!ctx)) { - QAT_DP_LOG(ERR, "No session for this device"); - return -EINVAL; - } - if (sess != ctx) { - struct rte_cryptodev *cdev; - struct qat_cryptodev_private *internals; - enum rte_proc_type_t proc_type; - - cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id); - internals = cdev->data->dev_private; - proc_type = rte_eal_process_type(); - - if (internals->qat_dev->qat_dev_gen != dev_gen) { - op->status = - RTE_CRYPTO_OP_STATUS_INVALID_SESSION; - return -EINVAL; - } - - if (unlikely(ctx->build_request[proc_type] == NULL)) { - int ret = - qat_sym_gen_dev_ops[dev_gen].set_session( - (void *)cdev, sess); - if (ret < 0) { - op->status = - RTE_CRYPTO_OP_STATUS_INVALID_SESSION; - return 
-EINVAL; - } - } - - build_request = ctx->build_request[proc_type]; - opaque[0] = (uintptr_t)ctx; - opaque[1] = (uintptr_t)build_request; - } - } - -#ifdef RTE_LIB_SECURITY - else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { - if (sess != (void *)op->sym->sec_session) { - struct rte_cryptodev *cdev; - struct qat_cryptodev_private *internals; - enum rte_proc_type_t proc_type; - - ctx = get_sec_session_private_data( - op->sym->sec_session); - if (unlikely(!ctx)) { - QAT_DP_LOG(ERR, "No session for this device"); - return -EINVAL; - } - if (unlikely(ctx->bpi_ctx == NULL)) { - QAT_DP_LOG(ERR, "QAT PMD only supports security" - " operation requests for" - " DOCSIS, op (%p) is not for" - " DOCSIS.", op); - return -EINVAL; - } else if (unlikely(((op->sym->m_dst != NULL) && - (op->sym->m_dst != op->sym->m_src)) || - op->sym->m_src->nb_segs > 1)) { - QAT_DP_LOG(ERR, "OOP and/or multi-segment" - " buffers not supported for" - " DOCSIS security."); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - return -EINVAL; - } - cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id); - internals = cdev->data->dev_private; - proc_type = rte_eal_process_type(); - - if (internals->qat_dev->qat_dev_gen != dev_gen) { - op->status = - RTE_CRYPTO_OP_STATUS_INVALID_SESSION; - return -EINVAL; - } - - if (unlikely(ctx->build_request[proc_type] == NULL)) { - int ret = - qat_sym_gen_dev_ops[dev_gen].set_session( - (void *)cdev, sess); - if (ret < 0) { - op->status = - RTE_CRYPTO_OP_STATUS_INVALID_SESSION; - return -EINVAL; - } - } - - sess = (void *)op->sym->sec_session; - build_request = ctx->build_request[proc_type]; - opaque[0] = (uintptr_t)sess; - opaque[1] = (uintptr_t)build_request; - } - } -#endif - else { /* RTE_CRYPTO_OP_SESSIONLESS */ - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - QAT_LOG(DEBUG, "QAT does not support sessionless operation"); - return -1; - } - - return build_request(op, (void *)ctx, out_msg, op_cookie); -} - -uint16_t -qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return qat_enqueue_op_burst(qp, qat_sym_build_request, - (void **)ops, nb_ops); -} - -uint16_t -qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - return qat_dequeue_op_burst(qp, (void **)ops, - qat_sym_process_response, nb_ops); -} - -int -qat_sym_dev_create(struct qat_pci_device *qat_pci_dev, - struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused) -{ - int i = 0, ret = 0; - struct qat_device_info *qat_dev_instance = - &qat_pci_devs[qat_pci_dev->qat_dev_id]; - struct rte_cryptodev_pmd_init_params init_params = { - .name = "", - .socket_id = qat_dev_instance->pci_dev->device.numa_node, - .private_data_size = sizeof(struct qat_cryptodev_private) - }; - char name[RTE_CRYPTODEV_NAME_MAX_LEN]; - char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; - struct rte_cryptodev *cryptodev; - struct qat_cryptodev_private *internals; - struct qat_capabilities_info capa_info; - const struct rte_cryptodev_capabilities *capabilities; - const struct qat_crypto_gen_dev_ops *gen_dev_ops = - &qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen]; - uint64_t capa_size; - - snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", - qat_pci_dev->name, "sym"); - QAT_LOG(DEBUG, "Creating QAT SYM device %s", name); - - if (gen_dev_ops->cryptodev_ops == NULL) { - QAT_LOG(ERR, "Device %s does not support symmetric crypto", - name); - return -(EFAULT); - } - - /* - * All processes must use same driver id so they can share sessions. 
- * Store driver_id so we can validate that all processes have the same - * value, typically they have, but could differ if binaries built - * separately. - */ - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - qat_pci_dev->qat_sym_driver_id = - qat_sym_driver_id; - } else if (rte_eal_process_type() == RTE_PROC_SECONDARY) { - if (qat_pci_dev->qat_sym_driver_id != - qat_sym_driver_id) { - QAT_LOG(ERR, - "Device %s have different driver id than corresponding device in primary process", - name); - return -(EFAULT); - } - } - - /* Populate subset device to use in cryptodev device creation */ - qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver; - qat_dev_instance->sym_rte_dev.numa_node = - qat_dev_instance->pci_dev->device.numa_node; - qat_dev_instance->sym_rte_dev.devargs = NULL; - - cryptodev = rte_cryptodev_pmd_create(name, - &(qat_dev_instance->sym_rte_dev), &init_params); - - if (cryptodev == NULL) - return -ENODEV; - - qat_dev_instance->sym_rte_dev.name = cryptodev->data->name; - cryptodev->driver_id = qat_sym_driver_id; - cryptodev->dev_ops = gen_dev_ops->cryptodev_ops; - - cryptodev->enqueue_burst = qat_sym_enqueue_burst; - cryptodev->dequeue_burst = qat_sym_dequeue_burst; - - cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev); - - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; - -#ifdef RTE_LIB_SECURITY - if (gen_dev_ops->create_security_ctx) { - cryptodev->security_ctx = - gen_dev_ops->create_security_ctx((void *)cryptodev); - if (cryptodev->security_ctx == NULL) { - QAT_LOG(ERR, "rte_security_ctx memory alloc failed"); - ret = -ENOMEM; - goto error; - } - - cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY; - QAT_LOG(INFO, "Device %s rte_security support enabled", name); - } else { - QAT_LOG(INFO, "Device %s rte_security support disabled", name); - } -#endif - snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, - "QAT_SYM_CAPA_GEN_%d", - qat_pci_dev->qat_dev_gen); - - internals = cryptodev->data->dev_private; - internals->qat_dev = qat_pci_dev; - - internals->dev_id = cryptodev->data->dev_id; - - capa_info = gen_dev_ops->get_capabilities(qat_pci_dev); - capabilities = capa_info.data; - capa_size = capa_info.size; - - internals->capa_mz = rte_memzone_lookup(capa_memz_name); - if (internals->capa_mz == NULL) { - internals->capa_mz = rte_memzone_reserve(capa_memz_name, - capa_size, rte_socket_id(), 0); - if (internals->capa_mz == NULL) { - QAT_LOG(DEBUG, - "Error allocating memzone for capabilities, " - "destroying PMD for %s", - name); - ret = -EFAULT; - goto error; - } - } - - memcpy(internals->capa_mz->addr, capabilities, capa_size); - internals->qat_dev_capabilities = internals->capa_mz->addr; - - while (1) { - if (qat_dev_cmd_param[i].name == NULL) - break; - if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME)) - internals->min_enq_burst_threshold = - qat_dev_cmd_param[i].val; - i++; - } - - internals->service_type = QAT_SERVICE_SYMMETRIC; - qat_pci_dev->sym_dev = internals; - QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d", - cryptodev->data->name, internals->dev_id); - - return 0; - -error: -#ifdef RTE_LIB_SECURITY - rte_free(cryptodev->security_ctx); - cryptodev->security_ctx = NULL; -#endif - rte_cryptodev_pmd_destroy(cryptodev); - memset(&qat_dev_instance->sym_rte_dev, 0, - sizeof(qat_dev_instance->sym_rte_dev)); - - return ret; -} - -int -qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev) -{ - struct rte_cryptodev *cryptodev; - - if (qat_pci_dev == NULL) - return -ENODEV; - if 
(qat_pci_dev->sym_dev == NULL) - return 0; - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_memzone_free(qat_pci_dev->sym_dev->capa_mz); - - /* free crypto device */ - cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id); -#ifdef RTE_LIB_SECURITY - rte_free(cryptodev->security_ctx); - cryptodev->security_ctx = NULL; -#endif - rte_cryptodev_pmd_destroy(cryptodev); - qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL; - qat_pci_dev->sym_dev = NULL; - - return 0; -} - -static struct cryptodev_driver qat_crypto_drv; -RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, - cryptodev_qat_sym_driver, - qat_sym_driver_id); diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h deleted file mode 100644 index 44feca8251..0000000000 --- a/drivers/crypto/qat/qat_sym_refactor.h +++ /dev/null @@ -1,402 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2018 Intel Corporation - */ - -#ifndef _QAT_SYM_H_ -#define _QAT_SYM_H_ - -#include -#ifdef RTE_LIB_SECURITY -#include -#endif - -#include - -#include "qat_common.h" -#include "qat_sym_session.h" -#include "qat_crypto.h" -#include "qat_logs.h" - -#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat - -#define BYTE_LENGTH 8 -/* bpi is only used for partial blocks of DES and AES - * so AES block len can be assumed as max len for iv, src and dst - */ -#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ - -/* Internal capabilities */ -#define QAT_SYM_CAP_MIXED_CRYPTO (1 << 0) -#define QAT_SYM_CAP_VALID (1 << 31) - -/* Macro to add a capability */ -#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d) \ - { \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_##n, \ - b, d \ - }, } \ - }, } \ - } - -#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i) \ - { \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_##n, \ - b, k, d, a, i \ - }, } \ - }, } \ - } - -#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i) \ - { \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ - {.aead = { \ - .algo = RTE_CRYPTO_AEAD_##n, \ - b, k, d, a, i \ - }, } \ - }, } \ - } - -#define QAT_SYM_CIPHER_CAP(n, b, k, i) \ - { \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_##n, \ - b, k, i \ - }, } \ - }, } \ - } - -/* - * Maximum number of SGL entries - */ -#define QAT_SYM_SGL_MAX_NUMBER 16 - -/* Maximum data length for single pass GMAC: 2^14-1 */ -#define QAT_AES_GMAC_SPC_MAX_SIZE 16383 - -struct qat_sym_session; - -struct qat_sym_sgl { - qat_sgl_hdr; - struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER]; -} __rte_packed __rte_cache_aligned; - -struct qat_sym_op_cookie { - struct qat_sym_sgl qat_sgl_src; - struct qat_sym_sgl qat_sgl_dst; - phys_addr_t qat_sgl_src_phys_addr; - phys_addr_t qat_sgl_dst_phys_addr; - union { - /* Used for Single-Pass AES-GMAC only */ - struct { - struct icp_qat_hw_cipher_algo_blk cd_cipher - __rte_packed __rte_cache_aligned; - phys_addr_t cd_phys_addr; - } spc_gmac; - } opt; -}; - -struct qat_sym_dp_ctx { - struct qat_sym_session *session; - uint32_t tail; - uint32_t head; - uint16_t cached_enqueue; - uint16_t cached_dequeue; -}; - -uint16_t -qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops); - -uint16_t -qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, - 
uint16_t nb_ops); - -/** Encrypt a single partial block - * Depends on openssl libcrypto - * Uses ECB+XOR to do CFB encryption, same result, more performant - */ -static inline int -bpi_cipher_encrypt(uint8_t *src, uint8_t *dst, - uint8_t *iv, int ivlen, int srclen, - void *bpi_ctx) -{ - EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; - int encrypted_ivlen; - uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN]; - uint8_t *encr = encrypted_iv; - - /* ECB method: encrypt the IV, then XOR this with plaintext */ - if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) - <= 0) - goto cipher_encrypt_err; - - for (; srclen != 0; --srclen, ++dst, ++src, ++encr) - *dst = *src ^ *encr; - - return 0; - -cipher_encrypt_err: - QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed"); - return -EINVAL; -} - -static inline uint32_t -qat_bpicipher_postprocess(struct qat_sym_session *ctx, - struct rte_crypto_op *op) -{ - int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); - struct rte_crypto_sym_op *sym_op = op->sym; - uint8_t last_block_len = block_len > 0 ? - sym_op->cipher.data.length % block_len : 0; - - if (last_block_len > 0 && - ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) { - - /* Encrypt last block */ - uint8_t *last_block, *dst, *iv; - uint32_t last_block_offset; - - last_block_offset = sym_op->cipher.data.offset + - sym_op->cipher.data.length - last_block_len; - last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, - uint8_t *, last_block_offset); - - if (unlikely(sym_op->m_dst != NULL)) - /* out-of-place operation (OOP) */ - dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, - uint8_t *, last_block_offset); - else - dst = last_block; - - if (last_block_len < sym_op->cipher.data.length) - /* use previous block ciphertext as IV */ - iv = dst - block_len; - else - /* runt block, i.e. 
less than one full block */ - iv = rte_crypto_op_ctod_offset(op, uint8_t *, - ctx->cipher_iv.offset); - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:", - last_block, last_block_len); - if (sym_op->m_dst != NULL) - QAT_DP_HEXDUMP_LOG(DEBUG, - "BPI: dst before post-process:", - dst, last_block_len); -#endif - bpi_cipher_encrypt(last_block, dst, iv, block_len, - last_block_len, ctx->bpi_ctx); -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:", - last_block, last_block_len); - if (sym_op->m_dst != NULL) - QAT_DP_HEXDUMP_LOG(DEBUG, - "BPI: dst after post-process:", - dst, last_block_len); -#endif - } - return sym_op->cipher.data.length - last_block_len; -} - -#ifdef RTE_LIB_SECURITY -static inline void -qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op) -{ - struct rte_crypto_sym_op *sym_op = op->sym; - uint32_t crc_data_ofs, crc_data_len, crc; - uint8_t *crc_data; - - if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT && - sym_op->auth.data.length != 0) { - - crc_data_ofs = sym_op->auth.data.offset; - crc_data_len = sym_op->auth.data.length; - crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *, - crc_data_ofs); - - crc = rte_net_crc_calc(crc_data, crc_data_len, - RTE_NET_CRC32_ETH); - - if (crc != *(uint32_t *)(crc_data + crc_data_len)) - op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; - } -} - -static inline void -qat_crc_generate(struct qat_sym_session *ctx, - struct rte_crypto_op *op) -{ - struct rte_crypto_sym_op *sym_op = op->sym; - uint32_t *crc, crc_data_len; - uint8_t *crc_data; - - if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT && - sym_op->auth.data.length != 0 && - sym_op->m_src->nb_segs == 1) { - - crc_data_len = sym_op->auth.data.length; - crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *, - sym_op->auth.data.offset); - crc = (uint32_t *)(crc_data + crc_data_len); - *crc = rte_net_crc_calc(crc_data, crc_data_len, - RTE_NET_CRC32_ETH); - } -} - -static inline void -qat_sym_preprocess_requests(void **ops, uint16_t nb_ops) -{ - struct rte_crypto_op *op; - struct qat_sym_session *ctx; - uint16_t i; - - for (i = 0; i < nb_ops; i++) { - op = (struct rte_crypto_op *)ops[i]; - - if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { - ctx = (struct qat_sym_session *) - get_sec_session_private_data( - op->sym->sec_session); - - if (ctx == NULL || ctx->bpi_ctx == NULL) - continue; - - qat_crc_generate(ctx, op); - } - } -} -#endif - -static __rte_always_inline int -qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie, - uint64_t *dequeue_err_count __rte_unused) -{ - struct icp_qat_fw_comn_resp *resp_msg = - (struct icp_qat_fw_comn_resp *)resp; - struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) - (resp_msg->opaque_data); - struct qat_sym_session *sess; - uint8_t is_docsis_sec; - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg, - sizeof(struct icp_qat_fw_comn_resp)); -#endif - -#ifdef RTE_LIB_SECURITY - if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { - /* - * Assuming at this point that if it's a security - * op, that this is for DOCSIS - */ - sess = (struct qat_sym_session *) - get_sec_session_private_data( - rx_op->sym->sec_session); - is_docsis_sec = 1; - } else -#endif - { - sess = (struct qat_sym_session *) - get_sym_session_private_data( - rx_op->sym->session, - qat_sym_driver_id); - is_docsis_sec = 0; - } - - if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != - 
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET( - resp_msg->comn_hdr.comn_status)) { - - rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; - } else { - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; - - if (sess->bpi_ctx) { - qat_bpicipher_postprocess(sess, rx_op); -#ifdef RTE_LIB_SECURITY - if (is_docsis_sec) - qat_crc_verify(sess, rx_op); -#endif - } - } - - if (sess->is_single_pass_gmac) { - struct qat_sym_op_cookie *cookie = - (struct qat_sym_op_cookie *) op_cookie; - memset(cookie->opt.spc_gmac.cd_cipher.key, 0, - sess->auth_key_length); - } - - *op = (void *)rx_op; - - return 1; -} - -int -qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id, - struct rte_crypto_raw_dp_ctx *raw_dp_ctx, - enum rte_crypto_op_sess_type sess_type, - union rte_cryptodev_session_ctx session_ctx, uint8_t is_update); - -int -qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev); - -void -qat_sym_init_op_cookie(void *cookie); - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG -static __rte_always_inline void -qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req, - struct qat_sym_session *ctx, - struct rte_crypto_vec *vec, uint32_t vec_len, - struct rte_crypto_va_iova_ptr *cipher_iv, - struct rte_crypto_va_iova_ptr *auth_iv, - struct rte_crypto_va_iova_ptr *aad, - struct rte_crypto_va_iova_ptr *digest) -{ - uint32_t i; - - QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, - sizeof(struct icp_qat_fw_la_bulk_req)); - for (i = 0; i < vec_len; i++) - QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len); - if (cipher_iv && ctx->cipher_iv.length > 0) - QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va, - ctx->cipher_iv.length); - if (auth_iv && ctx->auth_iv.length > 0) - QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va, - ctx->auth_iv.length); - if (aad && ctx->aad_len > 0) - QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va, - ctx->aad_len); - if (digest && ctx->digest_length > 0) - QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va, - ctx->digest_length); -} -#else -static __rte_always_inline void -qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused, - struct qat_sym_session *ctx __rte_unused, - struct rte_crypto_vec *vec __rte_unused, - uint32_t vec_len __rte_unused, - struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused, - struct rte_crypto_va_iova_ptr *auth_iv __rte_unused, - struct rte_crypto_va_iova_ptr *aad __rte_unused, - struct rte_crypto_va_iova_ptr *digest __rte_unused) -{} -#endif - -#endif /* _QAT_SYM_H_ */ diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c index 52837d7c9c..27cdeb1de2 100644 --- a/drivers/crypto/qat/qat_sym_session.c +++ b/drivers/crypto/qat/qat_sym_session.c @@ -20,7 +20,7 @@ #include "qat_logs.h" #include "qat_sym_session.h" -#include "qat_sym_pmd.h" +#include "qat_sym.h" /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */ static const uint8_t sha1InitialState[] = { @@ -600,11 +600,11 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session, session->is_auth = 1; session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER; /* Chacha-Poly is special case that use QAT CTR mode */ - if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) { + if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE; - } else { + else session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - } + session->cipher_iv.offset = aead_xform->iv.offset; session->cipher_iv.length = aead_xform->iv.length; session->aad_len = aead_xform->aad_length;
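
The removed qat_sym_build_request() in qat_sym_refactor.c above caches the session pointer and its per-process build handler in the queue pair's two opaque words, so consecutive operations on the same session skip the session lookup and handler resolution. Below is a minimal standalone sketch of that caching idea only; the my_op/my_session types and the my_build() helper are hypothetical stand-ins for illustration, not the driver's real structures or API.

#include <stdint.h>

/* Hypothetical stand-ins for the driver's op and session types. */
struct my_session;
struct my_op { struct my_session *sess; };
typedef int (*my_build_request_t)(struct my_op *op, uint8_t *out_msg);
struct my_session { my_build_request_t build_request; };

/*
 * opaque[] lives in the queue pair and persists between enqueue calls:
 * opaque[0] caches the last session seen, opaque[1] its build handler.
 * The integer<->pointer casts mirror the approach in the removed code.
 */
static int
my_build(struct my_op *op, uint8_t *out_msg, uint64_t opaque[2])
{
	struct my_session *cached = (struct my_session *)(uintptr_t)opaque[0];
	my_build_request_t build = (my_build_request_t)(uintptr_t)opaque[1];

	if (op->sess != cached) {
		/* Slow path: refresh the cached session and its handler. */
		build = op->sess->build_request;
		opaque[0] = (uintptr_t)op->sess;
		opaque[1] = (uintptr_t)build;
	}
	return build(op, out_msg);
}

The benefit is on the hot path: for a burst of operations sharing one session, only the first operation pays for the lookup, and the rest reuse the cached handler.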