From patchwork Fri Nov 3 13:30:36 2023
X-Patchwork-Submitter: Sivaramakrishnan Venkat
X-Patchwork-Id: 133897
X-Patchwork-Delegate: gakhil@marvell.com
From: Sivaramakrishnan Venkat
To: Kai Ji
Cc: dev@dpdk.org, stable@dpdk.org, gakhil@marvell.com, Sivaramakrishnan Venkat
Subject: [PATCH 1/2] crypto/qat: fix block cipher misalignment for AES CBC and 3DES CBC
Date: Fri, 3 Nov 2023 13:30:36 +0000
Message-Id: <20231103133037.782512-1-venkatx.sivaramakrishnan@intel.com>

Check the cipher length alignment for 3DES CBC and AES CBC, and change
the job to a NULL operation when the input buffer is misaligned.

Fixes: a815a04cea05 ("crypto/qat: support symmetric build op request")
Fixes: 85fec6fd9674 ("crypto/qat: unify raw data path functions")
Fixes: def38073ac90 ("crypto/qat: check cipher buffer alignment")
Cc: kai.ji@intel.com
Cc: stable@dpdk.org

Signed-off-by: Sivaramakrishnan Venkat
Acked-by: Kai Ji
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 35 +++++++++++---------
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 12 +++----
 drivers/crypto/qat/qat_sym.h                 |  9 +++++
 3 files changed, 35 insertions(+), 21 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 37647374d5..49053e662e 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -616,7 +616,8 @@ static __rte_always_inline void
 enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
 	struct icp_qat_fw_la_bulk_req *req,
 	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+	union rte_crypto_sym_ofs ofs, uint32_t data_len,
+	struct qat_sym_op_cookie *cookie)
 {
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 
@@ -627,6 +628,15 @@ enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
 	cipher_param->cipher_offset = ofs.ofs.cipher.head;
 	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
 			ofs.ofs.cipher.tail;
+
+	if (AES_OR_3DES_MISALIGNED) {
+		QAT_LOG(DEBUG,
+			"Input cipher buffer misalignment detected and change job as NULL operation");
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		header->service_type = ICP_QAT_FW_COMN_REQ_NULL;
+		header->service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
+		cookie->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+	}
 }
 
 static __rte_always_inline void
@@ -683,7 +693,8 @@ enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
 	struct rte_crypto_va_iova_ptr *cipher_iv,
 	struct rte_crypto_va_iova_ptr *digest,
 	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+	union rte_crypto_sym_ofs ofs, uint32_t data_len,
+	struct qat_sym_op_cookie *cookie)
 {
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_auth_req_params *auth_param;
@@ -711,20 +722,14 @@ enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
 	auth_param->auth_off = ofs.ofs.auth.head;
 	auth_param->auth_len = auth_len;
 	auth_param->auth_res_addr = digest->iova;
-	/* Input cipher length alignment requirement for 3DES-CBC and AES-CBC.
-	 * For 3DES-CBC cipher algo, ESP Payload size requires 8 Byte aligned.
-	 * For AES-CBC cipher algo, ESP Payload size requires 16 Byte aligned.
-	 * The alignment should be guaranteed by the ESP package padding field
-	 * according to the RFC4303. Under this condition, QAT will pass through
-	 * chain job as NULL cipher and NULL auth operation and report misalignment
-	 * error detected.
-	 */
 	if (AES_OR_3DES_MISALIGNED) {
-		QAT_LOG(ERR, "Input cipher length alignment error detected.\n");
-		ctx->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
-		ctx->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
-		cipher_param->cipher_length = 0;
-		auth_param->auth_len = 0;
+		QAT_LOG(DEBUG,
+			"Input cipher buffer misalignment detected and change job as NULL operation");
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		header->service_type = ICP_QAT_FW_COMN_REQ_NULL;
+		header->service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
+		cookie->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -1;
 	}
 
 	switch (ctx->qat_hash_alg) {
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index e4bcfa59e7..208b7e0ba6 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -248,7 +248,7 @@ qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
 		return -EINVAL;
 	}
 
-	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len, op_cookie);
 
 	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
 			NULL, NULL, NULL);
@@ -383,7 +383,7 @@ qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 
 	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
 			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
-			ofs, total_len);
+			ofs, total_len, cookie);
 
 	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
 			&auth_iv, NULL, &digest);
@@ -507,7 +507,7 @@ qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
 	if (unlikely(data_len < 0))
 		return -1;
 
-	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len, cookie);
 
 	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
 			NULL, NULL, NULL);
@@ -564,7 +564,7 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
+			(uint32_t)data_len, cookie);
 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
 
 		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
@@ -740,7 +740,7 @@ qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
 
 	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
 			NULL, 0, cipher_iv, job_digest, auth_iv, ofs,
-			(uint32_t)data_len)))
+			(uint32_t)data_len, cookie)))
 		return -1;
 
 	dp_ctx->tail = tail;
@@ -811,7 +811,7 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 				vec->src_sgl[i].vec, vec->src_sgl[i].num,
 				NULL, 0,
 				&vec->iv[i], job_digest,
-				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+				&vec->auth_iv[i], ofs, (uint32_t)data_len, cookie)))
 			break;
 
 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index d19cadde86..bc25ddf33d 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -115,6 +115,7 @@ struct qat_sym_op_cookie {
 	} opt;
 	uint8_t digest_null[4];
 	phys_addr_t digest_null_phys_addr;
+	enum rte_crypto_op_status status;
 };
 
 struct qat_sym_dp_ctx {
@@ -319,6 +320,7 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 			(resp_msg->opaque_data);
 	struct qat_sym_session *sess;
 	uint8_t is_docsis_sec;
+	struct qat_sym_op_cookie *cookie = NULL;
 
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
@@ -364,6 +366,13 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 				sess->auth_key_length);
 	}
 
+	cookie = (struct qat_sym_op_cookie *) op_cookie;
+	if (cookie->status == RTE_CRYPTO_OP_STATUS_INVALID_ARGS) {
+		rx_op->status = cookie->status;
+		resp_msg->comn_hdr.service_id = ICP_QAT_FW_COMN_RESP_SERV_CPM_FW;
+		cookie->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	}
+
 	*op = (void *)rx_op;
 
 	/*
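
For readers less familiar with the driver, the condition behind
AES_OR_3DES_MISALIGNED boils down to a block-size check: CBC mode needs the
cipher length to be a multiple of 16 bytes for AES-CBC and 8 bytes for
3DES-CBC (as the removed comment above notes, RFC 4303 ESP padding normally
guarantees this). A minimal standalone sketch of such a check follows; the
names and constants are illustrative assumptions only, not the macro's actual
definition in the driver.

/*
 * Illustrative sketch only: CBC ciphers require the cipher length to be a
 * multiple of the block size (16 bytes for AES-CBC, 8 bytes for 3DES-CBC).
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_AES_CBC_BLOCK_SIZE	16
#define EXAMPLE_3DES_CBC_BLOCK_SIZE	8

enum example_cipher_alg {
	EXAMPLE_AES_CBC,
	EXAMPLE_3DES_CBC,
	EXAMPLE_OTHER,
};

static inline bool
example_cipher_len_misaligned(enum example_cipher_alg alg, uint32_t cipher_len)
{
	switch (alg) {
	case EXAMPLE_AES_CBC:
		return (cipher_len % EXAMPLE_AES_CBC_BLOCK_SIZE) != 0;
	case EXAMPLE_3DES_CBC:
		return (cipher_len % EXAMPLE_3DES_CBC_BLOCK_SIZE) != 0;
	default:
		/* Other algorithms carry no such CBC block-size restriction. */
		return false;
	}
}

When a request trips this check, the patch no longer zeroes cipher_length and
auth_len as the old code did; it downgrades the firmware request to a NULL
operation and records RTE_CRYPTO_OP_STATUS_INVALID_ARGS in the op cookie,
which qat_sym_process_response() then propagates to the operation status.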