From patchwork Fri Oct 11 16:32:27 2019
X-Patchwork-Submitter: Hemant Agrawal
X-Patchwork-Id: 60978
X-Patchwork-Delegate: gakhil@marvell.com
From: Hemant Agrawal
To: dev@dpdk.org
Cc: akhil.goyal@nxp.com, Vakul Garg
Date: Fri, 11 Oct 2019 22:02:27 +0530
Message-Id: <20191011163233.31017-5-hemant.agrawal@nxp.com>
In-Reply-To: <20191011163233.31017-1-hemant.agrawal@nxp.com>
References: <20191011163233.31017-1-hemant.agrawal@nxp.com>
Subject: [dpdk-dev] [PATCH 04/10] crypto/dpaa2_sec: enhance gcm descs to not skip aadt

From: Vakul Garg

The GCM descriptors needlessly skip auth_only_len bytes in the output
buffer. This forces workarounds in the dpseci driver code and causes one
cryptodev GCM test case to fail.

This patch changes the descriptor construction and adjusts the dpseci
driver accordingly. The test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_1seg
case now passes.

Signed-off-by: Vakul Garg
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 25 ++++++++-------------
 drivers/crypto/dpaa2_sec/hw/desc/algo.h     | 10 ---------
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 14 +++++-------
 3 files changed, 15 insertions(+), 34 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 14f0c523c..8803e8d3c 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -350,14 +350,13 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
 
 	op_fle->length = (sess->dir == DIR_ENC) ?
-			(sym_op->aead.data.length + icv_len + auth_only_len) :
-			sym_op->aead.data.length + auth_only_len;
+			(sym_op->aead.data.length + icv_len) :
+			sym_op->aead.data.length;
 
 	/* Configure Output SGE for Encap/Decap */
 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
-	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off +
-			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
-	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
+	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
+	sge->length = mbuf->data_len - sym_op->aead.data.offset;
 
 	mbuf = mbuf->next;
 	/* o/p segs */
@@ -510,24 +509,21 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
 	if (auth_only_len)
 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
 	fle->length = (sess->dir == DIR_ENC) ?
-			(sym_op->aead.data.length + icv_len + auth_only_len) :
-			sym_op->aead.data.length + auth_only_len;
+			(sym_op->aead.data.length + icv_len) :
+			sym_op->aead.data.length;
 
 	DPAA2_SET_FLE_SG_EXT(fle);
 
 	/* Configure Output SGE for Encap/Decap */
 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
-	DPAA2_SET_FLE_OFFSET(sge, dst->data_off +
-			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
-	sge->length = sym_op->aead.data.length + auth_only_len;
+	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
+	sge->length = sym_op->aead.data.length;
 
 	if (sess->dir == DIR_ENC) {
 		sge++;
 		DPAA2_SET_FLE_ADDR(sge,
 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
 		sge->length = sess->digest_length;
-		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
-					sess->iv.length + auth_only_len));
 	}
 	DPAA2_SET_FLE_FIN(sge);
 
@@ -566,10 +562,6 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
 				sess->digest_length);
 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
 		sge->length = sess->digest_length;
-		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
-				sess->digest_length +
-				sess->iv.length +
-				auth_only_len));
 	}
 	DPAA2_SET_FLE_FIN(sge);
 
@@ -578,6 +570,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
 	}
 
+	DPAA2_SET_FD_LEN(fd, fle->length);
 	return 0;
 }
 
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
index 32ce787fa..c41cb2292 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/algo.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -649,11 +649,6 @@ cnstr_shdsc_gcm_encap(uint32_t *descbuf, bool ps, bool swap,
 	MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
 	pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
 
-	MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
-
-	/* skip assoc data */
-	SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
-
 	/* cryptlen = seqinlen - assoclen */
 	MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
 
@@ -756,11 +751,6 @@ cnstr_shdsc_gcm_decap(uint32_t *descbuf, bool ps, bool swap,
 	MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
 	pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
 
-	MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
-
-	/* skip assoc data */
-	SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
-
 	/* read assoc data */
 	SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index e89cbcefb..c1c6c054a 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -1180,10 +1180,9 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	out_sg = &cf->sg[0];
 	out_sg->extension = 1;
 	if (is_encode(ses))
-		out_sg->length = sym->aead.data.length + ses->auth_only_len
-						+ ses->digest_length;
+		out_sg->length = sym->aead.data.length + ses->digest_length;
 	else
-		out_sg->length = sym->aead.data.length + ses->auth_only_len;
+		out_sg->length = sym->aead.data.length;
 
 	/* output sg entries */
 	sg = &cf->sg[2];
@@ -1192,9 +1191,8 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
 	/* 1st seg */
 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
-	sg->length = mbuf->data_len - sym->aead.data.offset +
-					ses->auth_only_len;
-	sg->offset = sym->aead.data.offset - ses->auth_only_len;
+	sg->length = mbuf->data_len - sym->aead.data.offset;
+	sg->offset = sym->aead.data.offset;
 
 	/* Successive segs */
 	mbuf = mbuf->next;
@@ -1367,8 +1365,8 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	sg++;
 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
 	qm_sg_entry_set64(sg,
-		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
-	sg->length = sym->aead.data.length + ses->auth_only_len;
+		dst_start_addr + sym->aead.data.offset);
+	sg->length = sym->aead.data.length;
 	length = sg->length;
 	if (is_encode(ses)) {
 		cpu_to_hw_sg(sg);
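
For readers reviewing the change, the layout the reworked descriptors assume
is the standard cryptodev AEAD convention: AAD sits in its own buffer,
ciphertext is written at aead.data.offset, the digest follows the ciphertext,
and no auth_only_len adjustment appears in the output addresses or lengths.
The C sketch below illustrates that layout from the application side. It is a
hypothetical, minimal example rather than code from this series; the helper
name build_gcm_op(), the in-place single-segment mbuf layout, and the omission
of session setup and error handling are assumptions made only for illustration.

/*
 * Hypothetical sketch, not part of this patch: fill an AES-GCM rte_crypto op
 * with AAD in a separate buffer, ciphertext produced in place at
 * aead.data.offset, and the tag placed right after the ciphertext.
 * Session creation/attach and error paths are intentionally omitted.
 */
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>

static struct rte_crypto_op *
build_gcm_op(struct rte_mempool *op_pool, struct rte_mbuf *m,
	     uint32_t data_off, uint32_t data_len,
	     uint8_t *aad, rte_iova_t aad_iova)
{
	struct rte_crypto_op *op;
	struct rte_crypto_sym_op *sym;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return NULL;

	sym = op->sym;
	sym->m_src = m;

	/* Cipher/auth region: ciphertext is produced in place starting at
	 * aead.data.offset, which is where the new output SGE points. */
	sym->aead.data.offset = data_off;
	sym->aead.data.length = data_len;

	/* AAD is authenticated but never copied to the output, so the
	 * output length is just data.length (plus the tag on encrypt). */
	sym->aead.aad.data = aad;
	sym->aead.aad.phys_addr = aad_iova;

	/* Digest (GCM tag) lands immediately after the ciphertext. */
	sym->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
						data_off + data_len);
	sym->aead.digest.phys_addr =
			rte_pktmbuf_iova_offset(m, data_off + data_len);

	return op;
}

A real application would still create an RTE_CRYPTO_AEAD_AES_GCM session and
attach it with rte_crypto_op_attach_sym_session() before enqueueing the op;
nothing in that flow changes with this patch.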