From patchwork Fri Jan 19 11:40:26 2024
From: Sivaramakrishnan Venkat
To: Kai Ji, Pablo de Lara
Cc: dev@dpdk.org, Sivaramakrishnan Venkat
Subject: [PATCH v1] crypto/ipsec_mb: upgrade IPsec Multi-buffer to 1.4
Date: Fri, 19 Jan 2024 11:40:26 +0000
Message-Id: <20240119114026.1552305-1-venkatx.sivaramakrishnan@intel.com>

SW PMDs no longer support IPsec Multi-buffer versions 1.3 and earlier.
A minimum IPsec Multi-buffer version of 1.4 or greater is now required.
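For illustration only (an editor's sketch, not part of the patch): the new
floor can also be enforced at compile time by any code that includes the
intel-ipsec-mb header, using the same IMB_VERSION()/IMB_VERSION_NUM macros
the driver sources rely on:

    /* Sketch: reject intel-ipsec-mb releases older than 1.4 at build time. */
    #include <intel-ipsec-mb.h>

    #if IMB_VERSION_NUM < IMB_VERSION(1, 4, 0)
    #error "intel-ipsec-mb 1.4 or newer is required"
    #endif

The meson.build change below performs the equivalent check at configure time
via IMB_required_ver.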
Signed-off-by: Sivaramakrishnan Venkat
Acked-by: Ciara Power
---
 drivers/crypto/ipsec_mb/ipsec_mb_ops.c      |  22 ---
 drivers/crypto/ipsec_mb/meson.build         |   2 +-
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c      | 168 --------------------
 drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h |   9 --
 4 files changed, 1 insertion(+), 200 deletions(-)

diff --git a/drivers/crypto/ipsec_mb/ipsec_mb_ops.c b/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
index f21f9cc5a0..c52193180e 100644
--- a/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
+++ b/drivers/crypto/ipsec_mb/ipsec_mb_ops.c
@@ -147,15 +147,10 @@ ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		rte_ring_free(rte_ring_lookup(qp->name));
 
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
-		if (qp->mb_mgr)
-			free_mb_mgr(qp->mb_mgr);
-#else
 		if (qp->mb_mgr_mz) {
 			rte_memzone_free(qp->mb_mgr_mz);
 			qp->mb_mgr = NULL;
 		}
-#endif
 		rte_free(qp);
 		dev->data->queue_pairs[qp_id] = NULL;
 	} else { /* secondary process */
@@ -211,7 +206,6 @@ static struct rte_ring
 			RING_F_SP_ENQ | RING_F_SC_DEQ);
 }
 
-#if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM
 static IMB_MGR *
 ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
 		const char *mb_mgr_mz_name)
@@ -244,7 +238,6 @@ ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
 	}
 	return mb_mgr;
 }
-#endif
 
 /** Setup a queue pair */
 int
@@ -260,12 +253,6 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	int ret;
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
-		IPSEC_MB_LOG(ERR, "The intel-ipsec-mb version (%s) does not support multiprocess,"
-				"the minimum version required for this feature is %s.",
-				IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
-		return -EINVAL;
-#endif
 		qp = dev->data->queue_pairs[qp_id];
 		if (qp == NULL) {
 			IPSEC_MB_LOG(DEBUG, "Secondary process setting up device qp.");
@@ -285,15 +272,11 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		return -ENOMEM;
 	}
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
-	qp->mb_mgr = alloc_init_mb_mgr();
-#else
 	char mz_name[IPSEC_MB_MAX_MZ_NAME];
 	snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
 			dev->data->dev_id, qp_id);
 	qp->mb_mgr = ipsec_mb_alloc_mgr_from_memzone(&(qp->mb_mgr_mz),
 			mz_name);
-#endif
 	if (qp->mb_mgr == NULL) {
 		ret = -ENOMEM;
 		goto qp_setup_cleanup;
 	}
@@ -330,15 +313,10 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 
 qp_setup_cleanup:
-#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
-	if (qp->mb_mgr)
-		free_mb_mgr(qp->mb_mgr);
-#else
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
 		return ret;
 	if (qp->mb_mgr_mz)
 		rte_memzone_free(qp->mb_mgr_mz);
-#endif
 	rte_free(qp);
 	return ret;
 }
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
index 87bf965554..0c988d7411 100644
--- a/drivers/crypto/ipsec_mb/meson.build
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -7,7 +7,7 @@ if is_windows
     subdir_done()
 endif
 
-IMB_required_ver = '1.0.0'
+IMB_required_ver = '1.4.0'
 IMB_header = '#include<intel-ipsec-mb.h>'
 if arch_subdir == 'arm'
     IMB_header = '#include<ipsec-mb.h>'
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 4de4866cf3..40de13af1a 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -210,13 +210,9 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
 			}
 		} else if (xform->auth.key.length == 32) {
 			sess->template_job.hash_alg = IMB_AUTH_ZUC256_EIA3_BITLEN;
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
 			if (sess->auth.req_digest_len != 4 &&
 					sess->auth.req_digest_len != 8 &&
 					sess->auth.req_digest_len != 16) {
-#else
-			if (sess->auth.req_digest_len != 4) {
-#endif
 				IPSEC_MB_LOG(ERR, "Invalid digest size\n");
 				return -EINVAL;
 			}
@@ -845,11 +841,9 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr,
 		}
 	}
 
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
 	sess->session_id = imb_set_session(mb_mgr, &sess->template_job);
 	sess->pid = getpid();
 	RTE_PER_LCORE(pid) = sess->pid;
-#endif
 
 	return 0;
 }
@@ -982,9 +976,7 @@ aesni_mb_set_docsis_sec_session_parameters(
 		goto error_exit;
 	}
 
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
 	ipsec_sess->session_id = imb_set_session(mb_mgr, &ipsec_sess->template_job);
-#endif
 
 error_exit:
 	free_mb_mgr(mb_mgr);
@@ -1239,7 +1231,6 @@ imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg)
 	return 0;
 }
 
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
 static inline int
 single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
 		int oop, uint32_t offset, struct rte_mbuf *m_src,
@@ -1324,7 +1315,6 @@ single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
 	job->sgl_io_segs = sgl_segs;
 	return 0;
 }
-#endif
 
 static inline int
 multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
@@ -1394,9 +1384,7 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl,
 		job->msg_len_to_hash_in_bytes = 0;
 		job->msg_len_to_cipher_in_bytes = 0;
 		job->cipher_start_src_offset_in_bytes = 0;
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
 		imb_set_session(mb_mgr, job);
-#endif
 	} else {
 		job->hash_start_src_offset_in_bytes =
 				op->sym->aead.data.offset;
@@ -1424,15 +1412,10 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl,
 		job->src = NULL;
 		job->dst = NULL;
 
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
 		if (m_src->nb_segs <= MAX_NUM_SEGS)
 			return single_sgl_job(job, op, oop,
 					m_offset, m_src, m_dst,
 					qp_data->sgl_segs);
-		else
-#endif
-			return multi_sgl_job(job, op, oop,
-					m_offset, m_src, m_dst, mb_mgr);
 	} else {
 		job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
 		job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
@@ -1520,10 +1503,6 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 	uint8_t sgl = 0;
 	uint8_t lb_sgl = 0;
 
-#if IMB_VERSION(1, 3, 0) >= IMB_VERSION_NUM
-	(void) pid;
-#endif
-
 	session = ipsec_mb_get_session_private(qp, op);
 	if (session == NULL) {
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
@@ -1533,12 +1512,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 	const IMB_CIPHER_MODE cipher_mode =
 			session->template_job.cipher_mode;
 
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
 	if (session->pid != pid) {
 		memcpy(job, &session->template_job, sizeof(IMB_JOB));
 		imb_set_session(mb_mgr, job);
 	} else if (job->session_id != session->session_id)
-#endif
 		memcpy(job, &session->template_job, sizeof(IMB_JOB));
 
 	if (!op->sym->m_dst) {
@@ -1579,9 +1556,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 			job->u.GCM.ctx = &qp_data->gcm_sgl_ctx;
 			job->cipher_mode = IMB_CIPHER_GCM_SGL;
 			job->hash_alg = IMB_AUTH_GCM_SGL;
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
 			imb_set_session(mb_mgr, job);
-#endif
 		}
 		break;
 	case IMB_AUTH_AES_GMAC_128:
@@ -1606,9 +1581,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 			job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx;
 			job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL;
 			job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL;
-#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM
 			imb_set_session(mb_mgr, job);
-#endif
 		}
 		break;
 	default:
@@ -1804,13 +1777,11 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 		if (lb_sgl)
 			return handle_sgl_linear(job, op, m_offset, session);
 
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
 		if (m_src->nb_segs <= MAX_NUM_SEGS)
 			return single_sgl_job(job, op, oop,
 					m_offset, m_src, m_dst,
 					qp_data->sgl_segs);
 		else
-#endif
 			return multi_sgl_job(job, op, oop,
 					m_offset, m_src, m_dst, mb_mgr);
 	}
@@ -2130,7 +2101,6 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
 	return job;
 }
 
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
 static uint16_t
 aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
@@ -2263,144 +2233,6 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	return processed_jobs;
 }
-#else
-
-/**
- * Process a completed IMB_JOB job and keep processing jobs until
- * get_completed_job return NULL
- *
- * @param qp		Queue Pair to process
- * @param mb_mgr	IMB_MGR to use
- * @param job	IMB_JOB job
- * @param ops	crypto ops to fill
- * @param nb_ops	number of crypto ops
- *
- * @return
- * - Number of processed jobs
- */
-static unsigned
-handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
-		IMB_JOB *job, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	struct rte_crypto_op *op = NULL;
-	uint16_t processed_jobs = 0;
-
-	while (job != NULL) {
-		op = post_process_mb_job(qp, job);
-
-		if (op) {
-			ops[processed_jobs++] = op;
-			qp->stats.dequeued_count++;
-		} else {
-			qp->stats.dequeue_err_count++;
-			break;
-		}
-		if (processed_jobs == nb_ops)
-			break;
-
-		job = IMB_GET_COMPLETED_JOB(mb_mgr);
-	}
-
-	return processed_jobs;
-}
-
-static inline uint16_t
-flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
-		struct rte_crypto_op **ops, uint16_t nb_ops)
-{
-	int processed_ops = 0;
-
-	/* Flush the remaining jobs */
-	IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr);
-
-	if (job)
-		processed_ops += handle_completed_jobs(qp, mb_mgr, job,
-				&ops[processed_ops], nb_ops - processed_ops);
-
-	return processed_ops;
-}
-
-static uint16_t
-aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	struct ipsec_mb_qp *qp = queue_pair;
-	IMB_MGR *mb_mgr = qp->mb_mgr;
-	struct rte_crypto_op *op;
-	IMB_JOB *job;
-	int retval, processed_jobs = 0;
-	pid_t pid = 0;
-
-	if (unlikely(nb_ops == 0 || mb_mgr == NULL))
-		return 0;
-
-	uint8_t digest_idx = qp->digest_idx;
-
-	do {
-		/* Get next free mb job struct from mb manager */
-		job = IMB_GET_NEXT_JOB(mb_mgr);
-		if (unlikely(job == NULL)) {
-			/* if no free mb job structs we need to flush mb_mgr */
-			processed_jobs += flush_mb_mgr(qp, mb_mgr,
-					&ops[processed_jobs],
-					nb_ops - processed_jobs);
-
-			if (nb_ops == processed_jobs)
-				break;
-
-			job = IMB_GET_NEXT_JOB(mb_mgr);
-		}
-
-		/*
-		 * Get next operation to process from ingress queue.
-		 * There is no need to return the job to the IMB_MGR
-		 * if there are no more operations to process, since the IMB_MGR
-		 * can use that pointer again in next get_next calls.
-		 */
-		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
-		if (retval < 0)
-			break;
-
-		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
-			retval = set_sec_mb_job_params(job, qp, op,
-					&digest_idx);
-		else
-			retval = set_mb_job_params(job, qp, op,
					&digest_idx, mb_mgr, pid);
-
-		if (unlikely(retval != 0)) {
-			qp->stats.dequeue_err_count++;
-			set_job_null_op(job, op);
-		}
-
-		/* Submit job to multi-buffer for processing */
-#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
-		job = IMB_SUBMIT_JOB(mb_mgr);
-#else
-		job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
-#endif
-		/*
-		 * If submit returns a processed job then handle it,
-		 * before submitting subsequent jobs
-		 */
-		if (job)
-			processed_jobs += handle_completed_jobs(qp, mb_mgr,
-					job, &ops[processed_jobs],
-					nb_ops - processed_jobs);
-
-	} while (processed_jobs < nb_ops);
-
-	qp->digest_idx = digest_idx;
-
-	if (processed_jobs < 1)
-		processed_jobs += flush_mb_mgr(qp, mb_mgr,
-				&ops[processed_jobs],
-				nb_ops - processed_jobs);
-
-	return processed_jobs;
-}
-#endif
 
 static inline int
 check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
 {
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 85994fe5a1..51cfd7e2aa 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -17,9 +17,7 @@
 #define HMAC_IPAD_VALUE (0x36)
 #define HMAC_OPAD_VALUE (0x5C)
 
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
 #define MAX_NUM_SEGS 16
-#endif
 
 static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
 	{	/* MD5 HMAC */
@@ -567,13 +565,8 @@ static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
 				},
 				.digest_size = {
 					.min = 4,
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
 					.max = 16,
 					.increment = 4
-#else
-					.max = 4,
-					.increment = 0
-#endif
 				},
 				.iv_size = {
 					.min = 16,
@@ -730,9 +723,7 @@ struct aesni_mb_qp_data {
 	 * by the driver when verifying a digest provided
 	 * by the user (using authentication verify operation)
 	 */
-#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
 	struct IMB_SGL_IOV sgl_segs[MAX_NUM_SEGS];
-#endif
 	union {
 		struct gcm_context_data gcm_sgl_ctx;
 		struct chacha20_poly1305_context_data chacha_sgl_ctx;