From patchwork Wed Jul 27 07:15:36 2022
X-Patchwork-Submitter: Shijith Thotton
X-Patchwork-Id: 114259
X-Patchwork-Delegate: jerinj@marvell.com
From: Shijith Thotton <sthotton@marvell.com>
To: dev@dpdk.org
Cc: Shijith Thotton, Pavan Nikhilesh
Subject: [PATCH] event/cnxk: move crypto adapter ops to respective files
Date: Wed, 27 Jul 2022 12:45:36 +0530
Message-ID: <3a3a26b81355825c5571f45d4c24a298b2b119d5.1658905483.git.sthotton@marvell.com>
List-Id: DPDK patches and discussions

Moved the common crypto adapter ops from cnxk_eventdev.c to
cnxk_eventdev_adptr.c, the file specific to eventdev adapters.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
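Note for reviewers (not part of the commit message): the sketch below shows
roughly how an application reaches the moved qp_add/qp_del driver ops through
the public event crypto adapter API. It is illustrative only; the adapter,
event device and cryptodev ids and the port configuration are hypothetical,
and it assumes the pre-22.11 rte_event_crypto_adapter_queue_pair_add()
signature that takes an optional rte_event (NULL here, so event metadata is
taken from the crypto op/session).

#include <rte_event_crypto_adapter.h>

#define CA_ID    0 /* hypothetical crypto adapter id */
#define EVDEV_ID 0 /* hypothetical event device id */
#define CDEV_ID  0 /* hypothetical cryptodev id */

static int
attach_crypto_qps(struct rte_event_port_conf *port_conf)
{
	int ret;

	/* Create the adapter on the event device; the PMD ops moved by this
	 * patch are invoked when queue pairs are added/removed below.
	 */
	ret = rte_event_crypto_adapter_create(CA_ID, EVDEV_ID, port_conf,
					      RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
	if (ret)
		return ret;

	/* A queue_pair_id of -1 adds every configured queue pair of the
	 * cryptodev, mirroring the queue_pair_id == -1 path in
	 * cnxk_crypto_adapter_qp_add().
	 */
	ret = rte_event_crypto_adapter_queue_pair_add(CA_ID, CDEV_ID, -1, NULL);
	if (ret)
		return ret;

	return rte_event_crypto_adapter_start(CA_ID);
}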
 drivers/event/cnxk/cnxk_eventdev.c       | 121 -----------------------
 drivers/event/cnxk/cnxk_eventdev.h       |  10 +-
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 115 +++++++++++++++++++++
 3 files changed, 118 insertions(+), 128 deletions(-)

diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index 97dcf7b66e..b7b93778c6 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -2,129 +2,8 @@
  * Copyright(C) 2021 Marvell.
  */
 
-#include "cnxk_cryptodev_ops.h"
 #include "cnxk_eventdev.h"
 
-static int
-crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
-			struct cnxk_cpt_qp *qp)
-{
-	char name[RTE_MEMPOOL_NAMESIZE];
-	uint32_t cache_size, nb_req;
-	unsigned int req_size;
-	uint32_t nb_desc_min;
-
-	/*
-	 * Update CPT FC threshold. Decrement by hardware burst size to allow
-	 * simultaneous enqueue from all available cores.
-	 */
-	if (roc_model_is_cn10k())
-		nb_desc_min = rte_lcore_count() * 32;
-	else
-		nb_desc_min = rte_lcore_count() * 2;
-
-	if (qp->lmtline.fc_thresh < nb_desc_min) {
-		plt_err("CPT queue depth not sufficient to allow enqueueing from %d cores",
-			rte_lcore_count());
-		return -ENOSPC;
-	}
-
-	qp->lmtline.fc_thresh -= nb_desc_min;
-
-	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
-		 cdev->data->dev_id, qp->lf.lf_id);
-	req_size = sizeof(struct cpt_inflight_req);
-	cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
-	nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
-	qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
-					   0, NULL, NULL, NULL, NULL,
-					   rte_socket_id(), 0);
-	if (qp->ca.req_mp == NULL)
-		return -ENOMEM;
-
-	qp->ca.enabled = true;
-
-	return 0;
-}
-
-int
-cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
-			   const struct rte_cryptodev *cdev,
-			   int32_t queue_pair_id)
-{
-	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
-	uint32_t adptr_xae_cnt = 0;
-	struct cnxk_cpt_qp *qp;
-	int ret;
-
-	if (queue_pair_id == -1) {
-		uint16_t qp_id;
-
-		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
-			qp = cdev->data->queue_pairs[qp_id];
-			ret = crypto_adapter_qp_setup(cdev, qp);
-			if (ret) {
-				cnxk_crypto_adapter_qp_del(cdev, -1);
-				return ret;
-			}
-			adptr_xae_cnt += qp->ca.req_mp->size;
-		}
-	} else {
-		qp = cdev->data->queue_pairs[queue_pair_id];
-		ret = crypto_adapter_qp_setup(cdev, qp);
-		if (ret)
-			return ret;
-		adptr_xae_cnt = qp->ca.req_mp->size;
-	}
-
-	/* Update crypto adapter XAE count */
-	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
-	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
-
-	return 0;
-}
-
-static int
-crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
-{
-	int ret;
-
-	rte_mempool_free(qp->ca.req_mp);
-	qp->ca.enabled = false;
-
-	ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
-	if (ret < 0) {
-		plt_err("Could not reset lmtline for queue pair %d",
-			qp->lf.lf_id);
-		return ret;
-	}
-
-	return 0;
-}
-
-int
-cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
-			   int32_t queue_pair_id)
-{
-	struct cnxk_cpt_qp *qp;
-
-	if (queue_pair_id == -1) {
-		uint16_t qp_id;
-
-		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
-			qp = cdev->data->queue_pairs[qp_id];
-			if (qp->ca.enabled)
-				crypto_adapter_qp_free(qp);
-		}
-	} else {
-		qp = cdev->data->queue_pairs[queue_pair_id];
-		if (qp->ca.enabled)
-			crypto_adapter_qp_free(qp);
-	}
-
-	return 0;
-}
-
 void
 cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
 		  struct rte_event_dev_info *dev_info)
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index bfd0c5627e..c9a0686b4d 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -287,13 +287,6 @@ int cnxk_sso_xstats_reset(struct rte_eventdev *event_dev,
 			  int16_t queue_port_id,
 			  const uint32_t ids[], uint32_t n);
 
-/* Crypto adapter APIs. */
-int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
-			       const struct rte_cryptodev *cdev,
-			       int32_t queue_pair_id);
-int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
-			       int32_t queue_pair_id);
-
 /* CN9K */
 void cn9k_sso_set_rsrc(void *arg);
 
@@ -318,5 +311,8 @@ int cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
 int cnxk_sso_tx_adapter_start(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_stop(uint8_t id, const struct rte_eventdev *event_dev);
 int cnxk_sso_tx_adapter_free(uint8_t id, const struct rte_eventdev *event_dev);
+int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
+			       const struct rte_cryptodev *cdev, int32_t queue_pair_id);
+int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
 
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 1f2e1b4b5d..3f46e79ba8 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -2,6 +2,7 @@
  * Copyright(C) 2021 Marvell.
  */
 
+#include "cnxk_cryptodev_ops.h"
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 
@@ -628,3 +629,117 @@ cnxk_sso_tx_adapter_free(uint8_t id __rte_unused,
 
 	return 0;
 }
+
+static int
+crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp)
+{
+	char name[RTE_MEMPOOL_NAMESIZE];
+	uint32_t cache_size, nb_req;
+	unsigned int req_size;
+	uint32_t nb_desc_min;
+
+	/*
+	 * Update CPT FC threshold. Decrement by hardware burst size to allow
+	 * simultaneous enqueue from all available cores.
+	 */
+	if (roc_model_is_cn10k())
+		nb_desc_min = rte_lcore_count() * 32;
+	else
+		nb_desc_min = rte_lcore_count() * 2;
+
+	if (qp->lmtline.fc_thresh < nb_desc_min) {
+		plt_err("CPT queue depth not sufficient to allow enqueueing from %d cores",
+			rte_lcore_count());
+		return -ENOSPC;
+	}
+
+	qp->lmtline.fc_thresh -= nb_desc_min;
+
+	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u", cdev->data->dev_id, qp->lf.lf_id);
+	req_size = sizeof(struct cpt_inflight_req);
+	cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
+	nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
+	qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0, NULL, NULL, NULL,
+					   NULL, rte_socket_id(), 0);
+	if (qp->ca.req_mp == NULL)
+		return -ENOMEM;
+
+	qp->ca.enabled = true;
+
+	return 0;
+}
+
+int
+cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
+			   int32_t queue_pair_id)
+{
+	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
+	uint32_t adptr_xae_cnt = 0;
+	struct cnxk_cpt_qp *qp;
+	int ret;
+
+	if (queue_pair_id == -1) {
+		uint16_t qp_id;
+
+		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
+			qp = cdev->data->queue_pairs[qp_id];
+			ret = crypto_adapter_qp_setup(cdev, qp);
+			if (ret) {
+				cnxk_crypto_adapter_qp_del(cdev, -1);
+				return ret;
+			}
+			adptr_xae_cnt += qp->ca.req_mp->size;
+		}
+	} else {
+		qp = cdev->data->queue_pairs[queue_pair_id];
+		ret = crypto_adapter_qp_setup(cdev, qp);
+		if (ret)
+			return ret;
+		adptr_xae_cnt = qp->ca.req_mp->size;
+	}
+
+	/* Update crypto adapter XAE count */
+	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
+	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
+{
+	int ret;
+
+	rte_mempool_free(qp->ca.req_mp);
+	qp->ca.enabled = false;
+
+	ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
+	if (ret < 0) {
+		plt_err("Could not reset lmtline for queue pair %d", qp->lf.lf_id);
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id)
+{
+	struct cnxk_cpt_qp *qp;
+
+	if (queue_pair_id == -1) {
+		uint16_t qp_id;
+
+		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
+			qp = cdev->data->queue_pairs[qp_id];
+			if (qp->ca.enabled)
+				crypto_adapter_qp_free(qp);
+		}
+	} else {
+		qp = cdev->data->queue_pairs[queue_pair_id];
+		if (qp->ca.enabled)
+			crypto_adapter_qp_free(qp);
+	}
+
+	return 0;
+}