From patchwork Thu Sep 29 03:27:42 2022
X-Patchwork-Submitter: Zhangfei Gao
X-Patchwork-Id: 117091
X-Patchwork-Delegate: gakhil@marvell.com
From: Zhangfei Gao
To: Akhil Goyal, Declan Doherty, Fan Zhang, Ashish Gupta, Ray Kinsella
Cc: dev@dpdk.org, acc@openeuler.org, Zhangfei Gao
Subject: [PATCH v3 2/6] crypto/uadk: support basic operations
Date: Thu, 29 Sep 2022 11:27:42 +0800
Message-Id: <20220929032746.10659-3-zhangfei.gao@linaro.org>
X-Mailer: git-send-email 2.36.1
In-Reply-To: <20220929032746.10659-1-zhangfei.gao@linaro.org>
References: <20220929032746.10659-1-zhangfei.gao@linaro.org>

Support the basic dev
control operations: configure, close, start, stop and get info,
as well as queue pairs operations.

Signed-off-by: Zhangfei Gao
---
 drivers/crypto/uadk/uadk_crypto_pmd.c | 213 ++++++++++++++++++++++++--
 1 file changed, 204 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/uadk/uadk_crypto_pmd.c b/drivers/crypto/uadk/uadk_crypto_pmd.c
index 5c2e6d8f40..d6d9f26337 100644
--- a/drivers/crypto/uadk/uadk_crypto_pmd.c
+++ b/drivers/crypto/uadk/uadk_crypto_pmd.c
@@ -10,6 +10,25 @@
 #include
 #include
 
+/* Maximum length for digest (SHA-512 needs 64 bytes) */
+#define DIGEST_LENGTH_MAX 64
+
+struct uadk_qp {
+	/* Ring for placing process packets */
+	struct rte_ring *processed_pkts;
+	/* Queue pair statistics */
+	struct rte_cryptodev_stats qp_stats;
+	/* Queue Pair Identifier */
+	uint16_t id;
+	/* Unique Queue Pair Name */
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	/* Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
+	uint8_t temp_digest[DIGEST_LENGTH_MAX];
+} __rte_cache_aligned;
+
 enum uadk_crypto_version {
 	UADK_CRYPTO_V2,
 	UADK_CRYPTO_V3,
@@ -28,16 +47,192 @@ RTE_LOG_REGISTER_DEFAULT(uadk_crypto_logtype, INFO);
 		"%s() line %u: " fmt "\n", __func__, __LINE__, \
 		## __VA_ARGS__)
 
+static const struct rte_cryptodev_capabilities uadk_crypto_v2_capabilities[] = {
+	/* End of capabilities */
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/* Configure device */
+static int
+uadk_crypto_pmd_config(struct rte_cryptodev *dev __rte_unused,
+		       struct rte_cryptodev_config *config __rte_unused)
+{
+	return 0;
+}
+
+/* Start device */
+static int
+uadk_crypto_pmd_start(struct rte_cryptodev *dev __rte_unused)
+{
+	return 0;
+}
+
+/* Stop device */
+static void
+uadk_crypto_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+}
+
+/* Close device */
+static int
+uadk_crypto_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+	return 0;
+}
+
+/* Get device statistics */
+static void
+uadk_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+			  struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct uadk_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/* Reset device statistics */
+static void
+uadk_crypto_pmd_stats_reset(struct rte_cryptodev *dev __rte_unused)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct uadk_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+/* Get device info */
+static void
+uadk_crypto_pmd_info_get(struct rte_cryptodev *dev,
+			 struct rte_cryptodev_info *dev_info)
+{
+	struct uadk_crypto_priv *priv = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->driver_name = dev->device->driver->name;
+		dev_info->max_nb_queue_pairs = 128;
+		/* No limit of number of sessions */
+		dev_info->sym.max_nb_sessions = 0;
+		dev_info->feature_flags = dev->feature_flags;
+
+		if (priv->version == UADK_CRYPTO_V2)
+			dev_info->capabilities = uadk_crypto_v2_capabilities;
+	}
+}
+
+/* Release queue pair */
+static int
+uadk_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct uadk_qp *qp = dev->data->queue_pairs[qp_id];
+
+	if (qp) {
+		rte_ring_free(qp->processed_pkts);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+
+	return 0;
+}
+
+/* set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+uadk_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+			    struct uadk_qp *qp)
+{
+	unsigned int n = snprintf(qp->name, sizeof(qp->name),
+				  "uadk_crypto_pmd_%u_qp_%u",
+				  dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Create a ring to place process packets on */
+static struct rte_ring *
+uadk_pmd_qp_create_processed_pkts_ring(struct uadk_qp *qp,
+				       unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r = qp->processed_pkts;
+
+	if (r) {
+		if (rte_ring_get_size(r) >= ring_size) {
+			UADK_LOG(INFO, "Reusing existing ring %s for processed packets",
+				 qp->name);
+			return r;
+		}
+
+		UADK_LOG(ERR, "Unable to reuse existing ring %s for processed packets",
+			 qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			       RING_F_EXACT_SZ);
+}
+
+static int
+uadk_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+			 const struct rte_cryptodev_qp_conf *qp_conf,
+			 int socket_id)
+{
+	struct uadk_qp *qp;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		uadk_crypto_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("uadk PMD Queue Pair", sizeof(*qp),
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return (-ENOMEM);
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (uadk_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+	qp->processed_pkts = uadk_pmd_qp_create_processed_pkts_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL)
+		goto qp_setup_cleanup;
+
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp) {
+		rte_free(qp);
+		qp = NULL;
+	}
+	return -EINVAL;
+}
+
 static struct rte_cryptodev_ops uadk_crypto_pmd_ops = {
-	.dev_configure = NULL,
-	.dev_start = NULL,
-	.dev_stop = NULL,
-	.dev_close = NULL,
-	.stats_get = NULL,
-	.stats_reset = NULL,
-	.dev_infos_get = NULL,
-	.queue_pair_setup = NULL,
-	.queue_pair_release = NULL,
+	.dev_configure = uadk_crypto_pmd_config,
+	.dev_start = uadk_crypto_pmd_start,
+	.dev_stop = uadk_crypto_pmd_stop,
+	.dev_close = uadk_crypto_pmd_close,
+	.stats_get = uadk_crypto_pmd_stats_get,
+	.stats_reset = uadk_crypto_pmd_stats_reset,
+	.dev_infos_get = uadk_crypto_pmd_info_get,
+	.queue_pair_setup = uadk_crypto_pmd_qp_setup,
+	.queue_pair_release = uadk_crypto_pmd_qp_release,
 	.sym_session_get_size = NULL,
 	.sym_session_configure = NULL,
 	.sym_session_clear = NULL,
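
As a usage sketch only (not part of the patch): the new dev_ops are reached
through the standard cryptodev control API roughly as below. The device id,
single queue pair, and 512 descriptors are illustrative assumptions, and
session-pool setup and enqueue/dequeue are outside the scope of this patch.

	#include <rte_cryptodev.h>
	#include <rte_lcore.h>

	/* Hypothetical helper: bring up one queue pair on the uadk device.
	 * dev_id, nb_descriptors and socket choice are illustrative only.
	 */
	static int
	setup_uadk_dev(uint8_t dev_id)
	{
		struct rte_cryptodev_info info;
		struct rte_cryptodev_config conf = {
			.socket_id = rte_socket_id(),
			.nb_queue_pairs = 1,
		};
		struct rte_cryptodev_qp_conf qp_conf = {
			/* Becomes the size of the per-qp processed_pkts ring;
			 * a session mempool may also be needed for real use.
			 */
			.nb_descriptors = 512,
		};
		int ret;

		/* Served by uadk_crypto_pmd_info_get() */
		rte_cryptodev_info_get(dev_id, &info);

		/* uadk_crypto_pmd_config() currently accepts any config */
		ret = rte_cryptodev_configure(dev_id, &conf);
		if (ret < 0)
			return ret;

		/* uadk_crypto_pmd_qp_setup() allocates the qp and its ring */
		ret = rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
						     rte_socket_id());
		if (ret < 0)
			return ret;

		/* uadk_crypto_pmd_start() */
		return rte_cryptodev_start(dev_id);
	}

rte_cryptodev_stats_get() and rte_cryptodev_stats_reset() then map onto
uadk_crypto_pmd_stats_get()/uadk_crypto_pmd_stats_reset(), which aggregate
and clear the per-queue-pair counters kept in struct uadk_qp.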