From patchwork Thu Aug 4 10:36:22 2022
From: Volodymyr Fialko
To: Radu Nicolau, Akhil Goyal
Cc: Volodymyr Fialko
Subject: [PATCH 2/6] examples/ipsec-secgw: add queue for event crypto adapter
Date: Thu, 4 Aug 2022 12:36:22 +0200
Message-ID: <20220804103626.102688-3-vfialko@marvell.com>
In-Reply-To: <20220804103626.102688-1-vfialko@marvell.com>
References: <20220804103626.102688-1-vfialko@marvell.com>

Add a separate event queue for event crypto adapter processing to resolve
queue contention between new and already processed events.

Signed-off-by: Volodymyr Fialko
---
 examples/ipsec-secgw/event_helper.c | 95 +++++++++++++++++++++--------
 examples/ipsec-secgw/event_helper.h |  2 +
 2 files changed, 71 insertions(+), 26 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 6b00a21b6a..9c20a05da8 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -17,6 +17,8 @@
 #define DEFAULT_VECTOR_SIZE	16
 #define DEFAULT_VECTOR_TMO	102400
 
+#define INVALID_EV_QUEUE_ID	-1
+
 static volatile bool eth_core_running;
 
 static int
@@ -151,11 +153,10 @@ eh_dev_has_burst_mode(uint8_t dev_id)
 }
 
 static int
-eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+eh_set_nb_eventdev(struct eventmode_conf *em_conf)
 {
-	int lcore_count, nb_eventdev, nb_eth_dev, ret;
 	struct eventdev_params *eventdev_config;
-	struct rte_event_dev_info dev_info;
+	int nb_eventdev;
 
 	/* Get the number of event devices */
 	nb_eventdev = rte_event_dev_count();
@@ -170,6 +171,23 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		return -EINVAL;
 	}
 
+	/* Set event dev id */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+	eventdev_config->eventdev_id = 0;
+
+	/* Update the number of event devices */
+	em_conf->nb_eventdev = 1;
+
+	return 0;
+}
+
+static int
+eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+{
+	int lcore_count, nb_eth_dev, ret;
+	struct eventdev_params *eventdev_config;
+	struct rte_event_dev_info dev_info;
+
 	/* Get the number of eth devs */
 	nb_eth_dev = rte_eth_dev_count_avail();
 	if (nb_eth_dev == 0) {
@@ -197,15 +215,30 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 	eventdev_config = &(em_conf->eventdev_config[0]);
 
 	/* Save number of queues & ports available */
-	eventdev_config->eventdev_id = 0;
-	eventdev_config->nb_eventqueue = dev_info.max_event_queues;
+	eventdev_config->nb_eventqueue = nb_eth_dev;
 	eventdev_config->nb_eventport = dev_info.max_event_ports;
 	eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
 
-	/* Check if there are more queues than required */
-	if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
-		/* One queue is reserved for Tx */
-		eventdev_config->nb_eventqueue = nb_eth_dev + 1;
+	/* One queue is reserved for Tx */
+	eventdev_config->tx_queue_id = INVALID_EV_QUEUE_ID;
+	if (!eventdev_config->all_internal_ports) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->tx_queue_id =
+				eventdev_config->nb_eventqueue++;
+	}
+
+	/* One queue is reserved for event crypto adapter */
+	eventdev_config->ev_cpt_queue_id = INVALID_EV_QUEUE_ID;
+	if (em_conf->enable_event_crypto_adapter) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->ev_cpt_queue_id =
+				eventdev_config->nb_eventqueue++;
 	}
 
 	/* Check if there are more ports than required */
@@ -214,9 +247,6 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		eventdev_config->nb_eventport = lcore_count;
 	}
 
-	/* Update the number of event devices */
-	em_conf->nb_eventdev++;
-
 	return 0;
 }
 
@@ -245,15 +275,10 @@ eh_do_capability_check(struct eventmode_conf *em_conf)
 
 	/*
 	 * If Rx & Tx internal ports are supported by all event devices then
-	 * eth cores won't be required. Override the eth core mask requested
-	 * and decrement number of event queues by one as it won't be needed
-	 * for Tx.
+	 * eth cores won't be required. Override the eth core mask requested.
 	 */
-	if (all_internal_ports) {
+	if (all_internal_ports)
 		rte_bitmap_reset(em_conf->eth_core_mask);
-		for (i = 0; i < em_conf->nb_eventdev; i++)
-			em_conf->eventdev_config[i].nb_eventqueue--;
-	}
 }
 
 static int
@@ -370,6 +395,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 			eventdev_config->nb_eventqueue :
 			eventdev_config->nb_eventqueue - 1;
 
+	/* Reserve one queue for event crypto adapter */
+	if (em_conf->enable_event_crypto_adapter)
+		nb_eventqueue--;
+
 	/*
 	 * Map all queues of eth device (port) to an event queue. If there
 	 * are more event queues than eth ports then create 1:1 mapping.
@@ -541,14 +570,18 @@ eh_validate_conf(struct eventmode_conf *em_conf)
 	 * and initialize the config with all ports & queues available
 	 */
 	if (em_conf->nb_eventdev == 0) {
+		ret = eh_set_nb_eventdev(em_conf);
+		if (ret != 0)
+			return ret;
+		eh_do_capability_check(em_conf);
 		ret = eh_set_default_conf_eventdev(em_conf);
 		if (ret != 0)
 			return ret;
+	} else {
+		/* Perform capability check for the selected event devices */
+		eh_do_capability_check(em_conf);
 	}
 
-	/* Perform capability check for the selected event devices */
-	eh_do_capability_check(em_conf);
-
 	/*
 	 * Check if links are specified. Else generate a default config for
 	 * the event ports used.
@@ -594,8 +627,8 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 	uint8_t *queue = NULL;
 	uint8_t eventdev_id;
 	int nb_eventqueue;
-	uint8_t i, j;
-	int ret;
+	int ret, j;
+	uint8_t i;
 
 	for (i = 0; i < nb_eventdev; i++) {
 
@@ -657,14 +690,24 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 			 * stage if event device does not have internal
 			 * ports. This will be an atomic queue.
 			 */
-			if (!eventdev_config->all_internal_ports &&
-			    j == nb_eventqueue-1) {
+			if (j == eventdev_config->tx_queue_id) {
 				eventq_conf.schedule_type =
 					RTE_SCHED_TYPE_ATOMIC;
 			} else {
 				eventq_conf.schedule_type =
 					em_conf->ext_params.sched_type;
 			}
+			/*
+			 * Give event crypto device's queue higher priority than Rx queues. This
+			 * will allow crypto events to be processed with highest priority.
+			 */
+			if (j == eventdev_config->ev_cpt_queue_id) {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_HIGHEST;
+			} else {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_NORMAL;
+			}
 
 			/* Set max atomic flows to 1024 */
 			eventq_conf.nb_atomic_flows = 1024;
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index 4b26dc8fc2..af5cfcf794 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -88,6 +88,8 @@ struct eventdev_params {
	uint8_t nb_eventport;
	uint8_t ev_queue_mode;
	uint8_t all_internal_ports;
+	int tx_queue_id;
+	int ev_cpt_queue_id;
 };
 
 /**
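
Editor's note (not part of the patch): a minimal stand-alone sketch of the event queue layout the patch produces, for reviewers. The struct, compute_layout() and main() below are illustrative only and do not exist in event_helper.c; the allocation order mirrors eh_set_default_conf_eventdev() as changed above: one Rx event queue per eth port, an extra atomic Tx queue only when the event device lacks internal Tx ports, and a dedicated queue when the event crypto adapter is enabled.

#include <stdio.h>
#include <stdbool.h>

#define INVALID_EV_QUEUE_ID -1

/* Illustrative stand-in for the relevant eventdev_params fields. */
struct queue_layout {
	int nb_eventqueue;   /* total number of event queues */
	int tx_queue_id;     /* atomic Tx queue, or INVALID_EV_QUEUE_ID */
	int ev_cpt_queue_id; /* crypto adapter queue, or INVALID_EV_QUEUE_ID */
};

/* Mirrors the allocation order in eh_set_default_conf_eventdev(). */
static struct queue_layout
compute_layout(int nb_eth_dev, bool all_internal_ports, bool crypto_adapter)
{
	struct queue_layout l = {
		.nb_eventqueue = nb_eth_dev, /* one Rx queue per eth port */
		.tx_queue_id = INVALID_EV_QUEUE_ID,
		.ev_cpt_queue_id = INVALID_EV_QUEUE_ID,
	};

	if (!all_internal_ports)  /* Tx queue needed only without internal Tx ports */
		l.tx_queue_id = l.nb_eventqueue++;
	if (crypto_adapter)       /* dedicated queue for crypto adapter events */
		l.ev_cpt_queue_id = l.nb_eventqueue++;

	return l;
}

int main(void)
{
	/* Example: 2 eth ports, no internal Tx ports, crypto adapter enabled. */
	struct queue_layout l = compute_layout(2, false, true);

	printf("nb_eventqueue=%d tx_queue_id=%d ev_cpt_queue_id=%d\n",
	       l.nb_eventqueue, l.tx_queue_id, l.ev_cpt_queue_id);
	/* Prints: nb_eventqueue=4 tx_queue_id=2 ev_cpt_queue_id=3 */
	return 0;
}

With this layout, eh_initialize_eventdev() configures the queue matching ev_cpt_queue_id with RTE_EVENT_DEV_PRIORITY_HIGHEST and the queue matching tx_queue_id as RTE_SCHED_TYPE_ATOMIC, as in the hunks above.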