From patchwork Sun May 15 10:30:42 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Shijith Thotton X-Patchwork-Id: 111152 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id BB889A00BE; Sun, 15 May 2022 12:30:52 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 64ECB40A79; Sun, 15 May 2022 12:30:52 +0200 (CEST) Received: from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com [67.231.156.173]) by mails.dpdk.org (Postfix) with ESMTP id 8FF7640143 for ; Sun, 15 May 2022 12:30:51 +0200 (CEST) Received: from pps.filterd (m0045851.ppops.net [127.0.0.1]) by mx0b-0016f401.pphosted.com (8.17.1.5/8.17.1.5) with ESMTP id 24F6rFnS032118 for ; Sun, 15 May 2022 03:30:50 -0700 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=dl3ltaGowfgMyP1KVa10IHlnlOabZDDB0DrLIMWWIiQ=; b=Dspg24Pw8mQgq099+pYNHtXdGaV0MVSNPyuOQmfJOL5vLdk/VHfzqEPvV/2dR3/hCsNi DjC3xuoO9oR1JIAu0cSvBILLs85wzFQ+czAgjnjHkD2G33J7FTKpOtMEsyySotYqv1lW YGYHGE+GlR9mgrO6HXWae63gBVnnH8MZlhxhfZr5wn+P7c9e8Fp2K9XfrrRqPpslGEYb wKVdozUtBpl/U1a2VaKgnBPY9ZuGgRbe7G7rhEuQKu9sQ5aWgKiWN1X+gvvRpSSofJCr 919MUoK61TszGIXVO9CKHWLLYABL93hXUlkd5PMDboAobPxf8jkLbTkpq7DM0EIQsIxi JQ== Received: from dc5-exch02.marvell.com ([199.233.59.182]) by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3g2bxsjhy2-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT) for ; Sun, 15 May 2022 03:30:50 -0700 Received: from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Sun, 15 May 2022 03:30:48 -0700 Received: 
from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.18 via Frontend Transport; Sun, 15 May 2022 03:30:48 -0700 Received: from localhost.localdomain (unknown [10.28.34.29]) by maili.marvell.com (Postfix) with ESMTP id 63C583F707B; Sun, 15 May 2022 03:30:47 -0700 (PDT) From: Shijith Thotton To: , CC: Shijith Thotton , Subject: [PATCH] event/cnxk: flush event queues over multiple passes Date: Sun, 15 May 2022 16:00:42 +0530 Message-ID: <0bdc8e88eacf7b41e27691b564350853577363e9.1652610489.git.sthotton@marvell.com> X-Mailer: git-send-email 2.25.1 MIME-Version: 1.0 X-Proofpoint-ORIG-GUID: yrpPJ9sYPLLC4jiJAYJnw7FUb2a2YTo2 X-Proofpoint-GUID: yrpPJ9sYPLLC4jiJAYJnw7FUb2a2YTo2 X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.205,Aquarius:18.0.858,Hydra:6.0.486,FMLib:17.11.64.514 definitions=2022-05-15_05,2022-05-13_01,2022-02-23_01 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org If an event queue flush does not complete after a fixed number of tries, remaining queues are flushed before retrying the one with incomplete flush. 
Signed-off-by: Shijith Thotton --- drivers/event/cnxk/cn10k_eventdev.c | 10 +++++++++- drivers/event/cnxk/cn9k_eventdev.c | 10 +++++++++- drivers/event/cnxk/cnxk_eventdev.c | 31 ++++++++++++++++++++++------- drivers/event/cnxk/cnxk_eventdev.h | 5 +++-- 4 files changed, 45 insertions(+), 11 deletions(-) diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c index 9b4d2895ec..6cdfc14d79 100644 --- a/drivers/event/cnxk/cn10k_eventdev.c +++ b/drivers/event/cnxk/cn10k_eventdev.c @@ -108,10 +108,11 @@ cn10k_sso_hws_release(void *arg, void *hws) memset(ws, 0, sizeof(*ws)); } -static void +static int cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, cnxk_handle_event_t fn, void *arg) { + uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX; struct cn10k_sso_hws *ws = hws; uint64_t cq_ds_cnt = 1; uint64_t aq_cnt = 1; @@ -140,6 +141,8 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, cnxk_sso_hws_swtag_flush( ws->base + SSOW_LF_GWS_WQE0, ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH); + else if (retry-- == 0) + break; do { val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE); } while (val & BIT_ULL(56)); @@ -150,8 +153,13 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, cq_ds_cnt &= 0x3FFF3FFF0000; } + if (aq_cnt || cq_ds_cnt || ds_cnt) + return -EAGAIN; + plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); rte_mb(); + + return 0; } static void diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c index 4bba477dd1..db41f41358 100644 --- a/drivers/event/cnxk/cn9k_eventdev.c +++ b/drivers/event/cnxk/cn9k_eventdev.c @@ -117,11 +117,12 @@ cn9k_sso_hws_release(void *arg, void *hws) } } -static void +static int cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, cnxk_handle_event_t fn, void *arg) { struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg); + uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX; struct cn9k_sso_hws_dual *dws; struct cn9k_sso_hws 
*ws; uint64_t cq_ds_cnt = 1; @@ -159,6 +160,8 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, cnxk_sso_hws_swtag_flush( ws_base + SSOW_LF_GWS_TAG, ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH); + else if (retry-- == 0) + break; do { val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE); } while (val & BIT_ULL(56)); @@ -169,7 +172,12 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base, cq_ds_cnt &= 0x3FFF3FFF0000; } + if (aq_cnt || cq_ds_cnt || ds_cnt) + return -EAGAIN; + plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL); + + return 0; } static void diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c index be021d86c9..91235ed9f3 100644 --- a/drivers/event/cnxk/cnxk_eventdev.c +++ b/drivers/event/cnxk/cnxk_eventdev.c @@ -385,9 +385,10 @@ static void cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn, cnxk_sso_hws_flush_t flush_fn, uint8_t enable) { + uint8_t pend_list[RTE_EVENT_MAX_QUEUES_PER_DEV], pend_cnt, new_pcnt; struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); uintptr_t hwgrp_base; - uint16_t i; + uint8_t queue_id, i; void *ws; for (i = 0; i < dev->nb_event_ports; i++) { @@ -396,14 +397,30 @@ cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn, } rte_mb(); + + /* Consume all the events through HWS0 */ ws = event_dev->data->ports[0]; - for (i = 0; i < dev->nb_event_queues; i++) { - /* Consume all the events through HWS0 */ - hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i); - flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev); - /* Enable/Disable SSO GGRP */ - plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL); + /* Starting list of queues to flush */ + pend_cnt = dev->nb_event_queues; + for (i = 0; i < dev->nb_event_queues; i++) + pend_list[i] = i; + + while (pend_cnt) { + new_pcnt = 0; + for (i = 0; i < pend_cnt; i++) { + queue_id = pend_list[i]; + hwgrp_base = + roc_sso_hwgrp_base_get(&dev->sso, queue_id); + if 
(flush_fn(ws, queue_id, hwgrp_base, + cnxk_handle_event, event_dev)) { + pend_list[new_pcnt++] = queue_id; + continue; + } + /* Enable/Disable SSO GGRP */ + plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL); + } + pend_cnt = new_pcnt; } } diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h index 5564746e6d..a490829a8a 100644 --- a/drivers/event/cnxk/cnxk_eventdev.h +++ b/drivers/event/cnxk/cnxk_eventdev.h @@ -54,6 +54,7 @@ #define CN10K_GW_MODE_PREF 1 #define CN10K_GW_MODE_PREF_WFE 2 +#define CNXK_SSO_FLUSH_RETRY_MAX 0xfff #define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name) \ do { \ if (strncmp(dev->driver->name, drv_name, strlen(drv_name))) \ @@ -69,8 +70,8 @@ typedef int (*cnxk_sso_unlink_t)(void *dev, void *ws, uint16_t *map, uint16_t nb_link); typedef void (*cnxk_handle_event_t)(void *arg, struct rte_event ev); typedef void (*cnxk_sso_hws_reset_t)(void *arg, void *ws); -typedef void (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base, - cnxk_handle_event_t fn, void *arg); +typedef int (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base, + cnxk_handle_event_t fn, void *arg); struct cnxk_sso_qos { uint16_t queue;