From patchwork Tue Nov 2 15:54:15 2021
X-Patchwork-Submitter: Nithin Dabilpuram
X-Patchwork-Id: 103514
X-Patchwork-Delegate: jerinj@marvell.com
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: dev@dpdk.org, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
 Satha Rao
Date: Tue, 2 Nov 2021 21:24:15 +0530
Message-ID: <20211102155421.486-4-ndabilpuram@marvell.com>
In-Reply-To: <20211102155421.486-1-ndabilpuram@marvell.com>
References: <20211102155421.486-1-ndabilpuram@marvell.com>
Subject: [dpdk-dev] [PATCH 4/9] common/cnxk: enable tm to listen on Rx pause frames

Enable the TM topology to listen for backpressure when Rx pause frames
are enabled. Only one TM node at the TL3/TL2 level per channel can
listen for backpressure on that channel.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix.c          |   3 +
 drivers/common/cnxk/roc_nix.h          |   9 +-
 drivers/common/cnxk/roc_nix_fc.c       |  20 +++--
 drivers/common/cnxk/roc_nix_priv.h     |   3 +
 drivers/common/cnxk/roc_nix_tm.c       | 157 +++++++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix_tm_ops.c   |   9 ++
 drivers/common/cnxk/roc_nix_tm_utils.c |   8 ++
 drivers/net/cnxk/cnxk_ethdev.c         |   2 +-
 drivers/net/cnxk/cnxk_ethdev_ops.c     |   2 +-
 9 files changed, 204 insertions(+), 9 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index fbfc550..c96b266 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -418,6 +418,9 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 	if (nix->lbk_link) {
 		nix->rx_pause = 1;
 		nix->tx_pause = 1;
+	} else if (!roc_nix_is_vf_or_sdp(roc_nix)) {
+		/* Get the current state of flow control */
+		roc_nix_fc_mode_get(roc_nix);
 	}
 
 	/* Register error and ras interrupts */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index d83a9b5..8f36ce7 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -153,7 +153,10 @@ struct roc_nix_vlan_config {
 };
 
 struct roc_nix_fc_cfg {
-	bool cq_cfg_valid;
+#define ROC_NIX_FC_RXCHAN_CFG 0
+#define ROC_NIX_FC_CQ_CFG     1
+#define ROC_NIX_FC_TM_CFG     2
+	uint8_t type;
 	union {
 		struct {
 			bool enable;
@@ -164,6 +167,10 @@ struct roc_nix_fc_cfg {
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
+
+		struct {
+			bool enable;
+		} tm_cfg;
 	};
 };
 
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index ef46842..6453258 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -24,7 +24,7 @@ nix_fc_rxchan_bpid_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 	else
 		fc_cfg->rxchan_cfg.enable = false;
 
-	fc_cfg->cq_cfg_valid = false;
+	fc_cfg->type = ROC_NIX_FC_RXCHAN_CFG;
 
 	return 0;
 }
@@ -103,7 +103,7 @@ nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 	fc_cfg->cq_cfg.cq_drop = rsp->cq.bp;
 	fc_cfg->cq_cfg.enable = rsp->cq.bp_ena;
 
-	fc_cfg->cq_cfg_valid = true;
+	fc_cfg->type = ROC_NIX_FC_CQ_CFG;
 
 exit:
 	return rc;
@@ -160,10 +160,14 @@ roc_nix_fc_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 	if (roc_nix_is_vf_or_sdp(roc_nix) && !roc_nix_is_lbk(roc_nix))
 		return 0;
 
-	if (fc_cfg->cq_cfg_valid)
+	if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
 		return nix_fc_cq_config_get(roc_nix, fc_cfg);
-	else
+	else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
 		return nix_fc_rxchan_bpid_get(roc_nix, fc_cfg);
+	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
+		return nix_tm_bp_config_get(roc_nix, &fc_cfg->tm_cfg.enable);
+
+	return -EINVAL;
 }
 
 int
@@ -172,11 +176,15 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 	if (roc_nix_is_vf_or_sdp(roc_nix) && !roc_nix_is_lbk(roc_nix))
 		return 0;
 
-	if (fc_cfg->cq_cfg_valid)
+	if (fc_cfg->type == ROC_NIX_FC_CQ_CFG)
 		return nix_fc_cq_config_set(roc_nix, fc_cfg);
-	else
+	else if (fc_cfg->type == ROC_NIX_FC_RXCHAN_CFG)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
+	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+
+	return -EINVAL;
 }
 
 enum roc_nix_fc_mode
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 9805d4a..60a00a3 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -74,6 +74,7 @@ struct nix_tm_node {
 	uint32_t red_algo : 2;
 	uint32_t pkt_mode : 1;
 	uint32_t pkt_mode_set : 1;
+	uint32_t bp_capa : 1;
 
 	bool child_realloc;
 	struct nix_tm_node *parent;
@@ -373,6 +374,8 @@ int nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
+int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
 
 /*
  * TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 08d6e86..b3d8ebd 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -98,16 +98,32 @@ int
 nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 {
 	struct nix_tm_node_list *list;
+	bool is_pf_or_lbk = false;
 	struct nix_tm_node *node;
+	bool skip_bp = false;
 	uint32_t hw_lvl;
 	int rc = 0;
 
 	list = nix_tm_node_list(nix, tree);
 
+	if ((!dev_is_vf(&nix->dev) || nix->lbk_link) && !nix->sdp_link)
+		is_pf_or_lbk = true;
+
 	for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
 		TAILQ_FOREACH(node, list, node) {
 			if (node->hw_lvl != hw_lvl)
 				continue;
+
+			/* Only one TL3/TL2 Link config should have BP enable
+			 * set per channel only for PF or lbk vf.
+			 */
+			node->bp_capa = 0;
+			if (is_pf_or_lbk && !skip_bp &&
+			    node->hw_lvl == nix->tm_link_cfg_lvl) {
+				node->bp_capa = 1;
+				skip_bp = true;
+			}
+
 			rc = nix_tm_node_reg_conf(nix, node);
 			if (rc)
 				goto exit;
@@ -301,6 +317,130 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
+nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	enum roc_nix_tm_tree tree = nix->tm_tree;
+	struct mbox *mbox = (&nix->dev)->mbox;
+	struct nix_txschq_config *req = NULL;
+	struct nix_tm_node_list *list;
+	struct nix_tm_node *node;
+	uint8_t k = 0;
+	uint16_t link;
+	int rc = 0;
+
+	list = nix_tm_node_list(nix, tree);
+	link = nix->tx_link;
+
+	TAILQ_FOREACH(node, list, node) {
+		if (node->hw_lvl != nix->tm_link_cfg_lvl)
+			continue;
+
+		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
+			continue;
+
+		if (!req) {
+			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+			req->lvl = nix->tm_link_cfg_lvl;
+			k = 0;
+		}
+
+		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
+		req->regval[k] = enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~BIT_ULL(13);
+		k++;
+
+		if (k >= MAX_REGS_PER_MBOX_MSG) {
+			req->num_regs = k;
+			rc = mbox_process(mbox);
+			if (rc)
+				goto err;
+			req = NULL;
+		}
+	}
+
+	if (req) {
+		req->num_regs = k;
+		rc = mbox_process(mbox);
+		if (rc)
+			goto err;
+	}
+
+	return 0;
+err:
+	plt_err("Failed to %s bp on link %u, rc=%d(%s)",
+		enable ? "enable" : "disable", link, rc, roc_error_msg_get(rc));
+	return rc;
+}
+
+int
+nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct nix_txschq_config *req = NULL, *rsp;
+	enum roc_nix_tm_tree tree = nix->tm_tree;
+	struct mbox *mbox = (&nix->dev)->mbox;
+	struct nix_tm_node_list *list;
+	struct nix_tm_node *node;
+	bool found = false;
+	uint8_t enable = 1;
+	uint8_t k = 0, i;
+	uint16_t link;
+	int rc = 0;
+
+	list = nix_tm_node_list(nix, tree);
+	link = nix->tx_link;
+
+	TAILQ_FOREACH(node, list, node) {
+		if (node->hw_lvl != nix->tm_link_cfg_lvl)
+			continue;
+
+		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
+			continue;
+
+		found = true;
+		if (!req) {
+			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+			req->read = 1;
+			req->lvl = nix->tm_link_cfg_lvl;
+			k = 0;
+		}
+
+		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
+		k++;
+
+		if (k >= MAX_REGS_PER_MBOX_MSG) {
+			req->num_regs = k;
+			rc = mbox_process_msg(mbox, (void **)&rsp);
+			if (rc || rsp->num_regs != k)
+				goto err;
+			req = NULL;
+
+			/* Report it as enabled only if enabled or all */
+			for (i = 0; i < k; i++)
+				enable &= !!(rsp->regval[i] & BIT_ULL(13));
+		}
+	}
+
+	if (req) {
+		req->num_regs = k;
+		rc = mbox_process(mbox);
+		if (rc)
+			goto err;
+		/* Report it as enabled only if enabled or all */
+		for (i = 0; i < k; i++)
+			enable &= !!(rsp->regval[i] & BIT_ULL(13));
+	}
+
+	*is_enabled = found ? !!enable : false;
+	return 0;
+err:
+	plt_err("Failed to get bp status on link %u, rc=%d(%s)", link, rc,
+		roc_error_msg_get(rc));
+	return rc;
+}
+
+int
 nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
 {
 	struct mbox *mbox = (&nix->dev)->mbox;
@@ -461,6 +601,13 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 		}
 	}
 
+	/* Disable backpressure */
+	rc = nix_tm_bp_config_set(roc_nix, false);
+	if (rc) {
+		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
+		return rc;
+	}
+
 	/* Disable smq xoff for case it was enabled earlier */
 	rc = nix_tm_smq_xoff(nix, node->parent, false);
 	if (rc) {
@@ -580,6 +727,16 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		}
 	}
 
+	if (!nix->rx_pause)
+		return 0;
+
+	/* Restore backpressure */
+	rc = nix_tm_bp_config_set(roc_nix, true);
+	if (rc) {
+		plt_err("Failed to restore backpressure, rc=%d", rc);
+		return rc;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index eee80d5..6a417c0 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -452,6 +452,15 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 		}
 	}
 
+	/* Disable backpressure, it will be enabled back if needed on
+	 * hierarchy enable
+	 */
+	rc = nix_tm_bp_config_set(roc_nix, false);
+	if (rc) {
+		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
+		goto cleanup;
+	}
+
 	/* Flush all tx queues */
 	for (i = 0; i < sq_cnt; i++) {
 		sq = nix->sqs[i];
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index a135454..543adf9 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -522,6 +522,10 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
 		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
 			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
 			regval[k] = BIT_ULL(12) | relchan;
+			/* Enable BP if node is BP capable and rx_pause is set
+			 */
+			if (nix->rx_pause && node->bp_capa)
+				regval[k] |= BIT_ULL(13);
 			k++;
 		}
 
@@ -541,6 +545,10 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
 		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
 			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
 			regval[k] = BIT_ULL(12) | relchan;
+			/* Enable BP if node is BP capable and rx_pause is set
+			 */
+			if (nix->rx_pause && node->bp_capa)
+				regval[k] |= BIT_ULL(13);
 			k++;
 		}
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index db54468..e9bebfe 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1199,7 +1199,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Init flow control configuration */
-	fc_cfg.cq_cfg_valid = false;
+	fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
 	fc_cfg.rxchan_cfg.enable = true;
 	rc = roc_nix_fc_config_set(nix, &fc_cfg);
 	if (rc) {
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 6746430..baa474f 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -227,7 +227,7 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
 	cq = &dev->cqs[qid];
 
-	fc_cfg.cq_cfg_valid = true;
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
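
Note (not part of the patch): a minimal sketch of how a caller could drive the
new ROC_NIX_FC_TM_CFG config type through roc_nix_fc_config_get()/
roc_nix_fc_config_set() as added above. The helper name and the roc_api.h
include are illustrative assumptions; error handling is reduced to the bare
minimum.

#include <stdbool.h>
#include <string.h>

#include "roc_api.h"	/* assumed umbrella header exposing the ROC NIX FC API */

/* Hypothetical helper: make the TM hierarchy of this port listen on Rx pause
 * backpressure if it is not already doing so.
 */
static int
example_nix_tm_bp_enable(struct roc_nix *roc_nix)
{
	struct roc_nix_fc_cfg fc_cfg;
	int rc;

	/* Query the current TM backpressure state via the new config type */
	memset(&fc_cfg, 0, sizeof(fc_cfg));
	fc_cfg.type = ROC_NIX_FC_TM_CFG;
	rc = roc_nix_fc_config_get(roc_nix, &fc_cfg);
	if (rc)
		return rc;

	if (fc_cfg.tm_cfg.enable)
		return 0;	/* BP-capable TL3/TL2 node already listens on BP */

	/* Enable backpressure listening on the BP-capable TL3/TL2 node */
	memset(&fc_cfg, 0, sizeof(fc_cfg));
	fc_cfg.type = ROC_NIX_FC_TM_CFG;
	fc_cfg.tm_cfg.enable = true;
	return roc_nix_fc_config_set(roc_nix, &fc_cfg);
}

This mirrors how cnxk_ethdev.c and cnxk_ethdev_ops.c fill fc_cfg.type for the
RX-channel and CQ cases in the hunks above.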