From patchwork Tue Jan 11 08:18:30 2022
X-Patchwork-Submitter: Sunil Kumar Kori
X-Patchwork-Id: 105722
X-Patchwork-Delegate: jerinj@marvell.com
From: Sunil Kumar Kori <skori@marvell.com>
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
 Ray Kinsella
CC: dev@dpdk.org
Subject: [PATCH v2 1/2] common/cnxk: support priority flow ctrl config API
Date: Tue, 11 Jan 2022 13:48:30 +0530
Message-ID: <20220111081831.881374-1-skori@marvell.com>
In-Reply-To: <20220109111130.751933-2-skori@marvell.com>
References: <20220109111130.751933-2-skori@marvell.com>

From: Sunil Kumar Kori <skori@marvell.com>

CNXK platforms support priority flow control (802.1Qbb) to pause
respective traffic per class on that link.

This patch adds a RoC interface to configure priority flow control
on the MAC block, i.e. CGX on cn9k and RPM on cn10k.
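
Usage sketch (editor's illustration, not part of the patch): the new RoC
calls added below could be driven roughly as follows. The initialized
struct roc_nix handle, the helper name and the chosen traffic class are
assumptions for illustration only.

    /* Sketch: enable PFC on traffic class 3 in both directions.
     * "nix" is assumed to be an initialized, configured struct roc_nix.
     */
    static int
    nix_pfc_tc3_enable(struct roc_nix *nix)
    {
            struct roc_nix_pfc_cfg pfc_cfg;
            int rc;

            memset(&pfc_cfg, 0, sizeof(pfc_cfg));
            pfc_cfg.mode = ROC_NIX_FC_FULL; /* pause Rx and Tx */
            pfc_cfg.tc = 3;                 /* for SET, an index in [0, 15] */
            rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
            if (rc)
                    return rc;

            /* For GET, tc comes back as a bitmap of enabled classes */
            memset(&pfc_cfg, 0, sizeof(pfc_cfg));
            return roc_nix_pfc_mode_get(nix, &pfc_cfg);
    }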
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v2:
 - fix RoC API naming convention.

 drivers/common/cnxk/roc_mbox.h       |  17 +++
 drivers/common/cnxk/roc_nix.h        |  21 ++++
 drivers/common/cnxk/roc_nix_fc.c     |  95 ++++++++++++++--
 drivers/common/cnxk/roc_nix_priv.h   |   4 +-
 drivers/common/cnxk/roc_nix_tm.c     | 162 ++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_nix_tm_ops.c |  14 ++-
 drivers/common/cnxk/version.map      |   4 +
 7 files changed, 298 insertions(+), 19 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index b63fe108c9..12a5922229 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -95,6 +95,8 @@ struct mbox_msghdr {
 				msg_rsp)                                       \
 	M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp)               \
 	M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp)                 \
+	M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+	  cgx_pfc_rsp)                                                         \
 	/* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 	M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,                 \
 	  npa_lf_alloc_rsp)                                                    \
@@ -540,6 +542,19 @@ struct cgx_pause_frm_cfg {
 	uint8_t __io tx_pause;
 };
 
+struct cgx_pfc_cfg {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+	uint16_t __io pfc_en; /* bitmap indicating enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+	struct mbox_msghdr hdr;
+	uint8_t __io rx_pause;
+	uint8_t __io tx_pause;
+};
+
 struct sfp_eeprom_s {
 #define SFP_EEPROM_SIZE 256
 	uint16_t __io sff_id;
@@ -1115,6 +1130,8 @@ struct nix_bp_cfg_req {
  * so maximum 64 channels are possible.
  */
 #define NIX_MAX_CHAN	 64
+#define NIX_CGX_MAX_CHAN 16
+#define NIX_LBK_MAX_CHAN NIX_MAX_CHAN
 struct nix_bp_cfg_rsp {
 	struct mbox_msghdr hdr;
 	/* Channel and bpid mapping */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 69a5e8e7b4..e05b7b7dd8 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -165,16 +165,27 @@ struct roc_nix_fc_cfg {
 
 		struct {
 			uint32_t rq;
+			uint16_t tc;
 			uint16_t cq_drop;
 			bool enable;
 		} cq_cfg;
 
 		struct {
+			uint32_t sq;
+			uint16_t tc;
 			bool enable;
 		} tm_cfg;
 	};
 };
 
+struct roc_nix_pfc_cfg {
+	enum roc_nix_fc_mode mode;
+	/* For SET, tc must be [0, 15].
+	 * For GET, TC will represent bitmap
+	 */
+	uint16_t tc;
+};
+
 struct roc_nix_eeprom_info {
 #define ROC_NIX_EEPROM_SIZE 256
 	uint16_t sff_id;
@@ -478,6 +489,7 @@ void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
 enum roc_nix_tm_tree {
 	ROC_NIX_TM_DEFAULT = 0,
 	ROC_NIX_TM_RLIMIT,
+	ROC_NIX_TM_PFC,
 	ROC_NIX_TM_USER,
 	ROC_NIX_TM_TREE_MAX,
 };
@@ -624,6 +636,7 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
 int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
 
@@ -736,6 +749,14 @@ int __roc_api roc_nix_fc_config_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_fc_mode_set(struct roc_nix *roc_nix,
 				  enum roc_nix_fc_mode mode);
 
+int __roc_api roc_nix_pfc_mode_set(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+int __roc_api roc_nix_pfc_mode_get(struct roc_nix *roc_nix,
+				   struct roc_nix_pfc_cfg *pfc_cfg);
+
+uint16_t __roc_api roc_nix_chan_count_get(struct roc_nix *roc_nix);
+
 enum roc_nix_fc_mode __roc_api roc_nix_fc_mode_get(struct roc_nix *roc_nix);
 
 void __roc_api rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id,
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index ca29cd2bf9..95f466242a 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -36,7 +36,7 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 	struct mbox *mbox = get_mbox(roc_nix);
 	struct nix_bp_cfg_req *req;
 	struct nix_bp_cfg_rsp *rsp;
-	int rc = -ENOSPC;
+	int rc = -ENOSPC, i;
 
 	if (roc_nix_is_sdp(roc_nix))
 		return 0;
@@ -45,22 +45,28 @@ nix_fc_rxchan_bpid_set(struct roc_nix *roc_nix, bool enable)
 		req = mbox_alloc_msg_nix_bp_enable(mbox);
 		if (req == NULL)
 			return rc;
+
 		req->chan_base = 0;
-		req->chan_cnt = 1;
-		req->bpid_per_chan = 0;
+		if (roc_nix_is_lbk(roc_nix))
+			req->chan_cnt = NIX_LBK_MAX_CHAN;
+		else
+			req->chan_cnt = NIX_CGX_MAX_CHAN;
+
+		req->bpid_per_chan = true;
 
 		rc = mbox_process_msg(mbox, (void *)&rsp);
 		if (rc || (req->chan_cnt != rsp->chan_cnt))
 			goto exit;
 
-		nix->bpid[0] = rsp->chan_bpid[0];
 		nix->chan_cnt = rsp->chan_cnt;
+		for (i = 0; i < rsp->chan_cnt; i++)
+			nix->bpid[i] = rsp->chan_bpid[i] & 0x1FF;
 	} else {
 		req = mbox_alloc_msg_nix_bp_disable(mbox);
 		if (req == NULL)
 			return rc;
 		req->chan_base = 0;
-		req->chan_cnt = 1;
+		req->chan_cnt = nix->chan_cnt;
 
 		rc = mbox_process(mbox);
 		if (rc)
@@ -152,7 +158,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -169,7 +175,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		aq->op = NIX_AQ_INSTOP_WRITE;
 
 		if (fc_cfg->cq_cfg.enable) {
-			aq->cq.bpid = nix->bpid[0];
+			aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
 			aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
 			aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
 			aq->cq_mask.bp = ~(aq->cq_mask.bp);
@@ -210,7 +216,9 @@ roc_nix_fc_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 		return nix_fc_rxchan_bpid_set(roc_nix,
 					      fc_cfg->rxchan_cfg.enable);
 	else if (fc_cfg->type == ROC_NIX_FC_TM_CFG)
-		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.enable);
+		return nix_tm_bp_config_set(roc_nix, fc_cfg->tm_cfg.sq,
+					    fc_cfg->tm_cfg.tc,
+					    fc_cfg->tm_cfg.enable);
 
 	return -EINVAL;
 }
@@ -391,3 +399,74 @@ rox_nix_fc_npa_bp_cfg(struct roc_nix *roc_nix, uint64_t pool_id, uint8_t ena,
 
 	mbox_process(mbox);
 }
+
+int
+roc_nix_pfc_mode_set(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct mbox *mbox = get_mbox(roc_nix);
+	uint8_t tx_pause, rx_pause;
+	struct cgx_pfc_cfg *req;
+	struct cgx_pfc_rsp *rsp;
+	int rc = -ENOSPC;
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	rx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_RX);
+	tx_pause = (pfc_cfg->mode == ROC_NIX_FC_FULL) ||
+		   (pfc_cfg->mode == ROC_NIX_FC_TX);
+
+	req = mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(mbox);
+	if (req == NULL)
+		goto exit;
+
+	req->pfc_en = BIT(pfc_cfg->tc);
+	req->rx_pause = rx_pause;
+	req->tx_pause = tx_pause;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	nix->rx_pause = rsp->rx_pause;
+	nix->tx_pause = rsp->tx_pause;
+	if (rsp->tx_pause)
+		nix->cev |= BIT(pfc_cfg->tc);
+	else
+		nix->cev &= ~BIT(pfc_cfg->tc);
+
+exit:
+	return rc;
+}
+
+int
+roc_nix_pfc_mode_get(struct roc_nix *roc_nix, struct roc_nix_pfc_cfg *pfc_cfg)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (roc_nix_is_lbk(roc_nix))
+		return NIX_ERR_OP_NOTSUP;
+
+	pfc_cfg->tc = nix->cev;
+
+	if (nix->rx_pause && nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_FULL;
+	else if (nix->rx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_RX;
+	else if (nix->tx_pause)
+		pfc_cfg->mode = ROC_NIX_FC_TX;
+	else
+		pfc_cfg->mode = ROC_NIX_FC_NONE;
+
+	return 0;
+}
+
+uint16_t
+roc_nix_chan_count_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	return nix->chan_cnt;
+}
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 04575af295..f5bb09dc3b 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -139,6 +139,7 @@ struct nix {
 	uint16_t msixoff;
 	uint8_t rx_pause;
 	uint8_t tx_pause;
+	uint16_t cev;
 	uint64_t rx_cfg;
 	struct dev dev;
 	uint16_t cints;
@@ -376,7 +377,8 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	       bool ena);
 int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
 int nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled);
-int nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable);
+int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+			 bool enable);
 
 /*
  * TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index b3d8ebd3c2..0775bca7b0 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -121,7 +121,7 @@ nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
 			if (is_pf_or_lbk && !skip_bp &&
 			    node->hw_lvl == nix->tm_link_cfg_lvl) {
 				node->bp_capa = 1;
-				skip_bp = true;
+				skip_bp = false;
 			}
 
 			rc = nix_tm_node_reg_conf(nix, node);
@@ -317,18 +317,31 @@ nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
 }
 
 int
-nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+		     bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	enum roc_nix_tm_tree tree = nix->tm_tree;
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	struct nix_tm_node *sq_node;
+	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
 	uint16_t link;
 	int rc = 0;
 
+	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	parent = sq_node->parent;
+	while (parent) {
+		if (parent->lvl == ROC_TM_LVL_SCH2)
+			break;
+
+		parent = parent->parent;
+	}
+
 	list = nix_tm_node_list(nix, tree);
 	link = nix->tx_link;
 
@@ -339,6 +352,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
 			continue;
 
+		if (node->hw_id != parent->hw_id)
+			continue;
+
 		if (!req) {
 			req = mbox_alloc_msg_nix_txschq_cfg(mbox);
 			req->lvl = nix->tm_link_cfg_lvl;
@@ -346,8 +362,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, bool enable)
 		}
 
 		req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
-		req->regval[k] = enable ? BIT_ULL(13) : 0;
-		req->regval_mask[k] = ~BIT_ULL(13);
+		req->regval[k] = enable ? tc : 0;
+		req->regval[k] |= enable ? BIT_ULL(13) : 0;
+		req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
 		k++;
 
 		if (k >= MAX_REGS_PER_MBOX_MSG) {
@@ -602,7 +619,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
 	}
 
 	/* Disable backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, false);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
 	if (rc) {
 		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
 		return rc;
@@ -731,7 +748,7 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
 		return 0;
 
 	/* Restore backpressure */
-	rc = nix_tm_bp_config_set(roc_nix, true);
+	rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
 	if (rc) {
 		plt_err("Failed to restore backpressure, rc=%d", rc);
 		return rc;
@@ -1420,6 +1437,139 @@ roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
 	return rc;
 }
 
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	uint32_t nonleaf_id = nix->nb_tx_queues;
+	struct nix_tm_node *node = NULL;
+	uint8_t leaf_lvl, lvl, lvl_end;
+	uint32_t tl2_node_id;
+	uint32_t parent, i;
+	int rc = -ENOMEM;
+
+	parent = ROC_NIX_TM_NODE_ID_INVALID;
+	lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
+						 ROC_TM_LVL_SCH2);
+	leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
+						  ROC_TM_LVL_SCH4);
+
+	/* TL1 node */
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_ROOT;
+	node->tree = ROC_NIX_TM_PFC;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	parent = nonleaf_id;
+	nonleaf_id++;
+
+	/* TL2 node */
+	rc = -ENOMEM;
+	node = nix_tm_node_alloc();
+	if (!node)
+		goto error;
+
+	node->id = nonleaf_id;
+	node->parent_id = parent;
+	node->priority = 0;
+	node->weight = NIX_TM_DFLT_RR_WT;
+	node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+	node->lvl = ROC_TM_LVL_SCH1;
+	node->tree = ROC_NIX_TM_PFC;
+
+	rc = nix_tm_node_add(roc_nix, node);
+	if (rc)
+		goto error;
+
+	tl2_node_id = nonleaf_id;
+	nonleaf_id++;
+
+	for (i = 0; i < nix->nb_tx_queues; i++) {
+		parent = tl2_node_id;
+		for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+			rc = -ENOMEM;
+			node = nix_tm_node_alloc();
+			if (!node)
+				goto error;
+
+			node->id = nonleaf_id;
+			node->parent_id = parent;
+			node->priority = 0;
+			node->weight = NIX_TM_DFLT_RR_WT;
+			node->shaper_profile_id =
+				ROC_NIX_TM_SHAPER_PROFILE_NONE;
+			node->lvl = lvl;
+			node->tree = ROC_NIX_TM_PFC;
+
+			rc = nix_tm_node_add(roc_nix, node);
+			if (rc)
+				goto error;
+
+			parent = nonleaf_id;
+			nonleaf_id++;
+		}
+
+		/* SMQ is mapped to SCH4 when we have TL1 access and SCH3
+		 * otherwise
+		 */
+		lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
+						     ROC_TM_LVL_SCH3);
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = nonleaf_id;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = lvl;
+		node->tree = ROC_NIX_TM_PFC;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+
+		parent = nonleaf_id;
+		nonleaf_id++;
+
+		rc = -ENOMEM;
+		node = nix_tm_node_alloc();
+		if (!node)
+			goto error;
+
+		node->id = i;
+		node->parent_id = parent;
+		node->priority = 0;
+		node->weight = NIX_TM_DFLT_RR_WT;
+		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+		node->lvl = leaf_lvl;
+		node->tree = ROC_NIX_TM_PFC;
+
+		rc = nix_tm_node_add(roc_nix, node);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+error:
+	nix_tm_node_free(node);
+	return rc;
+}
+
 int
 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
 {
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 3257fa67c7..8aa2996956 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -464,10 +464,16 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 	/* Disable backpressure, it will be enabled back if needed on
 	 * hierarchy enable
 	 */
-	rc = nix_tm_bp_config_set(roc_nix, false);
-	if (rc) {
-		plt_err("Failed to disable backpressure for flush, rc=%d", rc);
-		goto cleanup;
+	for (i = 0; i < sq_cnt; i++) {
+		sq = nix->sqs[i];
+		if (!sq)
+			continue;
+
+		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+		if (rc) {
+			plt_err("Failed to disable backpressure, rc=%d", rc);
+			goto cleanup;
+		}
 	}
 
 	/* Flush all tx queues */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 07c6720f0c..cee33c40cc 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -105,6 +105,7 @@ INTERNAL {
 	roc_nix_bpf_stats_reset;
 	roc_nix_bpf_stats_to_idx;
 	roc_nix_bpf_timeunit_get;
+	roc_nix_chan_count_get;
 	roc_nix_cq_dump;
 	roc_nix_cq_fini;
 	roc_nix_cq_init;
@@ -195,6 +196,8 @@ INTERNAL {
 	roc_nix_npc_promisc_ena_dis;
 	roc_nix_npc_rx_ena_dis;
 	roc_nix_npc_mcast_config;
+	roc_nix_pfc_mode_set;
+	roc_nix_pfc_mode_get;
 	roc_nix_ptp_clock_read;
 	roc_nix_ptp_info_cb_register;
 	roc_nix_ptp_info_cb_unregister;
@@ -259,6 +262,7 @@ INTERNAL {
 	roc_nix_tm_node_stats_get;
 	roc_nix_tm_node_suspend_resume;
 	roc_nix_tm_prealloc_res;
+	roc_nix_tm_pfc_prepare_tree;
 	roc_nix_tm_prepare_rate_limited_tree;
 	roc_nix_tm_rlimit_sq;
 	roc_nix_tm_root_has_sp;
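
Usage sketch (editor's illustration, not part of the series): with the
tc fields added to roc_nix_fc_cfg above, per-class backpressure for one
queue pair would be wired roughly as below. The queue ids, the traffic
class, and the cq_drop threshold are made-up values; "nix" is assumed to
be a configured struct roc_nix.

    /* Sketch: tie RQ 0's CQ and SQ 0 to traffic class 3 backpressure. */
    struct roc_nix_fc_cfg fc_cfg;
    int rc;

    memset(&fc_cfg, 0, sizeof(fc_cfg));
    fc_cfg.type = ROC_NIX_FC_CQ_CFG;   /* Rx side: CQ uses the TC's BPID */
    fc_cfg.cq_cfg.rq = 0;
    fc_cfg.cq_cfg.tc = 3;
    fc_cfg.cq_cfg.cq_drop = 64;        /* illustrative drop threshold */
    fc_cfg.cq_cfg.enable = true;
    rc = roc_nix_fc_config_set(nix, &fc_cfg);

    if (!rc) {
            memset(&fc_cfg, 0, sizeof(fc_cfg));
            fc_cfg.type = ROC_NIX_FC_TM_CFG; /* Tx side: SQ's TL link cfg */
            fc_cfg.tm_cfg.sq = 0;
            fc_cfg.tm_cfg.tc = 3;
            fc_cfg.tm_cfg.enable = true;
            rc = roc_nix_fc_config_set(nix, &fc_cfg);
    }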
From patchwork Tue Jan 11 08:18:31 2022
X-Patchwork-Submitter: Sunil Kumar Kori
X-Patchwork-Id: 105721
X-Patchwork-Delegate: jerinj@marvell.com
From: Sunil Kumar Kori <skori@marvell.com>
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
CC: dev@dpdk.org
Subject: [PATCH v2 2/2] net/cnxk: support priority flow control
Date: Tue, 11 Jan 2022 13:48:31 +0530
Message-ID: <20220111081831.881374-2-skori@marvell.com>
In-Reply-To: <20220111081831.881374-1-skori@marvell.com>
References: <20220109111130.751933-2-skori@marvell.com>
 <20220111081831.881374-1-skori@marvell.com>

From: Sunil Kumar Kori <skori@marvell.com>

This patch implements priority flow control support for CNXK
platforms.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v2:
 - fix application restart issue.

 drivers/net/cnxk/cnxk_ethdev.c     |  27 +++++
 drivers/net/cnxk/cnxk_ethdev.h     |  16 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 177 +++++++++++++++++++++++++++--
 3 files changed, 211 insertions(+), 9 deletions(-)
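
Usage sketch (editor's illustration, not part of the patch): the
configuration the new dev op consumes could be filled as below. struct
rte_eth_pfc_queue_conf comes from the in-flight ethdev PFC series this
driver patch builds on; the queue/TC numbers and pause_time are
arbitrary, and the field semantics follow the handler in the diff below.

    /* Sketch: pair TC 3 with Rx queue 3 and Tx queue 3. */
    struct rte_eth_pfc_queue_conf pfc_conf;

    memset(&pfc_conf, 0, sizeof(pfc_conf));
    pfc_conf.mode = RTE_ETH_FC_FULL;
    pfc_conf.rx_pause.tc = 3;      /* received PFC frames for TC 3 ...  */
    pfc_conf.rx_pause.tx_qid = 3;  /* ... pause this Tx queue           */
    pfc_conf.tx_pause.tc = 3;      /* transmit PFC frames for TC 3 ...  */
    pfc_conf.tx_pause.rx_qid = 3;  /* ... when this Rx queue backs up   */
    pfc_conf.tx_pause.pause_time = 0x100; /* quanta, illustrative value */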
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 74f625553d..e2690f0640 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1260,6 +1260,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1548,6 +1550,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_set = cnxk_nix_priority_flow_ctrl_queue_set,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1721,6 +1724,8 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf = {0};
+	struct rte_eth_fc_conf fc_conf = {0};
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
@@ -1736,6 +1741,28 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
+	for (i = 0; i < 16; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_set(eth_dev,
+								   &pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5bfda3d815..28fb19307a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -143,6 +143,16 @@ struct cnxk_fc_cfg {
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -366,6 +376,8 @@ struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
+	uint16_t pfc_tc_sq_map[16];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -467,6 +479,8 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+					  struct rte_eth_pfc_queue_conf *pfc_conf);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -606,6 +620,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index ce5f1f7240..b3173c343e 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,8 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
+	devinfo->pfc_queue_tc_max = roc_nix_chan_count_get(&dev->nix);
 	return 0;
 }
 
@@ -230,6 +232,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +252,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +288,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	}
 
 	for (i = 0; i < data->nb_rx_queues; i++) {
-		rc = nix_fc_cq_config_set(dev, i, tx_pause);
+		struct roc_nix_fc_cfg fc_cfg;
+
+		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+		rxq = ((struct cnxk_eth_rxq_sp *)
+		       data->rx_queues[i]) - 1;
+		rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 		if (rc)
 			return rc;
 	}
@@ -290,14 +301,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)
+			       data->tx_queues[i]) - 1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +327,29 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+				      struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf = {0};
+	int rc;
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -911,3 +950,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop the port=%d for setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC can not be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_FC_FULL) || (mode == RTE_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_PFC) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = conf->rx_tc;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		return rc;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}
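
Closing usage sketch (editor's illustration, not part of the patch):
since cnxk_nix_info_get() above reports the per-port traffic class limit,
an application would typically validate its TC index first. This assumes
the pfc_queue_tc_max dev_info field from the in-flight ethdev PFC series;
the helper name is hypothetical.

    /* Sketch: reject TC indexes beyond what the port advertises. The PMD
     * fills dev_info.pfc_queue_tc_max from roc_nix_chan_count_get().
     */
    static int
    pfc_tc_check(uint16_t port_id, uint8_t tc)
    {
            struct rte_eth_dev_info dev_info;
            int rc;

            rc = rte_eth_dev_info_get(port_id, &dev_info);
            if (rc != 0)
                    return rc;

            return (tc < dev_info.pfc_queue_tc_max) ? 0 : -EINVAL;
    }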