From patchwork Fri Mar 3 08:10:06 2023
X-Patchwork-Submitter: Nithin Dabilpuram
X-Patchwork-Id: 124765
X-Patchwork-Delegate: jerinj@marvell.com
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: dev@dpdk.org
Subject: [PATCH 08/15] net/cnxk: check flow control config per queue on dev start
Date: Fri, 3 Mar 2023 13:40:06 +0530
Message-ID: <20230303081013.589868-8-ndabilpuram@marvell.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230303081013.589868-1-ndabilpuram@marvell.com>
References: <20230303081013.589868-1-ndabilpuram@marvell.com>
List-Id: DPDK patches and discussions

Check and enable/disable flow control config per queue on device start
to handle cases such as SSO enablement, TM changes, etc. Modify the
flow control config get operation to report status per RQ/SQ. Also
disallow changes to the flow control config while the device is in the
started state.
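A minimal usage sketch of the resulting contract, assuming a
hypothetical helper name (with this change, flow control must be
applied while the port is stopped, otherwise the PMD returns -EBUSY):

#include <string.h>
#include <rte_ethdev.h>

/* Illustrative helper (not part of this patch): apply link-level flow
 * control, then start the port. The set must happen on a stopped port.
 */
static int
port_set_fc_and_start(uint16_t port_id, enum rte_eth_fc_mode mode)
{
	struct rte_eth_fc_conf fc_conf;
	int rc;

	memset(&fc_conf, 0, sizeof(fc_conf));
	fc_conf.mode = mode; /* cnxk supports only MODE configuration */

	rc = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	if (rc)
		return rc;

	return rte_eth_dev_start(port_id);
}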
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c     |   9 +-
 drivers/net/cnxk/cnxk_ethdev_ops.c | 198 ++++++++++++++++-------------
 2 files changed, 113 insertions(+), 94 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index e99335b117..d8ccd307a8 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -363,7 +363,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	int rc;
 
-	if (roc_nix_is_vf_or_sdp(&dev->nix))
+	if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
 		return 0;
 
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
@@ -388,7 +388,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct rte_eth_fc_conf fc_cfg = {0};
 
-	if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
+	if (roc_nix_is_sdp(&dev->nix))
+		return 0;
+
+	/* Don't do anything if PFC is enabled */
+	if (dev->pfc_cfg.rx_pause_en || dev->pfc_cfg.tx_pause_en)
 		return 0;
 
 	fc_cfg.mode = fc->mode;
@@ -481,7 +485,6 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	sq->qid = qid;
 	sq->nb_desc = nb_desc;
 	sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
-	sq->tc = ROC_NIX_PFC_CLASS_INVALID;
 
 	if (nix->tx_compl_ena) {
 		sq->cqid = sq->qid + dev->nb_rxq;
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index a6ab493626..5df7927d7b 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -205,12 +205,15 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 		       struct rte_eth_fc_conf *fc_conf)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	enum rte_eth_fc_mode mode_map[] = {
-		RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
-		RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
-	};
+	enum rte_eth_fc_mode mode_map[2][2] = {
+		[0][0] = RTE_ETH_FC_NONE,
+		[0][1] = RTE_ETH_FC_TX_PAUSE,
+		[1][0] = RTE_ETH_FC_RX_PAUSE,
+		[1][1] = RTE_ETH_FC_FULL,
+	};
 	struct roc_nix *nix = &dev->nix;
-	int mode;
+	uint8_t rx_pause, tx_pause;
+	int mode, i;
 
 	if (roc_nix_is_sdp(nix))
 		return 0;
@@ -219,32 +222,25 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 	if (mode < 0)
 		return mode;
 
+	rx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_RX);
+	tx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_TX);
+
+	/* Report flow control as disabled even if one RQ/SQ has it disabled */
+	for (i = 0; i < dev->nb_rxq; i++) {
+		if (dev->rqs[i].tc == ROC_NIX_PFC_CLASS_INVALID)
+			tx_pause = 0;
+	}
+
+	for (i = 0; i < dev->nb_txq; i++) {
+		if (dev->sqs[i].tc == ROC_NIX_PFC_CLASS_INVALID)
+			rx_pause = 0;
+	}
+
 	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
-	fc_conf->mode = mode_map[mode];
+	fc_conf->mode = mode_map[rx_pause][tx_pause];
 	return 0;
 }
 
-static int
-nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
-{
-	struct roc_nix *nix = &dev->nix;
-	struct roc_nix_fc_cfg fc_cfg;
-	struct roc_nix_cq *cq;
-	struct roc_nix_rq *rq;
-
-	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-	rq = &dev->rqs[qid];
-	cq = &dev->cqs[qid];
-	fc_cfg.type = ROC_NIX_FC_RQ_CFG;
-	fc_cfg.rq_cfg.enable = enable;
-	fc_cfg.rq_cfg.tc = 0;
-	fc_cfg.rq_cfg.rq = qid;
-	fc_cfg.rq_cfg.pool = rq->aura_handle;
-	fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
-
-	return roc_nix_fc_config_set(nix, &fc_cfg);
-}
-
 int
 cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
		       struct rte_eth_fc_conf *fc_conf)
@@ -260,68 +256,90 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct cnxk_eth_rxq_sp *rxq;
 	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
+	struct roc_nix_sq *sq;
+	struct roc_nix_cq *cq;
+	struct roc_nix_rq *rq;
+	uint8_t tc;
 	int rc, i;
 
 	if (roc_nix_is_sdp(nix))
 		return 0;
 
+	if (dev->pfc_cfg.rx_pause_en || dev->pfc_cfg.tx_pause_en) {
+		plt_err("Disable PFC before configuring Flow Control");
+		return -ENOTSUP;
+	}
+
 	if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
 	    fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
 		plt_info("Only MODE configuration is supported");
 		return -EINVAL;
 	}
-
-	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
-		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
-	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
-		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
-
-	if (fc_conf->mode == fc->mode) {
-		fc->rx_pause = rx_pause;
-		fc->tx_pause = tx_pause;
-		return 0;
+	/* Disallow flow control changes when device is in started state */
+	if (data->dev_started) {
+		plt_info("Stop the port=%d for setting flow control", data->port_id);
+		return -EBUSY;
 	}
 
+	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) || (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) || (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
+
 	/* Check if TX pause frame is already enabled or not */
-	if (fc->tx_pause ^ tx_pause) {
-		if (roc_model_is_cn96_ax() && data->dev_started) {
-			/* On Ax, CQ should be in disabled state
-			 * while setting flow control configuration.
-			 */
-			plt_info("Stop the port=%d for setting flow control",
-				 data->port_id);
-			return 0;
-		}
+	tc = tx_pause ? 0 : ROC_NIX_PFC_CLASS_INVALID;
+	for (i = 0; i < data->nb_rx_queues; i++) {
+		struct roc_nix_fc_cfg fc_cfg;
 
-		for (i = 0; i < data->nb_rx_queues; i++) {
-			struct roc_nix_fc_cfg fc_cfg;
+		/* Skip if RQ does not exist */
+		if (!data->rx_queues[i])
+			continue;
 
-			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
-			      1;
-			rxq->tx_pause = !!tx_pause;
-			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
-			if (rc)
-				return rc;
-		}
+		rxq = cnxk_eth_rxq_to_sp(data->rx_queues[i]);
+		rq = &dev->rqs[rxq->qid];
+		cq = &dev->cqs[rxq->qid];
+
+		/* Skip if RQ is in expected state */
+		if (fc->tx_pause == tx_pause && rq->tc == tc)
+			continue;
+
+		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+		fc_cfg.type = ROC_NIX_FC_RQ_CFG;
+		fc_cfg.rq_cfg.enable = !!tx_pause;
+		fc_cfg.rq_cfg.tc = 0;
+		fc_cfg.rq_cfg.rq = rq->qid;
+		fc_cfg.rq_cfg.pool = rq->aura_handle;
+		fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
+
+		rc = roc_nix_fc_config_set(nix, &fc_cfg);
+		if (rc)
+			return rc;
+		rxq->tx_pause = !!tx_pause;
 	}
 
 	/* Check if RX pause frame is enabled or not */
-	if (fc->rx_pause ^ rx_pause) {
-		for (i = 0; i < data->nb_tx_queues; i++) {
-			struct roc_nix_fc_cfg fc_cfg;
-
-			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
-			      1;
-			fc_cfg.type = ROC_NIX_FC_TM_CFG;
-			fc_cfg.tm_cfg.sq = txq->qid;
-			fc_cfg.tm_cfg.enable = !!rx_pause;
-			rc = roc_nix_fc_config_set(nix, &fc_cfg);
-			if (rc)
-				return rc;
-		}
+	tc = rx_pause ? 0 : ROC_NIX_PFC_CLASS_INVALID;
+	for (i = 0; i < data->nb_tx_queues; i++) {
+		struct roc_nix_fc_cfg fc_cfg;
+
+		/* Skip if SQ does not exist */
+		if (!data->tx_queues[i])
+			continue;
+
+		txq = cnxk_eth_txq_to_sp(data->tx_queues[i]);
+		sq = &dev->sqs[txq->qid];
+
+		/* Skip if SQ is in expected state */
+		if (fc->rx_pause == rx_pause && sq->tc == tc)
+			continue;
+
+		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+		fc_cfg.type = ROC_NIX_FC_TM_CFG;
+		fc_cfg.tm_cfg.sq = txq->qid;
+		fc_cfg.tm_cfg.tc = 0;
+		fc_cfg.tm_cfg.enable = !!rx_pause;
+		rc = roc_nix_fc_config_set(nix, &fc_cfg);
+		if (rc && rc != EEXIST)
+			return rc;
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -350,6 +368,7 @@ cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
 					 struct rte_eth_pfc_queue_conf *pfc_conf)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
 	struct roc_nix *nix = &dev->nix;
 	enum rte_eth_fc_mode mode;
 	uint8_t en, tc;
@@ -366,6 +385,12 @@ cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
 		return -ENOTSUP;
 	}
 
+	/* Disallow flow control changes when device is in started state */
+	if (data->dev_started) {
+		plt_info("Stop the port=%d for setting PFC", data->port_id);
+		return -EBUSY;
+	}
+
 	mode = pfc_conf->mode;
 
 	/* Perform Tx pause configuration on RQ */
@@ -1094,7 +1119,7 @@ nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
 	enum roc_nix_fc_mode mode;
 	struct roc_nix_rq *rq;
 	struct roc_nix_cq *cq;
-	int rc;
+	int rc, i;
 
 	if (roc_model_is_cn96_ax() && data->dev_started) {
 		/* On Ax, CQ should be in disabled state
@@ -1127,15 +1152,13 @@ nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
 	if (rc)
 		return rc;
 
-	if (rxq->tx_pause != tx_pause) {
-		if (tx_pause)
-			pfc->tx_pause_en++;
-		else
-			pfc->tx_pause_en--;
-	}
-
 	rxq->tx_pause = !!tx_pause;
 	rxq->tc = tc;
+	/* Recheck number of RQ's that have PFC enabled */
+	pfc->tx_pause_en = 0;
+	for (i = 0; i < dev->nb_rxq; i++)
+		if (dev->rqs[i].tc != ROC_NIX_PFC_CLASS_INVALID)
+			pfc->tx_pause_en++;
 
 	/* Skip if PFC already enabled in mac */
 	if (pfc->tx_pause_en > 1)
@@ -1168,7 +1191,7 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
 	struct cnxk_eth_txq_sp *txq;
 	enum roc_nix_fc_mode mode;
 	struct roc_nix_sq *sq;
-	int rc;
+	int rc, i;
 
 	if (data->tx_queues == NULL)
 		return -EINVAL;
@@ -1212,18 +1235,11 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
 	if (rc)
 		return rc;
 
-	/* Maintaining a count for SQs which are configured for PFC. This is
-	 * required to handle disabling of a particular SQ without affecting
-	 * PFC on other SQs.
-	 */
-	if (!fc_cfg.tm_cfg.enable && sq->tc != ROC_NIX_PFC_CLASS_INVALID) {
-		sq->tc = ROC_NIX_PFC_CLASS_INVALID;
-		pfc->rx_pause_en--;
-	} else if (fc_cfg.tm_cfg.enable &&
-		   sq->tc == ROC_NIX_PFC_CLASS_INVALID) {
-		sq->tc = tc;
-		pfc->rx_pause_en++;
-	}
+	/* Recheck number of SQ's that have PFC enabled */
+	pfc->rx_pause_en = 0;
+	for (i = 0; i < dev->nb_txq; i++)
+		if (dev->sqs[i].tc != ROC_NIX_PFC_CLASS_INVALID)
+			pfc->rx_pause_en++;
 
 	if (pfc->rx_pause_en > 1)
 		goto exit;
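
For reference, the per-queue status aggregation that cnxk_nix_flow_ctrl_get()
now performs can be summarized by the following stand-alone sketch. This is a
hypothetical helper mirroring the patch's logic, not driver code; the
rq_fc_en/sq_fc_en arrays stand in for the driver's per-queue tc checks. A
direction is reported enabled only when every relevant queue has it enabled.

#include <stdbool.h>
#include <rte_ethdev.h>

/* Start from the MAC-level mode, clear a direction if any queue has it
 * disabled, then map the (rx_pause, tx_pause) pair through the 2x2 table.
 */
static enum rte_eth_fc_mode
fc_mode_report(bool mac_rx, bool mac_tx,
	       const bool *rq_fc_en, int nb_rq,
	       const bool *sq_fc_en, int nb_sq)
{
	static const enum rte_eth_fc_mode mode_map[2][2] = {
		[0][0] = RTE_ETH_FC_NONE,     [0][1] = RTE_ETH_FC_TX_PAUSE,
		[1][0] = RTE_ETH_FC_RX_PAUSE, [1][1] = RTE_ETH_FC_FULL,
	};
	bool rx_pause = mac_rx, tx_pause = mac_tx;
	int i;

	/* An RQ without FC configured cannot trigger pause-frame
	 * transmission on congestion: report Tx pause as disabled.
	 */
	for (i = 0; i < nb_rq; i++)
		if (!rq_fc_en[i])
			tx_pause = false;

	/* An SQ without FC configured is not back-pressured by received
	 * pause frames: report Rx pause as disabled.
	 */
	for (i = 0; i < nb_sq; i++)
		if (!sq_fc_en[i])
			rx_pause = false;

	return mode_map[rx_pause][tx_pause];
}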