[v2,2/2] net/cnxk: support priority flow control

Message ID 20220111081831.881374-2-skori@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series [v2,1/2] common/cnxk: support priority flow ctrl config API

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/github-robot: build fail github build: failed
ci/Intel-compilation fail Compilation issues

Commit Message

Sunil Kumar Kori Jan. 11, 2022, 8:18 a.m. UTC
  From: Sunil Kumar Kori <skori@marvell.com>

Implement priority flow control (PFC) support for CNXK platforms.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v2:
 - fix application restart issue.

 drivers/net/cnxk/cnxk_ethdev.c     |  28 +++++
 drivers/net/cnxk/cnxk_ethdev.h     |  18 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 171 +++++++++++++++++++++++++--
 3 files changed, 208 insertions(+), 9 deletions(-)
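
For reviewers' reference, a minimal application-side sketch of how this
op is expected to be exercised. The rte_eth_pfc_queue_conf field names
follow their use in this patch; the wrapper name
rte_eth_dev_priority_flow_ctrl_queue_set() mirrors the dev op added
below and is an assumption here, pending the ethdev-side series:

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: enable PFC on TC 0, pairing Rx queue 0 with
 * Tx queue 0. rx_pause selects the Tx queue throttled when a pause
 * frame for the TC is received; tx_pause selects the Rx queue whose
 * backlog triggers pause frames advertising the TC.
 */
static int
app_enable_pfc(uint16_t port_id)
{
	struct rte_eth_pfc_queue_conf pfc_conf;

	memset(&pfc_conf, 0, sizeof(pfc_conf));
	pfc_conf.mode = RTE_ETH_FC_FULL;
	pfc_conf.rx_pause.tc = 0;
	pfc_conf.rx_pause.tx_qid = 0;
	pfc_conf.tx_pause.tc = 0;
	pfc_conf.tx_pause.rx_qid = 0;
	pfc_conf.tx_pause.pause_time = 255;

	return rte_eth_dev_priority_flow_ctrl_queue_set(port_id, &pfc_conf);
}

Note that on cn96 Ax the port must be stopped before this call (see
nix_priority_flow_ctrl_configure below).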
  

Patch

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 74f625553d..e2690f0640 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1260,6 +1260,8 @@  cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		goto cq_fini;
 	}
 
+	/* Initialize TC to SQ mapping as invalid */
+	memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -1548,6 +1550,7 @@  struct eth_dev_ops cnxk_eth_dev_ops = {
 	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
 	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+	.priority_flow_ctrl_queue_set = cnxk_nix_priority_flow_ctrl_queue_set,
 	.dev_set_link_up = cnxk_nix_set_link_up,
 	.dev_set_link_down = cnxk_nix_set_link_down,
 	.get_module_info = cnxk_nix_get_module_info,
@@ -1721,6 +1724,8 @@  cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+	struct rte_eth_pfc_queue_conf pfc_conf = {0};
+	struct rte_eth_fc_conf fc_conf = {0};
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
@@ -1736,6 +1741,29 @@  cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	roc_nix_npc_rx_ena_dis(nix, false);
 
+	/* Restore 802.3 Flow control configuration */
+	fc_conf.mode = RTE_ETH_FC_NONE;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+	pfc_conf.mode = RTE_ETH_FC_NONE;
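+	/* Reset PFC on each TC that still has an SQ mapped */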
+	for (i = 0; i < 16; i++) {
+		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+			pfc_conf.rx_pause.tc = i;
+			pfc_conf.tx_pause.rx_qid = i;
+			pfc_conf.tx_pause.tc = i;
+			rc = cnxk_nix_priority_flow_ctrl_queue_set(eth_dev,
+								   &pfc_conf);
+			if (rc)
+				plt_err("Failed to reset PFC. error code(%d)",
+					rc);
+		}
+	}
+
+	fc_conf.mode = RTE_ETH_FC_FULL;
+	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5bfda3d815..28fb19307a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -143,6 +143,17 @@  struct cnxk_fc_cfg {
 	uint8_t tx_pause;
 };
 
+struct cnxk_pfc_cfg {
+	struct cnxk_fc_cfg fc_cfg;
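+	/* Bitmask of TCs with PFC pause enabled */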
+	uint16_t class_en;
+	uint16_t pause_time;
+	uint8_t rx_tc;
+	uint8_t rx_qid;
+	uint8_t tx_tc;
+	uint8_t tx_qid;
+};
+
 struct cnxk_eth_qconf {
 	union {
 		struct rte_eth_txconf tx;
@@ -366,6 +376,9 @@  struct cnxk_eth_dev {
 	struct cnxk_eth_qconf *rx_qconf;
 
 	/* Flow control configuration */
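+	/* TC to SQ mapping; 0xFFFF marks an unmapped TC */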
+	uint16_t pfc_tc_sq_map[16];
+	struct cnxk_pfc_cfg pfc_cfg;
 	struct cnxk_fc_cfg fc_cfg;
 
 	/* PTP Counters */
@@ -467,6 +479,8 @@  int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
 int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+					  struct rte_eth_pfc_queue_conf *pfc_conf);
 int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
 int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
 int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -606,6 +620,8 @@  int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				     struct cnxk_pfc_cfg *conf);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index ce5f1f7240..b3173c343e 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,8 @@  cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
+	devinfo->pfc_queue_tc_max = roc_nix_chan_count_get(&dev->nix);
 	return 0;
 }
 
@@ -230,6 +232,8 @@  nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	cq = &dev->cqs[qid];
 	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
 	fc_cfg.cq_cfg.enable = enable;
+	/* Map all CQs to last channel */
+	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
 	fc_cfg.cq_cfg.rq = qid;
 	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
 
@@ -248,6 +252,8 @@  cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
@@ -282,7 +288,9 @@  cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		}
 
 		for (i = 0; i < data->nb_rx_queues; i++) {
-			rc = nix_fc_cq_config_set(dev, i, tx_pause);
+			rxq = ((struct cnxk_eth_rxq_sp *)
+				data->rx_queues[i]) - 1;
+			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
 		}
@@ -290,14 +301,19 @@  cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 
 	/* Check if RX pause frame is enabled or not */
 	if (fc->rx_pause ^ rx_pause) {
-		struct roc_nix_fc_cfg fc_cfg;
-
-		memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-		fc_cfg.type = ROC_NIX_FC_TM_CFG;
-		fc_cfg.tm_cfg.enable = !!rx_pause;
-		rc = roc_nix_fc_config_set(nix, &fc_cfg);
-		if (rc)
-			return rc;
+		for (i = 0; i < data->nb_tx_queues; i++) {
+			struct roc_nix_fc_cfg fc_cfg;
+
+			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+			txq = ((struct cnxk_eth_txq_sp *)
+				data->tx_queues[i]) - 1;
+			fc_cfg.type = ROC_NIX_FC_TM_CFG;
+			fc_cfg.tm_cfg.sq = txq->qid;
+			fc_cfg.tm_cfg.enable = !!rx_pause;
+			rc = roc_nix_fc_config_set(nix, &fc_cfg);
+			if (rc)
+				return rc;
+		}
 	}
 
 	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +327,24 @@  cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
+				      struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+	struct cnxk_pfc_cfg conf = {0};
+
+	conf.fc_cfg.mode = pfc_conf->mode;
+
+	conf.pause_time = pfc_conf->tx_pause.pause_time;
+	conf.rx_tc = pfc_conf->tx_pause.tc;
+	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+	conf.tx_tc = pfc_conf->rx_pause.tc;
+	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+	return nix_priority_flow_ctrl_configure(eth_dev, &conf);
+}
+
 int
 cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
 		      const struct rte_flow_ops **ops)
@@ -911,3 +950,125 @@  cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 
 	return 0;
 }
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+				 struct cnxk_pfc_cfg *conf)
+{
+	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_pfc_cfg pfc_cfg;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_rxq_sp *rxq;
+	struct cnxk_eth_txq_sp *txq;
+	uint8_t rx_pause, tx_pause;
+	enum rte_eth_fc_mode mode;
+	struct roc_nix_cq *cq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (roc_nix_is_vf_or_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+		return -ENOTSUP;
+	}
+
+	if (roc_model_is_cn96_ax() && data->dev_started) {
+		/* On Ax, CQ should be in disabled state
+		 * while setting flow control configuration.
+		 */
+		plt_info("Stop port %d before setting flow control",
+			 data->port_id);
+		return 0;
+	}
+
+	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+		plt_err("Same TC cannot be configured on multiple SQs");
+		return -ENOTSUP;
+	}
+
+	mode = conf->fc_cfg.mode;
+	rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
+	tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
+
+	/* Configure CQs */
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
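+	/* Slow-path queue context sits just before the fast-path queue */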
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+	cq = &dev->cqs[rxq->qid];
+	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+	fc_cfg.cq_cfg.tc = conf->rx_tc;
+	fc_cfg.cq_cfg.enable = !!tx_pause;
+	fc_cfg.cq_cfg.rq = cq->qid;
+	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
+	/* Check if RX pause frame is enabled or not */
+	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+			goto exit;
+
+		if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_PFC) &&
+		    eth_dev->data->nb_tx_queues > 1) {
+			/*
+			 * Disabled xmit will be enabled when
+			 * new topology is available.
+			 */
+			rc = roc_nix_tm_hierarchy_disable(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_pfc_prepare_tree(nix);
+			if (rc)
+				goto exit;
+
+			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+							 true);
+			if (rc)
+				goto exit;
+		}
+	}
+
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	sq = &dev->sqs[txq->qid];
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_TM_CFG;
+	fc_cfg.tm_cfg.sq = sq->qid;
+	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.enable = !!rx_pause;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		goto exit;
+
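+	/* Record the TC to SQ mapping so it can be cleared at uninit */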
+	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+	/* Configure MAC block */
+	if (tx_pause)
+		pfc->class_en |= BIT(conf->rx_tc);
+	else
+		pfc->class_en &= ~BIT(conf->rx_tc);
+
+	if (pfc->class_en)
+		mode = RTE_ETH_FC_FULL;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode_map[mode];
+	pfc_cfg.tc = conf->rx_tc;
+	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+	if (rc)
+		goto exit;
+
+	pfc->fc_cfg.rx_pause = rx_pause;
+	pfc->fc_cfg.tx_pause = tx_pause;
+	pfc->fc_cfg.mode = mode;
+
+exit:
+	return rc;
+}