[v1] net/mlx5: fix missing marks on received packets

Message ID 20221110020950.662431-1-rongweil@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series [v1] net/mlx5: fix missing marks on received packets

Checks

Context                          Check    Description
ci/checkpatch                    success  coding style OK
ci/Intel-compilation             success  Compilation OK
ci/intel-Testing                 success  Testing PASS
ci/iol-mellanox-Performance      success  Performance Testing PASS
ci/github-robot: build           success  github build: passed
ci/iol-intel-Functional          success  Functional Testing PASS
ci/iol-intel-Performance         fail     Performance Testing issues
ci/iol-aarch64-unit-testing      success  Testing PASS
ci/iol-x86_64-compile-testing    success  Testing PASS
ci/iol-x86_64-unit-testing       success  Testing PASS
ci/iol-aarch64-compile-testing   success  Testing PASS

Commit Message

Rongwei Liu Nov. 10, 2022, 2:09 a.m. UTC
When HW Steering is enabled, Rx queues are configured to receive MARKs
once a table with MARK actions is created. After the port is stopped,
the Rx queue configuration is released, but when the port is started
again the mark flag is not restored in the Rx queue configuration, so
received packets lose their marks.

This patch introduces a reference count on the MARK action, incremented
and decremented on template table create and destroy.

When the port is stopped, the Rx queue configuration is not cleared as
long as the reference count is not zero.
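As a rough sketch of the scheme (illustrative only, with hypothetical
helper names; the real changes are in the patch below, which uses the
driver's hws_mark_refcnt counter and flow_hw_rxq_flag_set()):

	/* Illustrative sketch, not driver code: the first MARK user enables
	 * the Rx queue mark flag, the last one disables it. */
	static void
	hws_mark_ref(struct mlx5_priv *priv, struct rte_eth_dev *dev)
	{
		/* Called when a template table with a MARK action is created. */
		__atomic_add_fetch(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
		flow_hw_rxq_flag_set(dev, true);
	}

	static void
	hws_mark_unref(struct mlx5_priv *priv, struct rte_eth_dev *dev)
	{
		/* Called when such a table is destroyed; clear the flag only
		 * when the last reference goes away. */
		if (!__atomic_sub_fetch(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED))
			flow_hw_rxq_flag_set(dev, false);
	}

At port stop the flag is likewise left set while the counter is
non-zero, so marks keep being delivered after a stop/start cycle.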

Fixes: 3a2f674b6aa8 ("net/mlx5: add queue and RSS HW steering action")
Cc: stable@dpdk.org
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5.c         |  3 ++-
 drivers/net/mlx5/mlx5.h         |  1 +
 drivers/net/mlx5/mlx5_flow.c    | 28 +++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow.h    |  2 ++
 drivers/net/mlx5/mlx5_flow_hw.c | 34 ++++++---------------------------
 drivers/net/mlx5/mlx5_trigger.c |  7 ++++++-
 6 files changed, 45 insertions(+), 30 deletions(-)
  

Comments

Raslan Darawsheh Nov. 10, 2022, 12:17 p.m. UTC | #1
Hi,

> -----Original Message-----
> From: Rongwei Liu <rongweil@nvidia.com>
> Sent: Thursday, November 10, 2022 4:10 AM
> To: Matan Azrad <matan@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>; NBU-Contact-
> Thomas Monjalon (EXTERNAL) <thomas@monjalon.net>; Suanming Mou
> <suanmingm@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>;
> stable@dpdk.org
> Subject: [PATCH v1] net/mlx5: fix missing marks on received packets
> 
> If HW Steering is enabled, Rx queues were configured to receive MARKs
> when a table with MARK actions was created. After stopping the port, Rx
> queue configuration is released, but during starting the port the mark flag
> was not updated in the Rx queue configuration.
> 
> This patch introduces a reference count on the MARK action and it
> increases/decreases per template_table create/destroy.
> 
> When the port is stopped, Rx queue configuration is not cleared if reference
> count is not zero.
> 
> Fixes: 3a2f674b6aa8 ("net/mlx5: add queue and RSS HW steering action")
> Cc: stable@dpdk.org
> Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>

Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
  

Patch

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 1cf6df6049..bef8e35b6d 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2010,7 +2010,8 @@  mlx5_dev_close(struct rte_eth_dev *dev)
 	 * If default mreg copy action is removed at the stop stage,
 	 * the search will return none and nothing will be done anymore.
 	 */
-	mlx5_flow_stop_default(dev);
+	if (priv->sh->config.dv_flow_en != 2)
+		mlx5_flow_stop_default(dev);
 	mlx5_traffic_disable(dev);
 	/*
 	 * If all the flows are already flushed in the device stop stage,
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7d82cb7c8f..02bee5808d 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1747,6 +1747,7 @@  struct mlx5_priv {
 	uint32_t flex_item_map; /* Map of allocated flex item elements. */
 	uint32_t nb_queue; /* HW steering queue number. */
 	struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
+	uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	/* Item template list. */
 	LIST_HEAD(flow_hw_itt, rte_flow_pattern_template) flow_hw_itt;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index b3c1336ab8..a0cf677fb0 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7499,6 +7499,34 @@  mlx5_flow_stop_default(struct rte_eth_dev *dev)
 	flow_rxq_flags_clear(dev);
 }
 
+/**
+ * Set rxq flag.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] enable
+ *   Flag to enable or not.
+ */
+void
+flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	unsigned int i;
+
+	if ((!priv->mark_enabled && !enable) ||
+	    (priv->mark_enabled && enable))
+		return;
+	for (i = 0; i < priv->rxqs_n; ++i) {
+		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
+
+		/* With RXQ start/stop feature, RXQ might be stopped. */
+		if (!rxq_ctrl)
+			continue;
+		rxq_ctrl->rxq.mark = enable;
+	}
+	priv->mark_enabled = enable;
+}
+
 /**
  * Start all default actions for flows.
  *
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 187e9a23bf..955383dd07 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2516,6 +2516,8 @@  mlx5_get_tof(const struct rte_flow_item *items,
 	     enum mlx5_tof_rule_type *rule_type);
 void
 flow_hw_resource_release(struct rte_eth_dev *dev);
+void
+flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable);
 int flow_dv_action_validate(struct rte_eth_dev *dev,
 			    const struct rte_flow_indir_action_conf *conf,
 			    const struct rte_flow_action *action,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 7f1ae0fbb0..537bcf141d 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -155,34 +155,6 @@  static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {
 	.type = 0,
 };
 
-/**
- * Set rxq flag.
- *
- * @param[in] dev
- *   Pointer to the rte_eth_dev structure.
- * @param[in] enable
- *   Flag to enable or not.
- */
-static void
-flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	unsigned int i;
-
-	if ((!priv->mark_enabled && !enable) ||
-	    (priv->mark_enabled && enable))
-		return;
-	for (i = 0; i < priv->rxqs_n; ++i) {
-		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
-
-		/* With RXQ start/stop feature, RXQ might be stopped. */
-		if (!rxq_ctrl)
-			continue;
-		rxq_ctrl->rxq.mark = enable;
-	}
-	priv->mark_enabled = enable;
-}
-
 /**
  * Set the hash fields according to the @p rss_desc information.
  *
@@ -462,6 +434,10 @@  __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
 		mlx5_ipool_free(priv->acts_ipool, data->idx);
 	}
 
+	if (acts->mark)
+		if (!__atomic_sub_fetch(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED))
+			flow_hw_rxq_flag_set(dev, false);
+
 	if (acts->jump) {
 		struct mlx5_flow_group *grp;
 
@@ -484,6 +460,7 @@  __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
 		if (acts->mhdr->action)
 			mlx5dr_action_destroy(acts->mhdr->action);
 		mlx5_free(acts->mhdr);
+		acts->mhdr = NULL;
 	}
 	if (mlx5_hws_cnt_id_valid(acts->cnt_id)) {
 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &acts->cnt_id);
@@ -1422,6 +1399,7 @@  __flow_hw_actions_translate(struct rte_eth_dev *dev,
 				goto err;
 			acts->rule_acts[action_pos].action =
 				priv->hw_tag[!!attr->group];
+			__atomic_add_fetch(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
 			flow_hw_rxq_flag_set(dev, true);
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 4b821a1076..df7ebf4ee0 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1408,7 +1408,12 @@  mlx5_dev_stop(struct rte_eth_dev *dev)
 	mlx5_mp_os_req_stop_rxtx(dev);
 	rte_delay_us_sleep(1000 * priv->rxqs_n);
 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
-	mlx5_flow_stop_default(dev);
+	if (priv->sh->config.dv_flow_en == 2) {
+		if (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))
+			flow_hw_rxq_flag_set(dev, false);
+	} else {
+		mlx5_flow_stop_default(dev);
+	}
 	/* Control flows for default traffic can be removed firstly. */
 	mlx5_traffic_disable(dev);
 	/* All RX queue flags will be cleared in the flush interface. */