[06/27] net/mlx5: enable mark flag for all ports in the same domain

Message ID 20220923144334.27736-7-suanmingm@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series net/mlx5: HW steering PMD update

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Suanming Mou Sept. 23, 2022, 2:43 p.m. UTC
  From: Bing Zhao <bingz@nvidia.com>

In switchdev mode there is a single FDB domain shared by all the
representors, and only the E-Switch manager can insert rules into
this domain.

Consider a flow rule like the one below:
flow create 0 ingress transfer pattern port_id id is X / eth / end
actions mark id 25 ...
The rule targets representor X, but the mark flag was not enabled for
the Rx queues of that port.
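
For illustration only (not part of this patch), below is a minimal sketch
of how an application would read the MARK id from packets received on
representor X; the port and queue numbers are placeholder assumptions.
The mark only shows up in the mbufs when the mark flag is enabled on the
Rx queues of that representor, which is exactly what was missing:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative only: poll one Rx queue of representor X and print the
 * MARK id carried by the matched packets.
 */
static void
poll_marked_packets(uint16_t repr_port, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb, i;

	nb = rte_eth_rx_burst(repr_port, queue_id, pkts, 32);
	for (i = 0; i < nb; i++) {
		/* Set by the PMD only when the queue's mark flag is on. */
		if (pkts[i]->ol_flags & RTE_MBUF_F_RX_FDIR_ID)
			printf("mark id %u\n", pkts[i]->hash.fdir.hi);
		rte_pktmbuf_free(pkts[i]);
	}
}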

To fix this, when the mark flag needs to be enabled in the FDB case,
enable it on the Rx queues of all the ports belonging to the same
E-Switch domain, and do this only once per domain.
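
The patch below walks the PMD's own port list to find the sibling ports.
As a rough application-level analogy only (again, not part of the patch),
the same "belonging to the same domain" grouping is visible through the
generic switch_info each ethdev reports:

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative only: list the ethdev ports reporting the same switch
 * domain_id as a reference port.
 */
static void
list_domain_siblings(uint16_t ref_port)
{
	struct rte_eth_dev_info ref_info, info;
	uint16_t port_id;

	if (rte_eth_dev_info_get(ref_port, &ref_info) != 0 ||
	    ref_info.switch_info.domain_id ==
	    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
		return;
	RTE_ETH_FOREACH_DEV(port_id) {
		if (port_id == ref_port)
			continue;
		if (rte_eth_dev_info_get(port_id, &info) != 0)
			continue;
		if (info.switch_info.domain_id ==
		    ref_info.switch_info.domain_id)
			printf("port %u is in the same switch domain as port %u\n",
			       port_id, ref_port);
	}
}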

Fixes: e211aca851a7 ("net/mlx5: fix mark enabling for Rx")

Signed-off-by: Bing Zhao <bingz@nvidia.com>
---
 drivers/net/mlx5/mlx5.h      |  2 ++
 drivers/net/mlx5/mlx5_flow.c | 28 ++++++++++++++++++++++++----
 2 files changed, 26 insertions(+), 4 deletions(-)
  

Patch

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f3bd45d4c5..18d70e795f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1202,6 +1202,8 @@  struct mlx5_dev_ctx_shared {
 	uint32_t flow_priority_check_flag:1; /* Check Flag for flow priority. */
 	uint32_t metadata_regc_check_flag:1; /* Check Flag for metadata REGC. */
 	uint32_t hws_tags:1; /* Check if tags info for HWS initialized. */
+	uint32_t shared_mark_enabled:1;
+	/* If mark action is enabled on Rxqs (shared E-Switch domain). */
 	uint32_t max_port; /* Maximal IB device port index. */
 	struct mlx5_bond_info bond; /* Bonding information. */
 	struct mlx5_common_device *cdev; /* Backend mlx5 device. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 3abb39aa92..c856d249db 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1481,13 +1481,32 @@  flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *rxq_ctrl;
+	uint16_t port_id;
 
-	if (priv->mark_enabled)
+	if (priv->sh->shared_mark_enabled)
 		return;
-	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
-		rxq_ctrl->rxq.mark = 1;
+	if (priv->master || priv->representor) {
+		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
+			struct mlx5_priv *opriv =
+				rte_eth_devices[port_id].data->dev_private;
+
+			if (!opriv ||
+			    opriv->sh != priv->sh ||
+			    opriv->domain_id != priv->domain_id ||
+			    opriv->mark_enabled)
+				continue;
+			LIST_FOREACH(rxq_ctrl, &opriv->rxqsctrl, next) {
+				rxq_ctrl->rxq.mark = 1;
+			}
+			opriv->mark_enabled = 1;
+		}
+	} else {
+		LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+			rxq_ctrl->rxq.mark = 1;
+		}
+		priv->mark_enabled = 1;
 	}
-	priv->mark_enabled = 1;
+	priv->sh->shared_mark_enabled = 1;
 }
 
 /**
@@ -1623,6 +1642,7 @@  flow_rxq_flags_clear(struct rte_eth_dev *dev)
 		rxq->ctrl->rxq.tunnel = 0;
 	}
 	priv->mark_enabled = 0;
+	priv->sh->shared_mark_enabled = 0;
 }
 
 /**