[16/27] net/mlx5: support indirect count action for HW steering

Message ID 20220923144334.27736-17-suanmingm@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: net/mlx5: HW steering PMD update

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Suanming Mou Sept. 23, 2022, 2:43 p.m. UTC
  From: Xiaoyu Min <jackmin@nvidia.com>

The indirect counter action is treated as a _shared_ counter among
the flows that use it.

This _shared_ counter is taken from the counter pool when the indirect
action is created, and is put back to the counter pool when the
indirect action is destroyed.

Signed-off-by: Xiaoyu Min <jackmin@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.h    |   3 +
 drivers/net/mlx5/mlx5_flow_hw.c | 104 +++++++++++++++++++++++++++++++-
 2 files changed, 104 insertions(+), 3 deletions(-)
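
For context, the sketch below shows how an application would typically consume this feature through the generic rte_flow indirect-action API: one shared counter handle is created once, referenced from any number of flow rules, queried, and finally destroyed, which puts the counter back to the pool. This is an illustrative sketch, not part of the patch; the port id is a placeholder, the flow-rule side is only hinted at in comments, and error handling is minimal.

#include <rte_flow.h>

static int
shared_counter_usage_sketch(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow_indir_action_conf iconf = { .ingress = 1 };
	struct rte_flow_action_count cnt_conf = { .id = 0 };
	const struct rte_flow_action cnt_act = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
		.conf = &cnt_conf,
	};
	struct rte_flow_action_handle *handle;
	struct rte_flow_query_count stats = { .reset = 0 };

	/* Acquire one counter from the pool; every flow rule that
	 * references the returned handle shares this single counter.
	 */
	handle = rte_flow_action_handle_create(port_id, &iconf, &cnt_act, &err);
	if (handle == NULL)
		return -1;
	/* A flow rule references the counter indirectly, e.g. with
	 * { .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = handle }
	 * in its action list, followed by a fate action and END.
	 */
	/* All rules using the handle update the same hits/bytes. */
	if (rte_flow_action_handle_query(port_id, handle, &stats, &err))
		return -1;
	/* Destroying the handle puts the counter back to the pool. */
	return rte_flow_action_handle_destroy(port_id, handle, &err);
}

With the template/async flow API the per-queue rte_flow_async_action_handle_create()/destroy() variants would normally be used instead; the synchronous calls are shown here only to keep the sketch short.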
  

Patch

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index c982cb953a..a39dacc60a 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1148,6 +1148,9 @@  struct mlx5_action_construct_data {
 			uint32_t level; /* RSS level. */
 			uint32_t idx; /* Shared action index. */
 		} shared_rss;
+		struct {
+			uint32_t id;
+		} shared_counter;
 	};
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index de82396a04..92b61b63d1 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -536,6 +536,44 @@  __flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,
 	return 0;
 }
 
+/**
+ * Append shared counter action to the dynamic action list.
+ *
+ * @param[in] priv
+ *   Pointer to the port private data structure.
+ * @param[in] acts
+ *   Pointer to the template HW steering DR actions.
+ * @param[in] type
+ *   Action type.
+ * @param[in] action_src
+ *   Offset of source rte flow action.
+ * @param[in] action_dst
+ *   Offset of destination DR action.
+ * @param[in] cnt_id
+ *   Shared counter id.
+ *
+ * @return
+ *    0 on success, negative value otherwise and rte_errno is set.
+ */
+static __rte_always_inline int
+__flow_hw_act_data_shared_cnt_append(struct mlx5_priv *priv,
+				     struct mlx5_hw_actions *acts,
+				     enum rte_flow_action_type type,
+				     uint16_t action_src,
+				     uint16_t action_dst,
+				     cnt_id_t cnt_id)
+{	struct mlx5_action_construct_data *act_data;
+
+	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
+	if (!act_data)
+		return -1;
+	act_data->type = type;
+	act_data->shared_counter.id = cnt_id;
+	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
+	return 0;
+}
+
+
 /**
  * Translate shared indirect action.
  *
@@ -577,6 +615,13 @@  flow_hw_shared_action_translate(struct rte_eth_dev *dev,
 		    action_src, action_dst, idx, shared_rss))
 			return -1;
 		break;
+	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
+		if (__flow_hw_act_data_shared_cnt_append(priv, acts,
+			(enum rte_flow_action_type)
+			MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
+			action_src, action_dst, act_idx))
+			return -1;
+		break;
 	default:
 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
 		break;
@@ -1454,6 +1499,13 @@  flow_hw_shared_action_construct(struct rte_eth_dev *dev,
 				(dev, &act_data, item_flags, rule_act))
 			return -1;
 		break;
+	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
+		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
+				act_idx,
+				&rule_act->action,
+				&rule_act->counter.offset))
+			return -1;
+		break;
 	default:
 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
 		break;
@@ -1761,6 +1813,17 @@  flow_hw_actions_construct(struct rte_eth_dev *dev,
 				return ret;
 			job->flow->cnt_id = cnt_id;
 			break;
+		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = mlx5_hws_cnt_pool_get_action_offset
+				(priv->hws_cpool,
+				 act_data->shared_counter.id,
+				 &rule_acts[act_data->action_dst].action,
+				 &rule_acts[act_data->action_dst].counter.offset
+				 );
+			if (ret != 0)
+				return ret;
+			job->flow->cnt_id = act_data->shared_counter.id;
+			break;
 		default:
 			break;
 		}
@@ -4860,10 +4923,28 @@  flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 			     void *user_data,
 			     struct rte_flow_error *error)
 {
+	struct rte_flow_action_handle *handle = NULL;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	cnt_id_t cnt_id;
+
 	RTE_SET_USED(queue);
 	RTE_SET_USED(attr);
 	RTE_SET_USED(user_data);
-	return flow_dv_action_create(dev, conf, action, error);
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_COUNT:
+		if (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id))
+			rte_flow_error_set(error, ENODEV,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					NULL,
+					"counter are not configured!");
+		else
+			handle = (struct rte_flow_action_handle *)
+				 (uintptr_t)cnt_id;
+		break;
+	default:
+		handle = flow_dv_action_create(dev, conf, action, error);
+	}
+	return handle;
 }
 
 /**
@@ -4927,10 +5008,19 @@  flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
 			      void *user_data,
 			      struct rte_flow_error *error)
 {
+	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
+	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+	struct mlx5_priv *priv = dev->data->dev_private;
+
 	RTE_SET_USED(queue);
 	RTE_SET_USED(attr);
 	RTE_SET_USED(user_data);
-	return flow_dv_action_destroy(dev, handle, error);
+	switch (type) {
+	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
+		return mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
+	default:
+		return flow_dv_action_destroy(dev, handle, error);
+	}
 }
 
 static int
@@ -5075,7 +5165,15 @@  flow_hw_action_query(struct rte_eth_dev *dev,
 		     const struct rte_flow_action_handle *handle, void *data,
 		     struct rte_flow_error *error)
 {
-	return flow_dv_action_query(dev, handle, data, error);
+	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
+	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+
+	switch (type) {
+	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
+		return flow_hw_query_counter(dev, act_idx, data, error);
+	default:
+		return flow_dv_action_query(dev, handle, data, error);
+	}
 }
 
 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {