[v2,1/2] net/mlx5: move meter init functions

Message ID 20240221102130.10124-2-dsosnowski@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series: net/mlx5: add cross port meter mark action sharing

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Dariusz Sosnowski Feb. 21, 2024, 10:21 a.m. UTC
  Move mlx5_flow_meter_init() and mlx5_flow_meter_uninit()
from mlx5_flow_hw.c to mlx5_flow_meter.c, the module for meter operations.
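
For context, a minimal usage sketch of the moved pair. Only
mlx5_flow_meter_init()/mlx5_flow_meter_uninit() and their signatures come from
this patch; the wrapper names below are hypothetical and the snippet assumes
the mlx5 PMD internal headers.

#include "mlx5.h"
#include "mlx5_flow.h"

static int
example_meter_resources_setup(struct rte_eth_dev *dev,
			      uint32_t nb_meters,
			      uint32_t nb_meter_profiles,
			      uint32_t nb_meter_policies,
			      uint32_t nb_queues)
{
	int ret;

	if (nb_meters == 0)
		return 0; /* No meters requested, nothing to allocate. */
	/* Allocates the ASO meter bulk, the HWS meter pool and the
	 * profile/policy arrays.
	 */
	ret = mlx5_flow_meter_init(dev, nb_meters, nb_meter_profiles,
				   nb_meter_policies, nb_queues);
	if (ret != 0)
		return -ret; /* A positive errno is returned on failure. */
	return 0;
}

static void
example_meter_resources_teardown(struct rte_eth_dev *dev)
{
	/* Safe to call on a partially initialized port: every field is
	 * NULL-checked before being released.
	 */
	mlx5_flow_meter_uninit(dev);
}

Note that mlx5_flow_meter_init() already calls mlx5_flow_meter_uninit() on its
own error path, so a caller only needs the explicit teardown when tearing the
port configuration down.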

Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_hw.c    | 203 ----------------------------
 drivers/net/mlx5/mlx5_flow_meter.c | 207 +++++++++++++++++++++++++++++
 2 files changed, 207 insertions(+), 203 deletions(-)

--
2.34.1
  

Patch

diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 3bb3a9a178..4d6b22c4e3 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -12859,209 +12859,6 @@  mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
 	return 0;
 }

-void
-mlx5_flow_meter_uninit(struct rte_eth_dev *dev)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-
-	if (priv->mtr_policy_arr) {
-		mlx5_free(priv->mtr_policy_arr);
-		priv->mtr_policy_arr = NULL;
-	}
-	if (priv->mtr_profile_arr) {
-		mlx5_free(priv->mtr_profile_arr);
-		priv->mtr_profile_arr = NULL;
-	}
-	if (priv->hws_mpool) {
-		mlx5_aso_mtr_queue_uninit(priv->sh, priv->hws_mpool, NULL);
-		mlx5_ipool_destroy(priv->hws_mpool->idx_pool);
-		mlx5_free(priv->hws_mpool);
-		priv->hws_mpool = NULL;
-	}
-	if (priv->mtr_bulk.aso) {
-		mlx5_free(priv->mtr_bulk.aso);
-		priv->mtr_bulk.aso = NULL;
-		priv->mtr_bulk.size = 0;
-		mlx5_aso_queue_uninit(priv->sh, ASO_OPC_MOD_POLICER);
-	}
-	if (priv->mtr_bulk.action) {
-		mlx5dr_action_destroy(priv->mtr_bulk.action);
-		priv->mtr_bulk.action = NULL;
-	}
-	if (priv->mtr_bulk.devx_obj) {
-		claim_zero(mlx5_devx_cmd_destroy(priv->mtr_bulk.devx_obj));
-		priv->mtr_bulk.devx_obj = NULL;
-	}
-}
-
-int
-mlx5_flow_meter_init(struct rte_eth_dev *dev,
-		     uint32_t nb_meters,
-		     uint32_t nb_meter_profiles,
-		     uint32_t nb_meter_policies,
-		     uint32_t nb_queues)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_devx_obj *dcs = NULL;
-	uint32_t log_obj_size;
-	int ret = 0;
-	int reg_id;
-	struct mlx5_aso_mtr *aso;
-	uint32_t i;
-	struct rte_flow_error error;
-	uint32_t flags;
-	uint32_t nb_mtrs = rte_align32pow2(nb_meters);
-	struct mlx5_indexed_pool_config cfg = {
-		.size = sizeof(struct mlx5_aso_mtr),
-		.trunk_size = 1 << 12,
-		.per_core_cache = 1 << 13,
-		.need_lock = 1,
-		.release_mem_en = !!priv->sh->config.reclaim_mode,
-		.malloc = mlx5_malloc,
-		.max_idx = nb_meters,
-		.free = mlx5_free,
-		.type = "mlx5_hw_mtr_mark_action",
-	};
-
-	if (!nb_meters) {
-		ret = ENOTSUP;
-		rte_flow_error_set(&error, ENOMEM,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				  NULL, "Meter configuration is invalid.");
-		goto err;
-	}
-	if (!priv->mtr_en || !priv->sh->meter_aso_en) {
-		ret = ENOTSUP;
-		rte_flow_error_set(&error, ENOMEM,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				  NULL, "Meter ASO is not supported.");
-		goto err;
-	}
-	priv->mtr_config.nb_meters = nb_meters;
-	log_obj_size = rte_log2_u32(nb_meters >> 1);
-	dcs = mlx5_devx_cmd_create_flow_meter_aso_obj
-		(priv->sh->cdev->ctx, priv->sh->cdev->pdn,
-			log_obj_size);
-	if (!dcs) {
-		ret = ENOMEM;
-		rte_flow_error_set(&error, ENOMEM,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				  NULL, "Meter ASO object allocation failed.");
-		goto err;
-	}
-	priv->mtr_bulk.devx_obj = dcs;
-	reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, NULL);
-	if (reg_id < 0) {
-		ret = ENOTSUP;
-		rte_flow_error_set(&error, ENOMEM,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				  NULL, "Meter register is not available.");
-		goto err;
-	}
-	flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
-	if (priv->sh->config.dv_esw_en && priv->master)
-		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
-	priv->mtr_bulk.action = mlx5dr_action_create_aso_meter
-			(priv->dr_ctx, (struct mlx5dr_devx_obj *)dcs,
-				reg_id - REG_C_0, flags);
-	if (!priv->mtr_bulk.action) {
-		ret = ENOMEM;
-		rte_flow_error_set(&error, ENOMEM,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				  NULL, "Meter action creation failed.");
-		goto err;
-	}
-	priv->mtr_bulk.aso = mlx5_malloc(MLX5_MEM_ZERO,
-					 sizeof(struct mlx5_aso_mtr) *
-					 nb_meters,
-					 RTE_CACHE_LINE_SIZE,
-					 SOCKET_ID_ANY);
-	if (!priv->mtr_bulk.aso) {
-		ret = ENOMEM;
-		rte_flow_error_set(&error, ENOMEM,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				  NULL, "Meter bulk ASO allocation failed.");
-		goto err;
-	}
-	priv->mtr_bulk.size = nb_meters;
-	aso = priv->mtr_bulk.aso;
-	for (i = 0; i < priv->mtr_bulk.size; i++) {
-		aso->type = ASO_METER_DIRECT;
-		aso->state = ASO_METER_WAIT;
-		aso->offset = i;
-		aso++;
-	}
-	priv->hws_mpool = mlx5_malloc(MLX5_MEM_ZERO,
-				sizeof(struct mlx5_aso_mtr_pool),
-				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
-	if (!priv->hws_mpool) {
-		ret = ENOMEM;
-		rte_flow_error_set(&error, ENOMEM,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				  NULL, "Meter ipool allocation failed.");
-		goto err;
-	}
-	priv->hws_mpool->devx_obj = priv->mtr_bulk.devx_obj;
-	priv->hws_mpool->action = priv->mtr_bulk.action;
-	priv->hws_mpool->nb_sq = nb_queues;
-	if (mlx5_aso_mtr_queue_init(priv->sh, priv->hws_mpool,
-				    &priv->sh->mtrmng->pools_mng, nb_queues)) {
-		ret = ENOMEM;
-		rte_flow_error_set(&error, ENOMEM,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				  NULL, "Meter ASO queue allocation failed.");
-		goto err;
-	}
-	/*
-	 * No need for local cache if Meter number is a small number.
-	 * Since flow insertion rate will be very limited in that case.
-	 * Here let's set the number to less than default trunk size 4K.
-	 */
-	if (nb_mtrs <= cfg.trunk_size) {
-		cfg.per_core_cache = 0;
-		cfg.trunk_size = nb_mtrs;
-	} else if (nb_mtrs <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
-		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
-	}
-	priv->hws_mpool->idx_pool = mlx5_ipool_create(&cfg);
-	if (nb_meter_profiles) {
-		priv->mtr_config.nb_meter_profiles = nb_meter_profiles;
-		priv->mtr_profile_arr =
-			mlx5_malloc(MLX5_MEM_ZERO,
-				    sizeof(struct mlx5_flow_meter_profile) *
-				    nb_meter_profiles,
-				    RTE_CACHE_LINE_SIZE,
-				    SOCKET_ID_ANY);
-		if (!priv->mtr_profile_arr) {
-			ret = ENOMEM;
-			rte_flow_error_set(&error, ENOMEM,
-					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-					   NULL, "Meter profile allocation failed.");
-			goto err;
-		}
-	}
-	if (nb_meter_policies) {
-		priv->mtr_config.nb_meter_policies = nb_meter_policies;
-		priv->mtr_policy_arr =
-			mlx5_malloc(MLX5_MEM_ZERO,
-				    sizeof(struct mlx5_flow_meter_policy) *
-				    nb_meter_policies,
-				    RTE_CACHE_LINE_SIZE,
-				    SOCKET_ID_ANY);
-		if (!priv->mtr_policy_arr) {
-			ret = ENOMEM;
-			rte_flow_error_set(&error, ENOMEM,
-					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-					   NULL, "Meter policy allocation failed.");
-			goto err;
-		}
-	}
-	return 0;
-err:
-	mlx5_flow_meter_uninit(dev);
-	return ret;
-}
-
 static __rte_always_inline uint32_t
 mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
 {
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index 7cbf772ea4..9cb4614436 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -15,6 +15,213 @@ 
 #include "mlx5.h"
 #include "mlx5_flow.h"

+#ifdef HAVE_MLX5_HWS_SUPPORT
+
+void
+mlx5_flow_meter_uninit(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (priv->mtr_policy_arr) {
+		mlx5_free(priv->mtr_policy_arr);
+		priv->mtr_policy_arr = NULL;
+	}
+	if (priv->mtr_profile_arr) {
+		mlx5_free(priv->mtr_profile_arr);
+		priv->mtr_profile_arr = NULL;
+	}
+	if (priv->hws_mpool) {
+		mlx5_aso_mtr_queue_uninit(priv->sh, priv->hws_mpool, NULL);
+		mlx5_ipool_destroy(priv->hws_mpool->idx_pool);
+		mlx5_free(priv->hws_mpool);
+		priv->hws_mpool = NULL;
+	}
+	if (priv->mtr_bulk.aso) {
+		mlx5_free(priv->mtr_bulk.aso);
+		priv->mtr_bulk.aso = NULL;
+		priv->mtr_bulk.size = 0;
+		mlx5_aso_queue_uninit(priv->sh, ASO_OPC_MOD_POLICER);
+	}
+	if (priv->mtr_bulk.action) {
+		mlx5dr_action_destroy(priv->mtr_bulk.action);
+		priv->mtr_bulk.action = NULL;
+	}
+	if (priv->mtr_bulk.devx_obj) {
+		claim_zero(mlx5_devx_cmd_destroy(priv->mtr_bulk.devx_obj));
+		priv->mtr_bulk.devx_obj = NULL;
+	}
+}
+
+int
+mlx5_flow_meter_init(struct rte_eth_dev *dev,
+		     uint32_t nb_meters,
+		     uint32_t nb_meter_profiles,
+		     uint32_t nb_meter_policies,
+		     uint32_t nb_queues)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_devx_obj *dcs = NULL;
+	uint32_t log_obj_size;
+	int ret = 0;
+	int reg_id;
+	struct mlx5_aso_mtr *aso;
+	uint32_t i;
+	struct rte_flow_error error;
+	uint32_t flags;
+	uint32_t nb_mtrs = rte_align32pow2(nb_meters);
+	struct mlx5_indexed_pool_config cfg = {
+		.size = sizeof(struct mlx5_aso_mtr),
+		.trunk_size = 1 << 12,
+		.per_core_cache = 1 << 13,
+		.need_lock = 1,
+		.release_mem_en = !!priv->sh->config.reclaim_mode,
+		.malloc = mlx5_malloc,
+		.max_idx = nb_meters,
+		.free = mlx5_free,
+		.type = "mlx5_hw_mtr_mark_action",
+	};
+
+	if (!nb_meters) {
+		ret = ENOTSUP;
+		rte_flow_error_set(&error, ENOMEM,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				  NULL, "Meter configuration is invalid.");
+		goto err;
+	}
+	if (!priv->mtr_en || !priv->sh->meter_aso_en) {
+		ret = ENOTSUP;
+		rte_flow_error_set(&error, ENOMEM,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				  NULL, "Meter ASO is not supported.");
+		goto err;
+	}
+	priv->mtr_config.nb_meters = nb_meters;
+	log_obj_size = rte_log2_u32(nb_meters >> 1);
+	dcs = mlx5_devx_cmd_create_flow_meter_aso_obj
+		(priv->sh->cdev->ctx, priv->sh->cdev->pdn,
+			log_obj_size);
+	if (!dcs) {
+		ret = ENOMEM;
+		rte_flow_error_set(&error, ENOMEM,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				  NULL, "Meter ASO object allocation failed.");
+		goto err;
+	}
+	priv->mtr_bulk.devx_obj = dcs;
+	reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, NULL);
+	if (reg_id < 0) {
+		ret = ENOTSUP;
+		rte_flow_error_set(&error, ENOMEM,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				  NULL, "Meter register is not available.");
+		goto err;
+	}
+	flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
+	if (priv->sh->config.dv_esw_en && priv->master)
+		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
+	priv->mtr_bulk.action = mlx5dr_action_create_aso_meter
+			(priv->dr_ctx, (struct mlx5dr_devx_obj *)dcs,
+				reg_id - REG_C_0, flags);
+	if (!priv->mtr_bulk.action) {
+		ret = ENOMEM;
+		rte_flow_error_set(&error, ENOMEM,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				  NULL, "Meter action creation failed.");
+		goto err;
+	}
+	priv->mtr_bulk.aso = mlx5_malloc(MLX5_MEM_ZERO,
+					 sizeof(struct mlx5_aso_mtr) *
+					 nb_meters,
+					 RTE_CACHE_LINE_SIZE,
+					 SOCKET_ID_ANY);
+	if (!priv->mtr_bulk.aso) {
+		ret = ENOMEM;
+		rte_flow_error_set(&error, ENOMEM,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				  NULL, "Meter bulk ASO allocation failed.");
+		goto err;
+	}
+	priv->mtr_bulk.size = nb_meters;
+	aso = priv->mtr_bulk.aso;
+	for (i = 0; i < priv->mtr_bulk.size; i++) {
+		aso->type = ASO_METER_DIRECT;
+		aso->state = ASO_METER_WAIT;
+		aso->offset = i;
+		aso++;
+	}
+	priv->hws_mpool = mlx5_malloc(MLX5_MEM_ZERO,
+				sizeof(struct mlx5_aso_mtr_pool),
+				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+	if (!priv->hws_mpool) {
+		ret = ENOMEM;
+		rte_flow_error_set(&error, ENOMEM,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				  NULL, "Meter ipool allocation failed.");
+		goto err;
+	}
+	priv->hws_mpool->devx_obj = priv->mtr_bulk.devx_obj;
+	priv->hws_mpool->action = priv->mtr_bulk.action;
+	priv->hws_mpool->nb_sq = nb_queues;
+	if (mlx5_aso_mtr_queue_init(priv->sh, priv->hws_mpool,
+				    &priv->sh->mtrmng->pools_mng, nb_queues)) {
+		ret = ENOMEM;
+		rte_flow_error_set(&error, ENOMEM,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				  NULL, "Meter ASO queue allocation failed.");
+		goto err;
+	}
+	/*
+	 * No need for local cache if Meter number is a small number.
+	 * Since flow insertion rate will be very limited in that case.
+	 * Here let's set the number to less than default trunk size 4K.
+	 */
+	if (nb_mtrs <= cfg.trunk_size) {
+		cfg.per_core_cache = 0;
+		cfg.trunk_size = nb_mtrs;
+	} else if (nb_mtrs <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
+		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
+	}
+	priv->hws_mpool->idx_pool = mlx5_ipool_create(&cfg);
+	if (nb_meter_profiles) {
+		priv->mtr_config.nb_meter_profiles = nb_meter_profiles;
+		priv->mtr_profile_arr =
+			mlx5_malloc(MLX5_MEM_ZERO,
+				    sizeof(struct mlx5_flow_meter_profile) *
+				    nb_meter_profiles,
+				    RTE_CACHE_LINE_SIZE,
+				    SOCKET_ID_ANY);
+		if (!priv->mtr_profile_arr) {
+			ret = ENOMEM;
+			rte_flow_error_set(&error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					   NULL, "Meter profile allocation failed.");
+			goto err;
+		}
+	}
+	if (nb_meter_policies) {
+		priv->mtr_config.nb_meter_policies = nb_meter_policies;
+		priv->mtr_policy_arr =
+			mlx5_malloc(MLX5_MEM_ZERO,
+				    sizeof(struct mlx5_flow_meter_policy) *
+				    nb_meter_policies,
+				    RTE_CACHE_LINE_SIZE,
+				    SOCKET_ID_ANY);
+		if (!priv->mtr_policy_arr) {
+			ret = ENOMEM;
+			rte_flow_error_set(&error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					   NULL, "Meter policy allocation failed.");
+			goto err;
+		}
+	}
+	return 0;
+err:
+	mlx5_flow_meter_uninit(dev);
+	return ret;
+}
+
+#endif /* HAVE_MLX5_HWS_SUPPORT */
+
 static int mlx5_flow_meter_disable(struct rte_eth_dev *dev,
 		uint32_t meter_id, struct rte_mtr_error *error);