[v1,11/16] net/mlx5/hws: support partial hash

Message ID 20230131093346.1261066-12-valex@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series net/mlx5/hws: support range and partial hash matching

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Alex Vesker Jan. 31, 2023, 9:33 a.m. UTC
Hash definers allow hashing over a subset of the fields that
are used for matching. This makes it possible to combine
match templates which were considered invalid until now.
During matcher creation, the mlx5dr code processes the match
templates and checks whether such a hash definer is needed,
based on the intersection of the definers' bitmasks.
Since the current HW GTA implementation doesn't allow specifying
both a match and a hash definer, rule insertion is done using
the FW GTA WQE command.
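
To illustrate the bitmask intersection described above, here is a minimal
standalone sketch. All names and sizes (ex_definer, EX_JUMBO_TAG_SZ, etc.)
are hypothetical stand-ins for the driver's structures and are not part of
this patch; it only demonstrates detecting differing definers and hashing
over the intersection of their masks.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical sizes standing in for DW_SELECTORS, BYTE_SELECTORS and
 * MLX5DR_JUMBO_TAG_SZ; the real values live in the mlx5dr headers.
 */
#define EX_DW_SELECTORS   9
#define EX_BYTE_SELECTORS 8
#define EX_JUMBO_TAG_SZ   44

struct ex_definer {
	uint8_t dw_selector[EX_DW_SELECTORS];
	uint8_t byte_selector[EX_BYTE_SELECTORS];
	uint8_t mask[EX_JUMBO_TAG_SZ];
};

/* True when two definers differ in selectors or mask, i.e. when the
 * matcher would need a dedicated hash definer.
 */
static bool ex_definers_differ(const struct ex_definer *a,
			       const struct ex_definer *b)
{
	return memcmp(a->dw_selector, b->dw_selector, sizeof(a->dw_selector)) ||
	       memcmp(a->byte_selector, b->byte_selector, sizeof(a->byte_selector)) ||
	       memcmp(a->mask, b->mask, sizeof(a->mask));
}

/* Hash mask = byte-wise AND of all template masks, using mt[0] as the
 * reference (mirrors the intersection loop in the patch).
 */
static void ex_hash_mask(const struct ex_definer *mt, int num_of_mt,
			 uint8_t hash_mask[EX_JUMBO_TAG_SZ])
{
	int i, j;

	memcpy(hash_mask, mt[0].mask, EX_JUMBO_TAG_SZ);
	for (i = 1; i < num_of_mt; i++)
		for (j = 0; j < EX_JUMBO_TAG_SZ; j++)
			hash_mask[j] &= mt[i].mask[j];
}

int main(void)
{
	struct ex_definer mt[2];
	uint8_t hash_mask[EX_JUMBO_TAG_SZ];
	int j;

	memset(mt, 0, sizeof(mt));
	memset(&mt[0].mask[0], 0xff, 4); /* mt[0] matches bytes 0..3 */
	memset(&mt[1].mask[2], 0xff, 4); /* mt[1] matches bytes 2..5 */

	if (ex_definers_differ(&mt[0], &mt[1])) {
		ex_hash_mask(mt, 2, hash_mask);
		printf("hash mask:");
		for (j = 0; j < 8; j++)
			printf(" %02x", hash_mask[j]);
		printf("\n"); /* only bytes 2..3 stay set */
	}
	return 0;
}

With mt[0] masking bytes 0..3 and mt[1] masking bytes 2..5, only bytes
2..3 remain set in the computed hash mask, so hashing covers just the
fields common to both templates while matching still uses each template's
full definer.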

Signed-off-by: Alex Vesker <valex@nvidia.com>
---
 drivers/common/mlx5/mlx5_prm.h        |   4 +
 drivers/net/mlx5/hws/mlx5dr_definer.c | 105 ++++++++++++++++++++++++++
 drivers/net/mlx5/hws/mlx5dr_matcher.c |  66 +++++++++++++++-
 drivers/net/mlx5/hws/mlx5dr_matcher.h |  10 ++-
 4 files changed, 181 insertions(+), 4 deletions(-)
  

Patch

diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index cf46296afb..cca2fb6af7 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -2112,6 +2112,10 @@  enum mlx5_ifc_cross_vhca_allowed_objects_types {
 	MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC = 1 << 0xa,
 };
 
+enum {
+	MLX5_GENERATE_WQE_TYPE_FLOW_UPDATE = 1 << 1,
+};
+
 /*
  *  HCA Capabilities 2
  */
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 9560f8a0af..260e6c5d1d 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -1928,6 +1928,27 @@  int mlx5dr_definer_get_id(struct mlx5dr_definer *definer)
 	return definer->obj->id;
 }
 
+static int
+mlx5dr_definer_compare(struct mlx5dr_definer *definer_a,
+		       struct mlx5dr_definer *definer_b)
+{
+	int i;
+
+	for (i = 0; i < BYTE_SELECTORS; i++)
+		if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
+			return 1;
+
+	for (i = 0; i < DW_SELECTORS; i++)
+		if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
+			return 1;
+
+	for (i = 0; i < MLX5DR_JUMBO_TAG_SZ; i++)
+		if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
+			return 1;
+
+	return 0;
+}
+
 static int
 mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher,
 			   struct mlx5dr_definer *match_definer)
@@ -2070,6 +2091,80 @@  mlx5dr_definer_matcher_match_uninit(struct mlx5dr_matcher *matcher)
 		mlx5dr_definer_free(matcher->mt[i].definer);
 }
 
+static int
+mlx5dr_definer_matcher_hash_init(struct mlx5dr_context *ctx,
+				 struct mlx5dr_matcher *matcher)
+{
+	struct mlx5dr_cmd_definer_create_attr def_attr = {0};
+	struct mlx5dr_match_template *mt = matcher->mt;
+	struct ibv_context *ibv_ctx = ctx->ibv_ctx;
+	uint8_t *bit_mask;
+	int i, j;
+
+	for (i = 1; i < matcher->num_of_mt; i++)
+		if (mlx5dr_definer_compare(mt[i].definer, mt[i - 1].definer))
+			matcher->flags |= MLX5DR_MATCHER_FLAGS_HASH_DEFINER;
+
+	if (!(matcher->flags & MLX5DR_MATCHER_FLAGS_HASH_DEFINER))
+		return 0;
+
+	/* Insert by index requires all MT using the same definer */
+	if (matcher->attr.insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) {
+		DR_LOG(ERR, "Insert by index not supported with MT combination");
+		rte_errno = EOPNOTSUPP;
+		return rte_errno;
+	}
+
+	matcher->hash_definer = simple_calloc(1, sizeof(*matcher->hash_definer));
+	if (!matcher->hash_definer) {
+		DR_LOG(ERR, "Failed to allocate memory for hash definer");
+		rte_errno = ENOMEM;
+		return rte_errno;
+	}
+
+	/* Calculate intersection between all match templates bitmasks.
+	 * We will use mt[0] as reference and intersect it with mt[1..n].
+	 * From this we will get:
+	 * hash_definer.selectors = mt[0].selectors
+	 * hash_definer.mask = mt[0].mask & mt[1].mask & ... & mt[n].mask
+	 */
+
+	/* Use first definer which should also contain intersection fields */
+	memcpy(matcher->hash_definer, mt->definer, sizeof(struct mlx5dr_definer));
+
+	/* Intersect the first match template's bitmask with those of all the others */
+	for (i = 1; i < matcher->num_of_mt; i++) {
+		bit_mask = (uint8_t *)&mt[i].definer->mask;
+		for (j = 0; j < MLX5DR_JUMBO_TAG_SZ; j++)
+			((uint8_t *)&matcher->hash_definer->mask)[j] &= bit_mask[j];
+	}
+
+	def_attr.match_mask = matcher->hash_definer->mask.jumbo;
+	def_attr.dw_selector = matcher->hash_definer->dw_selector;
+	def_attr.byte_selector = matcher->hash_definer->byte_selector;
+	matcher->hash_definer->obj = mlx5dr_cmd_definer_create(ibv_ctx, &def_attr);
+	if (!matcher->hash_definer->obj) {
+		DR_LOG(ERR, "Failed to create hash definer");
+		goto free_hash_definer;
+	}
+
+	return 0;
+
+free_hash_definer:
+	simple_free(matcher->hash_definer);
+	return rte_errno;
+}
+
+static void
+mlx5dr_definer_matcher_hash_uninit(struct mlx5dr_matcher *matcher)
+{
+	if (!matcher->hash_definer)
+		return;
+
+	mlx5dr_cmd_destroy_obj(matcher->hash_definer->obj);
+	simple_free(matcher->hash_definer);
+}
+
 int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
 				struct mlx5dr_matcher *matcher)
 {
@@ -2093,8 +2188,17 @@  int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
 		goto free_fc;
 	}
 
+	/* Calculate partial hash definer */
+	ret = mlx5dr_definer_matcher_hash_init(ctx, matcher);
+	if (ret) {
+		DR_LOG(ERR, "Failed to init hash definer");
+		goto uninit_match_definer;
+	}
+
 	return 0;
 
+uninit_match_definer:
+	mlx5dr_definer_matcher_match_uninit(matcher);
 free_fc:
 	for (i = 0; i < matcher->num_of_mt; i++)
 		simple_free(matcher->mt[i].fc);
@@ -2109,6 +2213,7 @@  void mlx5dr_definer_matcher_uninit(struct mlx5dr_matcher *matcher)
 	if (matcher->flags & MLX5DR_MATCHER_FLAGS_COLISION)
 		return;
 
+	mlx5dr_definer_matcher_hash_uninit(matcher);
 	mlx5dr_definer_matcher_match_uninit(matcher);
 
 	for (i = 0; i < matcher->num_of_mt; i++)
diff --git a/drivers/net/mlx5/hws/mlx5dr_matcher.c b/drivers/net/mlx5/hws/mlx5dr_matcher.c
index 7e332052b2..e860c274cf 100644
--- a/drivers/net/mlx5/hws/mlx5dr_matcher.c
+++ b/drivers/net/mlx5/hws/mlx5dr_matcher.c
@@ -337,6 +337,42 @@  static int mlx5dr_matcher_disconnect(struct mlx5dr_matcher *matcher)
 	return 0;
 }
 
+static bool mlx5dr_matcher_supp_fw_wqe(struct mlx5dr_matcher *matcher)
+{
+	struct mlx5dr_cmd_query_caps *caps = matcher->tbl->ctx->caps;
+
+	if (matcher->flags & MLX5DR_MATCHER_FLAGS_HASH_DEFINER) {
+		if (matcher->hash_definer->type == MLX5DR_DEFINER_TYPE_MATCH &&
+		    !IS_BIT_SET(caps->supp_ste_fromat_gen_wqe, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
+			DR_LOG(ERR, "Gen WQE MATCH format not supported");
+			return false;
+		}
+
+		if (matcher->hash_definer->type == MLX5DR_DEFINER_TYPE_JUMBO) {
+			DR_LOG(ERR, "Gen WQE JUMBO format not supported");
+			return false;
+		}
+	}
+
+	if (matcher->attr.insert_mode != MLX5DR_MATCHER_INSERT_BY_HASH ||
+	    matcher->attr.distribute_mode != MLX5DR_MATCHER_DISTRIBUTE_BY_HASH) {
+		DR_LOG(ERR, "Gen WQE must be inserted and distributed by hash");
+		return false;
+	}
+
+	if (!(caps->supp_type_gen_wqe & MLX5_GENERATE_WQE_TYPE_FLOW_UPDATE)) {
+		DR_LOG(ERR, "Gen WQE command not supporting GTA");
+		return false;
+	}
+
+	if (!caps->rtc_max_hash_def_gen_wqe) {
+		DR_LOG(ERR, "Hash definer not supported");
+		return false;
+	}
+
+	return true;
+}
+
 static void mlx5dr_matcher_set_rtc_attr_sz(struct mlx5dr_matcher *matcher,
 					   struct mlx5dr_cmd_rtc_create_attr *rtc_attr,
 					   enum mlx5dr_matcher_rtc_type rtc_type,
@@ -432,8 +468,16 @@  static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher,
 		if (attr->insert_mode == MLX5DR_MATCHER_INSERT_BY_HASH) {
 			/* The usual Hash Table */
 			rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
-			/* The first match template is used since all share the same definer */
-			rtc_attr.match_definer_0 = mlx5dr_definer_get_id(mt->definer);
+			if (matcher->hash_definer) {
+				/* Specify definer_id_0 is used for hashing */
+				rtc_attr.fw_gen_wqe = true;
+				rtc_attr.num_hash_definer = 1;
+				rtc_attr.match_definer_0 =
+					mlx5dr_definer_get_id(matcher->hash_definer);
+			} else {
+				/* The first mt is used since all share the same definer */
+				rtc_attr.match_definer_0 = mlx5dr_definer_get_id(mt->definer);
+			}
 		} else if (attr->insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) {
 			rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
 			rtc_attr.num_hash_definer = 1;
@@ -640,6 +684,12 @@  static int mlx5dr_matcher_bind_at(struct mlx5dr_matcher *matcher)
 	if (!matcher->action_ste.max_stes)
 		return 0;
 
+	if (mlx5dr_matcher_req_fw_wqe(matcher)) {
+		DR_LOG(ERR, "FW extended matcher cannot be bound to complex at");
+		rte_errno = ENOTSUP;
+		return rte_errno;
+	}
+
 	/* Allocate action STE mempool */
 	pool_attr.table_type = tbl->type;
 	pool_attr.pool_type = MLX5DR_POOL_TYPE_STE;
@@ -701,13 +751,21 @@  static int mlx5dr_matcher_bind_mt(struct mlx5dr_matcher *matcher)
 	struct mlx5dr_pool_attr pool_attr = {0};
 	int ret;
 
-	/* Calculate match definers */
+	/* Calculate match and hash definers */
 	ret = mlx5dr_definer_matcher_init(ctx, matcher);
 	if (ret) {
 		DR_LOG(ERR, "Failed to set matcher templates with match definers");
 		return ret;
 	}
 
+	if (mlx5dr_matcher_req_fw_wqe(matcher) &&
+	    !mlx5dr_matcher_supp_fw_wqe(matcher)) {
+		DR_LOG(ERR, "Matcher requires FW WQE which is not supported");
+		rte_errno = ENOTSUP;
+		ret = rte_errno;
+		goto uninit_match_definer;
+	}
+
 	/* Create an STE pool per matcher*/
 	pool_attr.table_type = matcher->tbl->type;
 	pool_attr.pool_type = MLX5DR_POOL_TYPE_STE;
@@ -719,6 +777,7 @@  static int mlx5dr_matcher_bind_mt(struct mlx5dr_matcher *matcher)
 	matcher->match_ste.pool = mlx5dr_pool_create(ctx, &pool_attr);
 	if (!matcher->match_ste.pool) {
 		DR_LOG(ERR, "Failed to allocate matcher STE pool");
+		ret = ENOTSUP;
 		goto uninit_match_definer;
 	}
 
@@ -932,6 +991,7 @@  mlx5dr_matcher_create_col_matcher(struct mlx5dr_matcher *matcher)
 	col_matcher->at = matcher->at;
 	col_matcher->num_of_at = matcher->num_of_at;
 	col_matcher->num_of_mt = matcher->num_of_mt;
+	col_matcher->hash_definer = matcher->hash_definer;
 	col_matcher->attr.priority = matcher->attr.priority;
 	col_matcher->flags = matcher->flags;
 	col_matcher->flags |= MLX5DR_MATCHER_FLAGS_COLISION;
diff --git a/drivers/net/mlx5/hws/mlx5dr_matcher.h b/drivers/net/mlx5/hws/mlx5dr_matcher.h
index 4bdb33b11f..c012c0c193 100644
--- a/drivers/net/mlx5/hws/mlx5dr_matcher.h
+++ b/drivers/net/mlx5/hws/mlx5dr_matcher.h
@@ -23,7 +23,8 @@ 
 #define MLX5DR_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
 
 enum mlx5dr_matcher_flags {
-	MLX5DR_MATCHER_FLAGS_COLISION		= 1 << 0,
+	MLX5DR_MATCHER_FLAGS_HASH_DEFINER	= 1 << 0,
+	MLX5DR_MATCHER_FLAGS_COLISION		= 1 << 1,
 };
 
 struct mlx5dr_match_template {
@@ -69,6 +70,7 @@  struct mlx5dr_matcher {
 	struct mlx5dr_matcher *col_matcher;
 	struct mlx5dr_matcher_match_ste match_ste;
 	struct mlx5dr_matcher_action_ste action_ste;
+	struct mlx5dr_definer *hash_definer;
 	LIST_ENTRY(mlx5dr_matcher) next;
 };
 
@@ -78,6 +80,12 @@  mlx5dr_matcher_mt_is_jumbo(struct mlx5dr_match_template *mt)
 	return mlx5dr_definer_is_jumbo(mt->definer);
 }
 
+static inline bool mlx5dr_matcher_req_fw_wqe(struct mlx5dr_matcher *matcher)
+{
+	/* Currently HWS doesn't support hash different from match or range */
+	return unlikely(matcher->flags & MLX5DR_MATCHER_FLAGS_HASH_DEFINER);
+}
+
 int mlx5dr_matcher_conv_items_to_prm(uint64_t *match_buf,
 				     struct rte_flow_item *items,
 				     uint8_t *match_criteria,