[v1,10/16] net/mlx5/hws: redesign definer create

Message ID 20230131093346.1261066-11-valex@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series net/mlx5/hws: support range and partial hash matching

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Alex Vesker Jan. 31, 2023, 9:33 a.m. UTC
  Until now, definer creation and deletion used get and put
functions. The get function calculated the definer field
copy (fc), header layout (hl) and definer layout internally,
without taking into account other match templates used over
the same matcher.
This logic is now split to allow sharing the hl across
multiple definers: first calculate the shared hl, then
create the definers based on that shared layout.
Once all definers use the same layout, it is possible to
hash over the shared fields, since their location is the
same across all of the definers.
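
In short, the matcher init path now first computes one shared
definer layout for all match templates and only then creates the
per-template definers from it. A simplified sketch of that flow,
condensed from mlx5dr_definer_matcher_init() in the diff below
(error unwinding omitted; the example_matcher_definer_init wrapper
name is illustrative, not part of the driver):

	static int example_matcher_definer_init(struct mlx5dr_context *ctx,
						struct mlx5dr_matcher *matcher)
	{
		struct mlx5dr_definer match_layout = {0};
		int i, ret;

		/* Build the shared header layout (hl) from all match templates
		 * and pick definer selectors that fit the union of fields.
		 */
		ret = mlx5dr_definer_calc_layout(matcher, &match_layout);
		if (ret)
			return ret;

		/* Create one definer per match template, all reusing the same
		 * selector layout, so hashing can be done over shared fields.
		 */
		for (i = 0; i < matcher->num_of_mt; i++) {
			matcher->mt[i].definer =
				mlx5dr_definer_alloc(ctx->ibv_ctx,
						     matcher->mt[i].fc,
						     matcher->mt[i].fc_sz,
						     matcher->mt[i].items,
						     &match_layout);
			if (!matcher->mt[i].definer)
				return rte_errno;
		}

		return 0;
	}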

Signed-off-by: Alex Vesker <valex@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c  | 301 ++++++++++++++++---------
 drivers/net/mlx5/hws/mlx5dr_definer.h  |  11 +-
 drivers/net/mlx5/hws/mlx5dr_internal.h |   2 +-
 drivers/net/mlx5/hws/mlx5dr_matcher.c  |  61 ++---
 drivers/net/mlx5/hws/mlx5dr_matcher.h  |  16 +-
 5 files changed, 230 insertions(+), 161 deletions(-)
  

Patch

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index c268f94ad3..9560f8a0af 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -104,7 +104,6 @@  struct mlx5dr_definer_conv_data {
 	struct mlx5dr_definer_fc *fc;
 	uint8_t relaxed;
 	uint8_t tunnel;
-	uint8_t *hl;
 };
 
 /* Xmacro used to create generic item setter from items */
@@ -1504,6 +1503,36 @@  mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static int
+mlx5dr_definer_mt_set_fc(struct mlx5dr_match_template *mt,
+			 struct mlx5dr_definer_fc *fc,
+			 uint8_t *hl)
+{
+	uint32_t fc_sz = 0;
+	int i;
+
+	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++)
+		if (fc[i].tag_set)
+			fc_sz++;
+
+	mt->fc = simple_calloc(fc_sz, sizeof(*mt->fc));
+	if (!mt->fc) {
+		rte_errno = ENOMEM;
+		return rte_errno;
+	}
+
+	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++) {
+		if (!fc[i].tag_set)
+			continue;
+
+		fc[i].fname = i;
+		memcpy(&mt->fc[mt->fc_sz++], &fc[i], sizeof(*mt->fc));
+		DR_SET(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
+	}
+
+	return 0;
+}
+
 static int
 mlx5dr_definer_check_item_range_supp(struct rte_flow_item *item)
 {
@@ -1535,12 +1564,9 @@  mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 	struct mlx5dr_definer_conv_data cd = {0};
 	struct rte_flow_item *items = mt->items;
 	uint64_t item_flags = 0;
-	uint32_t total = 0;
-	int i, j;
-	int ret;
+	int i, ret;
 
 	cd.fc = fc;
-	cd.hl = hl;
 	cd.caps = ctx->caps;
 	cd.relaxed = mt->flags & MLX5DR_MATCH_TEMPLATE_FLAG_RELAXED_MATCH;
 
@@ -1660,29 +1686,11 @@  mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 
 	mt->item_flags = item_flags;
 
-	/* Fill in headers layout and calculate total number of fields  */
-	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++) {
-		if (fc[i].tag_set) {
-			total++;
-			DR_SET(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
-		}
-	}
-
-	mt->fc_sz = total;
-	mt->fc = simple_calloc(total, sizeof(*mt->fc));
-	if (!mt->fc) {
-		DR_LOG(ERR, "Failed to allocate field copy array");
-		rte_errno = ENOMEM;
-		return rte_errno;
-	}
-
-	j = 0;
-	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++) {
-		if (fc[i].tag_set) {
-			memcpy(&mt->fc[j], &fc[i], sizeof(*mt->fc));
-			mt->fc[j].fname = i;
-			j++;
-		}
+	/* Fill in headers layout and allocate fc array on mt */
+	ret = mlx5dr_definer_mt_set_fc(mt, fc, hl);
+	if (ret) {
+		DR_LOG(ERR, "Failed to set field copy to match template");
+		return ret;
 	}
 
 	return 0;
@@ -1837,8 +1845,8 @@  mlx5dr_definer_best_hl_fit_recu(struct mlx5dr_definer_sel_ctrl *ctrl,
 }
 
 static void
-mlx5dr_definer_apply_sel_ctrl(struct mlx5dr_definer_sel_ctrl *ctrl,
-			      struct mlx5dr_definer *definer)
+mlx5dr_definer_copy_sel_ctrl(struct mlx5dr_definer_sel_ctrl *ctrl,
+			     struct mlx5dr_definer *definer)
 {
 	memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
 	memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
@@ -1848,7 +1856,7 @@  mlx5dr_definer_apply_sel_ctrl(struct mlx5dr_definer_sel_ctrl *ctrl,
 
 static int
 mlx5dr_definer_find_best_hl_fit(struct mlx5dr_context *ctx,
-				struct mlx5dr_match_template *mt,
+				struct mlx5dr_definer *definer,
 				uint8_t *hl)
 {
 	struct mlx5dr_definer_sel_ctrl ctrl = {0};
@@ -1861,8 +1869,8 @@  mlx5dr_definer_find_best_hl_fit(struct mlx5dr_context *ctx,
 
 	found = mlx5dr_definer_best_hl_fit_recu(&ctrl, 0, (uint32_t *)hl);
 	if (found) {
-		mlx5dr_definer_apply_sel_ctrl(&ctrl, mt->definer);
-		mt->definer->type = MLX5DR_DEFINER_TYPE_MATCH;
+		mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
+		definer->type = MLX5DR_DEFINER_TYPE_MATCH;
 		return 0;
 	}
 
@@ -1875,8 +1883,8 @@  mlx5dr_definer_find_best_hl_fit(struct mlx5dr_context *ctx,
 
 	found = mlx5dr_definer_best_hl_fit_recu(&ctrl, 0, (uint32_t *)hl);
 	if (found) {
-		mlx5dr_definer_apply_sel_ctrl(&ctrl, mt->definer);
-		mt->definer->type = MLX5DR_DEFINER_TYPE_JUMBO;
+		mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
+		definer->type = MLX5DR_DEFINER_TYPE_JUMBO;
 		return 0;
 	}
 
@@ -1920,114 +1928,189 @@  int mlx5dr_definer_get_id(struct mlx5dr_definer *definer)
 	return definer->obj->id;
 }
 
-int mlx5dr_definer_compare(struct mlx5dr_definer *definer_a,
-			   struct mlx5dr_definer *definer_b)
+static int
+mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher,
+			   struct mlx5dr_definer *match_definer)
 {
-	int i;
+	struct mlx5dr_context *ctx = matcher->tbl->ctx;
+	struct mlx5dr_match_template *mt = matcher->mt;
+	uint8_t *match_hl, *hl_buff;
+	int i, ret;
 
-	if (definer_a->type != definer_b->type)
-		return 1;
+	/* Union header-layout (hl) is used for creating a single definer
+	 * field layout used with different bitmasks for hash and match.
+	 */
+	hl_buff = simple_calloc(1, MLX5_ST_SZ_BYTES(definer_hl));
+	if (!hl_buff) {
+		DR_LOG(ERR, "Failed to allocate memory for header layout");
+		rte_errno = ENOMEM;
+		return rte_errno;
+	}
 
-	for (i = 0; i < BYTE_SELECTORS; i++)
-		if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
-			return 1;
+	match_hl = hl_buff;
 
-	for (i = 0; i < DW_SELECTORS; i++)
-		if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
-			return 1;
+	/* Convert all mt items to header layout (hl)
+	 * and allocate the match field copy array (fc).
+	 */
+	for (i = 0; i < matcher->num_of_mt; i++) {
+		ret = mlx5dr_definer_conv_items_to_hl(ctx, &mt[i], match_hl);
+		if (ret) {
+			DR_LOG(ERR, "Failed to convert items to header layout");
+			goto free_fc;
+		}
+	}
 
-	for (i = 0; i < MLX5DR_JUMBO_TAG_SZ; i++)
-		if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
-			return 1;
+	/* Find the match definer layout for header layout match union */
+	ret = mlx5dr_definer_find_best_hl_fit(ctx, match_definer, match_hl);
+	if (ret) {
+		DR_LOG(ERR, "Failed to create match definer from header layout");
+		goto free_fc;
+	}
 
+	simple_free(hl_buff);
 	return 0;
+
+free_fc:
+	for (i = 0; i < matcher->num_of_mt; i++)
+		if (mt[i].fc)
+			simple_free(mt[i].fc);
+
+	simple_free(hl_buff);
+	return rte_errno;
 }
 
-int mlx5dr_definer_get(struct mlx5dr_context *ctx,
-		       struct mlx5dr_match_template *mt)
+static struct mlx5dr_definer *
+mlx5dr_definer_alloc(struct ibv_context *ibv_ctx,
+		     struct mlx5dr_definer_fc *fc,
+		     int fc_sz,
+		     struct rte_flow_item *items,
+		     struct mlx5dr_definer *layout)
 {
 	struct mlx5dr_cmd_definer_create_attr def_attr = {0};
-	struct ibv_context *ibv_ctx = ctx->ibv_ctx;
-	uint8_t *hl;
+	struct mlx5dr_definer *definer;
 	int ret;
 
-	if (mt->refcount++)
-		return 0;
-
-	mt->definer = simple_calloc(1, sizeof(*mt->definer));
-	if (!mt->definer) {
+	definer = simple_calloc(1, sizeof(*definer));
+	if (!definer) {
 		DR_LOG(ERR, "Failed to allocate memory for definer");
 		rte_errno = ENOMEM;
-		goto dec_refcount;
-	}
-
-	/* Header layout (hl) holds full bit mask per field */
-	hl = simple_calloc(1, MLX5_ST_SZ_BYTES(definer_hl));
-	if (!hl) {
-		DR_LOG(ERR, "Failed to allocate memory for header layout");
-		rte_errno = ENOMEM;
-		goto free_definer;
+		return NULL;
 	}
 
-	/* Convert items to hl and allocate the field copy array (fc) */
-	ret = mlx5dr_definer_conv_items_to_hl(ctx, mt, hl);
-	if (ret) {
-		DR_LOG(ERR, "Failed to convert items to hl");
-		goto free_hl;
-	}
+	memcpy(definer, layout, sizeof(*definer));
 
-	/* Find the definer for given header layout */
-	ret = mlx5dr_definer_find_best_hl_fit(ctx, mt, hl);
-	if (ret) {
-		DR_LOG(ERR, "Failed to create definer from header layout");
-		goto free_field_copy;
-	}
-
-	/* Align field copy array based on the new definer */
-	ret = mlx5dr_definer_fc_bind(mt->definer,
-				     mt->fc,
-				     mt->fc_sz);
+	/* Align field copy array based on given layout */
+	ret = mlx5dr_definer_fc_bind(definer, fc, fc_sz);
 	if (ret) {
 		DR_LOG(ERR, "Failed to bind field copy to definer");
-		goto free_field_copy;
+		goto free_definer;
 	}
 
 	/* Create the tag mask used for definer creation */
-	mlx5dr_definer_create_tag_mask(mt->items,
-				       mt->fc,
-				       mt->fc_sz,
-				       mt->definer->mask.jumbo);
+	mlx5dr_definer_create_tag_mask(items, fc, fc_sz, definer->mask.jumbo);
 
 	/* Create definer based on the bitmask tag */
-	def_attr.match_mask = mt->definer->mask.jumbo;
-	def_attr.dw_selector = mt->definer->dw_selector;
-	def_attr.byte_selector = mt->definer->byte_selector;
-	mt->definer->obj = mlx5dr_cmd_definer_create(ibv_ctx, &def_attr);
-	if (!mt->definer->obj)
-		goto free_field_copy;
+	def_attr.match_mask = definer->mask.jumbo;
+	def_attr.dw_selector = layout->dw_selector;
+	def_attr.byte_selector = layout->byte_selector;
 
-	simple_free(hl);
+	definer->obj = mlx5dr_cmd_definer_create(ibv_ctx, &def_attr);
+	if (!definer->obj)
+		goto free_definer;
 
-	return 0;
+	return definer;
 
-free_field_copy:
-	simple_free(mt->fc);
-free_hl:
-	simple_free(hl);
 free_definer:
-	simple_free(mt->definer);
-dec_refcount:
-	mt->refcount--;
+	simple_free(definer);
+	return NULL;
+}
+
+static void
+mlx5dr_definer_free(struct mlx5dr_definer *definer)
+{
+	mlx5dr_cmd_destroy_obj(definer->obj);
+	simple_free(definer);
+}
+
+static int
+mlx5dr_definer_matcher_match_init(struct mlx5dr_context *ctx,
+				  struct mlx5dr_matcher *matcher,
+				  struct mlx5dr_definer *match_layout)
+{
+	struct mlx5dr_match_template *mt = matcher->mt;
+	int i;
+
+	/* Create mandatory match definer */
+	for (i = 0; i < matcher->num_of_mt; i++) {
+		mt[i].definer = mlx5dr_definer_alloc(ctx->ibv_ctx,
+						     mt[i].fc,
+						     mt[i].fc_sz,
+						     mt[i].items,
+						     match_layout);
+		if (!mt[i].definer) {
+			DR_LOG(ERR, "Failed to create match definer");
+			goto free_definers;
+		}
+	}
+	return 0;
+
+free_definers:
+	while (i--)
+		mlx5dr_definer_free(mt[i].definer);
 
 	return rte_errno;
 }
 
-void mlx5dr_definer_put(struct mlx5dr_match_template *mt)
+static void
+mlx5dr_definer_matcher_match_uninit(struct mlx5dr_matcher *matcher)
 {
-	if (--mt->refcount)
+	int i;
+
+	for (i = 0; i < matcher->num_of_mt; i++)
+		mlx5dr_definer_free(matcher->mt[i].definer);
+}
+
+int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
+				struct mlx5dr_matcher *matcher)
+{
+	struct mlx5dr_definer match_layout = {0};
+	int ret, i;
+
+	if (matcher->flags & MLX5DR_MATCHER_FLAGS_COLISION)
+		return 0;
+
+	/* Calculate header layout based on matcher items */
+	ret = mlx5dr_definer_calc_layout(matcher, &match_layout);
+	if (ret) {
+		DR_LOG(ERR, "Failed to calculate matcher definer layout");
+		return ret;
+	}
+
+	/* Calculate definers needed for exact match */
+	ret = mlx5dr_definer_matcher_match_init(ctx, matcher, &match_layout);
+	if (ret) {
+		DR_LOG(ERR, "Failed to init match definers");
+		goto free_fc;
+	}
+
+	return 0;
+
+free_fc:
+	for (i = 0; i < matcher->num_of_mt; i++)
+		simple_free(matcher->mt[i].fc);
+
+	return ret;
+}
+
+void mlx5dr_definer_matcher_uninit(struct mlx5dr_matcher *matcher)
+{
+	int i;
+
+	if (matcher->flags & MLX5DR_MATCHER_FLAGS_COLISION)
 		return;
 
-	simple_free(mt->fc);
-	mlx5dr_cmd_destroy_obj(mt->definer->obj);
-	simple_free(mt->definer);
+	mlx5dr_definer_matcher_match_uninit(matcher);
+
+	for (i = 0; i < matcher->num_of_mt; i++)
+		simple_free(matcher->mt[i].fc);
 }
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index bab4baae4a..a14a08838a 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -573,14 +573,11 @@  void mlx5dr_definer_create_tag(const struct rte_flow_item *items,
 			       uint32_t fc_sz,
 			       uint8_t *tag);
 
-int mlx5dr_definer_compare(struct mlx5dr_definer *definer_a,
-			   struct mlx5dr_definer *definer_b);
-
 int mlx5dr_definer_get_id(struct mlx5dr_definer *definer);
 
-int mlx5dr_definer_get(struct mlx5dr_context *ctx,
-		       struct mlx5dr_match_template *mt);
+int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
+				struct mlx5dr_matcher *matcher);
 
-void mlx5dr_definer_put(struct mlx5dr_match_template *mt);
+void mlx5dr_definer_matcher_uninit(struct mlx5dr_matcher *matcher);
 
-#endif /* MLX5DR_DEFINER_H_ */
+#endif
diff --git a/drivers/net/mlx5/hws/mlx5dr_internal.h b/drivers/net/mlx5/hws/mlx5dr_internal.h
index faad2bbd0f..c3c077667d 100644
--- a/drivers/net/mlx5/hws/mlx5dr_internal.h
+++ b/drivers/net/mlx5/hws/mlx5dr_internal.h
@@ -30,12 +30,12 @@ 
 #include "mlx5dr_pool.h"
 #include "mlx5dr_context.h"
 #include "mlx5dr_table.h"
-#include "mlx5dr_matcher.h"
 #include "mlx5dr_send.h"
 #include "mlx5dr_rule.h"
 #include "mlx5dr_cmd.h"
 #include "mlx5dr_action.h"
 #include "mlx5dr_definer.h"
+#include "mlx5dr_matcher.h"
 #include "mlx5dr_debug.h"
 #include "mlx5dr_pat_arg.h"
 
diff --git a/drivers/net/mlx5/hws/mlx5dr_matcher.c b/drivers/net/mlx5/hws/mlx5dr_matcher.c
index b8db0a27ae..7e332052b2 100644
--- a/drivers/net/mlx5/hws/mlx5dr_matcher.c
+++ b/drivers/net/mlx5/hws/mlx5dr_matcher.c
@@ -406,6 +406,7 @@  static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher,
 {
 	struct mlx5dr_matcher_attr *attr = &matcher->attr;
 	struct mlx5dr_cmd_rtc_create_attr rtc_attr = {0};
+	struct mlx5dr_match_template *mt = matcher->mt;
 	struct mlx5dr_context *ctx = matcher->tbl->ctx;
 	struct mlx5dr_action_default_stc *default_stc;
 	struct mlx5dr_table *tbl = matcher->tbl;
@@ -413,8 +414,6 @@  static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher,
 	struct mlx5dr_pool *ste_pool, *stc_pool;
 	struct mlx5dr_devx_obj *devx_obj;
 	struct mlx5dr_pool_chunk *ste;
-	uint8_t first_definer_id;
-	bool is_jumbo;
 	int ret;
 
 	switch (rtc_type) {
@@ -424,19 +423,17 @@  static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher,
 		ste_pool = matcher->match_ste.pool;
 		ste = &matcher->match_ste.ste;
 		ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
+
 		rtc_attr.log_size = attr->table.sz_row_log;
 		rtc_attr.log_depth = attr->table.sz_col_log;
+		rtc_attr.is_frst_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
 		rtc_attr.miss_ft_id = matcher->end_ft->id;
 
-		is_jumbo = mlx5dr_definer_is_jumbo(matcher->mt->definer);
-		first_definer_id = mlx5dr_definer_get_id(matcher->mt->definer);
-
 		if (attr->insert_mode == MLX5DR_MATCHER_INSERT_BY_HASH) {
 			/* The usual Hash Table */
 			rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
 			/* The first match template is used since all share the same definer */
-			rtc_attr.match_definer_0 = first_definer_id;
-			rtc_attr.is_frst_jumbo = is_jumbo;
+			rtc_attr.match_definer_0 = mlx5dr_definer_get_id(mt->definer);
 		} else if (attr->insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) {
 			rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
 			rtc_attr.num_hash_definer = 1;
@@ -444,8 +441,7 @@  static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher,
 			if (attr->distribute_mode == MLX5DR_MATCHER_DISTRIBUTE_BY_HASH) {
 				/* Hash Split Table */
 				rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
-				rtc_attr.match_definer_0 = first_definer_id;
-				rtc_attr.is_frst_jumbo = is_jumbo;
+				rtc_attr.match_definer_0 = mlx5dr_definer_get_id(mt->definer);
 			} else if (attr->distribute_mode == MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR) {
 				/* Linear Lookup Table */
 				rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
@@ -608,7 +604,7 @@  static void mlx5dr_matcher_set_pool_attr(struct mlx5dr_pool_attr *attr,
 
 static int mlx5dr_matcher_bind_at(struct mlx5dr_matcher *matcher)
 {
-	bool is_jumbo = mlx5dr_definer_is_jumbo(matcher->mt->definer);
+	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(matcher->mt);
 	struct mlx5dr_cmd_stc_modify_attr stc_attr = {0};
 	struct mlx5dr_table *tbl = matcher->tbl;
 	struct mlx5dr_pool_attr pool_attr = {0};
@@ -703,34 +699,19 @@  static int mlx5dr_matcher_bind_mt(struct mlx5dr_matcher *matcher)
 {
 	struct mlx5dr_context *ctx = matcher->tbl->ctx;
 	struct mlx5dr_pool_attr pool_attr = {0};
-	int i, created = 0;
-	int ret = -1;
-
-	for (i = 0; i < matcher->num_of_mt; i++) {
-		/* Get a definer for each match template */
-		ret = mlx5dr_definer_get(ctx, &matcher->mt[i]);
-		if (ret)
-			goto definer_put;
-
-		created++;
-
-		/* Verify all templates produce the same definer */
-		if (i == 0)
-			continue;
+	int ret;
 
-		ret = mlx5dr_definer_compare(matcher->mt[i].definer,
-					     matcher->mt[i - 1].definer);
-		if (ret) {
-			DR_LOG(ERR, "Match templates cannot be used on the same matcher");
-			rte_errno = ENOTSUP;
-			goto definer_put;
-		}
+	/* Calculate match definers */
+	ret = mlx5dr_definer_matcher_init(ctx, matcher);
+	if (ret) {
+		DR_LOG(ERR, "Failed to set matcher templates with match definers");
+		return ret;
 	}
 
 	/* Create an STE pool per matcher*/
+	pool_attr.table_type = matcher->tbl->type;
 	pool_attr.pool_type = MLX5DR_POOL_TYPE_STE;
 	pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_MATCHER_STE_POOL;
-	pool_attr.table_type = matcher->tbl->type;
 	pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
 				 matcher->attr.table.sz_row_log;
 	mlx5dr_matcher_set_pool_attr(&pool_attr, matcher);
@@ -738,26 +719,20 @@  static int mlx5dr_matcher_bind_mt(struct mlx5dr_matcher *matcher)
 	matcher->match_ste.pool = mlx5dr_pool_create(ctx, &pool_attr);
 	if (!matcher->match_ste.pool) {
 		DR_LOG(ERR, "Failed to allocate matcher STE pool");
-		goto definer_put;
+		goto uninit_match_definer;
 	}
 
 	return 0;
 
-definer_put:
-	while (created--)
-		mlx5dr_definer_put(&matcher->mt[created]);
-
+uninit_match_definer:
+	mlx5dr_definer_matcher_uninit(matcher);
 	return ret;
 }
 
 static void mlx5dr_matcher_unbind_mt(struct mlx5dr_matcher *matcher)
 {
-	int i;
-
-	for (i = 0; i < matcher->num_of_mt; i++)
-		mlx5dr_definer_put(&matcher->mt[i]);
-
 	mlx5dr_pool_destroy(matcher->match_ste.pool);
+	mlx5dr_definer_matcher_uninit(matcher);
 }
 
 static int
@@ -958,6 +933,8 @@  mlx5dr_matcher_create_col_matcher(struct mlx5dr_matcher *matcher)
 	col_matcher->num_of_at = matcher->num_of_at;
 	col_matcher->num_of_mt = matcher->num_of_mt;
 	col_matcher->attr.priority = matcher->attr.priority;
+	col_matcher->flags = matcher->flags;
+	col_matcher->flags |= MLX5DR_MATCHER_FLAGS_COLISION;
 	col_matcher->attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
 	col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src;
 	col_matcher->attr.table.sz_row_log = matcher->attr.rule.num_log;
diff --git a/drivers/net/mlx5/hws/mlx5dr_matcher.h b/drivers/net/mlx5/hws/mlx5dr_matcher.h
index b957f5ea4b..4bdb33b11f 100644
--- a/drivers/net/mlx5/hws/mlx5dr_matcher.h
+++ b/drivers/net/mlx5/hws/mlx5dr_matcher.h
@@ -22,15 +22,19 @@ 
 /* Required depth of the main large table */
 #define MLX5DR_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
 
+enum mlx5dr_matcher_flags {
+	MLX5DR_MATCHER_FLAGS_COLISION		= 1 << 0,
+};
+
 struct mlx5dr_match_template {
 	struct rte_flow_item *items;
 	struct mlx5dr_definer *definer;
+	struct mlx5dr_definer *range_definer;
 	struct mlx5dr_definer_fc *fc;
-	uint32_t fc_sz;
+	uint16_t fc_sz;
 	uint64_t item_flags;
 	uint8_t vport_item_id;
 	enum mlx5dr_match_template_flags flags;
-	uint32_t refcount;
 };
 
 struct mlx5dr_matcher_match_ste {
@@ -59,6 +63,8 @@  struct mlx5dr_matcher {
 	uint8_t num_of_mt;
 	struct mlx5dr_action_template *at;
 	uint8_t num_of_at;
+	/* enum mlx5dr_matcher_flags */
+	uint8_t flags;
 	struct mlx5dr_devx_obj *end_ft;
 	struct mlx5dr_matcher *col_matcher;
 	struct mlx5dr_matcher_match_ste match_ste;
@@ -66,6 +72,12 @@  struct mlx5dr_matcher {
 	LIST_ENTRY(mlx5dr_matcher) next;
 };
 
+static inline bool
+mlx5dr_matcher_mt_is_jumbo(struct mlx5dr_match_template *mt)
+{
+	return mlx5dr_definer_is_jumbo(mt->definer);
+}
+
 int mlx5dr_matcher_conv_items_to_prm(uint64_t *match_buf,
 				     struct rte_flow_item *items,
 				     uint8_t *match_criteria,