[RFC,6/9] net/mlx5/hws: add hws flex item matching support

Message ID 20221221073918.3581151-7-rongweil@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Andrew Rybchenko
Series add hws flex item matching and modify field

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Rongwei Liu Dec. 21, 2022, 7:39 a.m. UTC
  Support flex item matching in HWS; the syntax follows
SWS exactly.

The flex item should be created in advance and follows the
current JSON mapping logic.
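
For context (not part of the patch): a minimal usage sketch of the expected
flow, assuming a flex item configuration "flex_conf" already prepared
according to the JSON mapping; the port id, pattern bytes and variable names
below are illustrative only.

    #include <rte_flow.h>
    #include <rte_errno.h>

    /* Illustrative sketch only: create the flex item in advance, then
     * reference it from an RTE_FLOW_ITEM_TYPE_FLEX item in a pattern
     * template; the item syntax is the same as in SWS.
     */
    static int
    setup_flex_pattern_template(uint16_t port_id,
                                const struct rte_flow_item_flex_conf *flex_conf)
    {
            struct rte_flow_error err;
            struct rte_flow_item_flex_handle *handle;

            /* flex_conf is assumed to be filled per the JSON mapping logic. */
            handle = rte_flow_flex_item_create(port_id, flex_conf, &err);
            if (handle == NULL)
                    return -rte_errno;

            /* Pattern/mask bytes are placeholders for the parsed protocol. */
            static const uint8_t flex_spec_data[] = { 0x12, 0x34 };
            static const uint8_t flex_mask_data[] = { 0xff, 0xff };
            struct rte_flow_item_flex flex_spec = {
                    .handle = handle,
                    .length = sizeof(flex_spec_data),
                    .pattern = flex_spec_data,
            };
            struct rte_flow_item_flex flex_mask = {
                    .handle = handle,
                    .length = sizeof(flex_mask_data),
                    .pattern = flex_mask_data,
            };
            const struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_FLEX,
                      .spec = &flex_spec, .mask = &flex_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_pattern_template_attr pt_attr = { .ingress = 1 };
            struct rte_flow_pattern_template *pt;

            pt = rte_flow_pattern_template_create(port_id, &pt_attr, pattern, &err);
            return pt == NULL ? -rte_errno : 0;
    }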

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
---
 doc/guides/nics/features/mlx5.ini     |   1 +
 doc/guides/nics/mlx5.rst              |   1 +
 drivers/common/mlx5/mlx5_devx_cmds.c  |  14 ++-
 drivers/common/mlx5/mlx5_devx_cmds.h  |   7 +-
 drivers/common/mlx5/mlx5_prm.h        |  22 +++-
 drivers/net/mlx5/hws/mlx5dr_definer.c | 141 ++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5.c               |  17 +++-
 drivers/net/mlx5/mlx5.h               |   8 +-
 drivers/net/mlx5/mlx5_flow.h          |   1 +
 drivers/net/mlx5/mlx5_flow_flex.c     |  83 +++++++++++----
 drivers/net/mlx5/mlx5_flow_hw.c       |  48 ++++++++-
 11 files changed, 310 insertions(+), 33 deletions(-)
  

Patch

diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index 62fd330e2b..135b5c035d 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -87,6 +87,7 @@  vlan                 = Y
 vxlan                = Y
 vxlan_gpe            = Y
 represented_port     = Y
+flex item            = Y
 
 [rte_flow actions]
 age                  = I
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 85a2b422c5..e442f9c015 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -106,6 +106,7 @@  Features
 - Sub-Function representors.
 - Sub-Function.
 - Matching on represented port.
+- Matching on flex item with specific pattern.
 
 
 Limitations
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 9e0b26fa11..deda33032c 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -607,7 +607,8 @@  mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
 
 int
 mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
-				  uint32_t ids[], uint32_t num)
+				  struct mlx5_ext_sample_id ids[],
+				  uint32_t num,  uint8_t *anchor)
 {
 	uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
 	uint32_t out[MLX5_ST_SZ_DW(create_flex_parser_out)] = {0};
@@ -636,6 +637,7 @@  mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
 			(void *)flex_obj);
 		return -rte_errno;
 	}
+	*anchor = MLX5_GET(parse_graph_flex, flex, head_anchor_id);
 	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
 		void *s_off = (void *)((char *)sample + i *
 			      MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
@@ -645,8 +647,8 @@  mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
 			      flow_match_sample_en);
 		if (!en)
 			continue;
-		ids[idx++] = MLX5_GET(parse_graph_flow_match_sample, s_off,
-				  flow_match_sample_field_id);
+		ids[idx++].id = MLX5_GET(parse_graph_flow_match_sample, s_off,
+					 flow_match_sample_field_id);
 	}
 	if (num != idx) {
 		rte_errno = EINVAL;
@@ -794,6 +796,12 @@  mlx5_devx_cmd_query_hca_parse_graph_node_cap
 					 max_num_arc_out);
 	attr->max_num_sample = MLX5_GET(parse_graph_node_cap, hcattr,
 					max_num_sample);
+	attr->anchor_en = MLX5_GET(parse_graph_node_cap, hcattr, anchor_en);
+	attr->ext_sample_id = MLX5_GET(parse_graph_node_cap, hcattr, ext_sample_id);
+	attr->sample_tunnel_inner2 = MLX5_GET(parse_graph_node_cap, hcattr,
+					      sample_tunnel_inner2);
+	attr->zero_size_supported = MLX5_GET(parse_graph_node_cap, hcattr,
+					     zero_size_supported);
 	attr->sample_id_in_out = MLX5_GET(parse_graph_node_cap, hcattr,
 					  sample_id_in_out);
 	attr->max_base_header_length = MLX5_GET(parse_graph_node_cap, hcattr,
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index 1c86426e71..eff5a31b2e 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -114,6 +114,10 @@  struct mlx5_hca_flex_attr {
 	uint8_t  max_num_arc_out;
 	uint8_t  max_num_sample;
 	uint8_t  max_num_prog_sample:5;	/* From HCA CAP 2 */
+	uint8_t  anchor_en:1;
+	uint8_t  ext_sample_id:1;
+	uint8_t  sample_tunnel_inner2:1;
+	uint8_t  zero_size_supported:1;
 	uint8_t  sample_id_in_out:1;
 	uint16_t max_base_header_length;
 	uint8_t  max_sample_base_offset;
@@ -736,7 +740,8 @@  int mlx5_devx_cmd_modify_tir(struct mlx5_devx_obj *tir,
 			     struct mlx5_devx_modify_tir_attr *tir_attr);
 __rte_internal
 int mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
-				      uint32_t ids[], uint32_t num);
+				      struct mlx5_ext_sample_id ids[],
+				      uint32_t num, uint8_t *anchor);
 
 __rte_internal
 struct mlx5_devx_obj *
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 5b84657e08..97bc1eac21 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -1899,7 +1899,11 @@  struct mlx5_ifc_parse_graph_node_cap_bits {
 	u8 max_num_arc_in[0x08];
 	u8 max_num_arc_out[0x08];
 	u8 max_num_sample[0x08];
-	u8 reserved_at_78[0x07];
+	u8 reserved_at_78[0x3];
+	u8 anchor_en[0x1];
+	u8 ext_sample_id[0x1];
+	u8 sample_tunnel_inner2[0x1];
+	u8 zero_size_supported[0x1];
 	u8 sample_id_in_out[0x1];
 	u8 max_base_header_length[0x10];
 	u8 reserved_at_90[0x08];
@@ -1909,6 +1913,18 @@  struct mlx5_ifc_parse_graph_node_cap_bits {
 	u8 header_length_mask_width[0x08];
 };
 
+/* ext_sample_id structure, see PRM Table 539. */
+struct mlx5_ext_sample_id {
+	union {
+			struct {
+				uint32_t format_select_dw:8;
+				uint32_t modify_field_id:12;
+				uint32_t sample_id:12;
+			};
+			uint32_t id;
+	};
+};
+
 struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8 ft_support[0x1];
 	u8 flow_tag[0x1];
@@ -4574,7 +4590,9 @@  struct mlx5_ifc_parse_graph_flex_bits {
 	u8 header_length_mode[0x4];
 	u8 header_length_field_offset[0x10];
 	u8 next_header_field_offset[0x10];
-	u8 reserved_at_160[0x1b];
+	u8 reserved_at_160[0x12];
+	u8 head_anchor_id[0x6];
+	u8 reserved_at_178[0x3];
 	u8 next_header_field_size[0x5];
 	u8 header_length_field_mask[0x20];
 	u8 reserved_at_224[0x20];
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index ced1b61b72..e9f3bbf55b 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -299,6 +299,57 @@  mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
 	DR_SET(tag, ok1_bits, fc->byte_off, fc->bit_off, fc->bit_mask);
 }
 
+static uint32_t
+mlx5dr_definer_flex_parser_common(const struct mlx5dr_definer_fc *fc,
+				  const struct rte_flow_item_flex *flex,
+				  bool is_mask)
+{
+	struct mlx5_flex_item *tp = (struct mlx5_flex_item *)flex->handle;
+	struct mlx5_flex_pattern_field *map;
+	uint32_t i, val, pos, def;
+	int id;
+
+	tp = (struct mlx5_flex_item *)flex->handle;
+	for (i = 0, pos = 0, val = 0; i < tp->mapnum && pos < flex->length * CHAR_BIT; i++) {
+		map = tp->map + i;
+		id = mlx5_flex_get_sample_id(tp, i, &pos, fc->bit_off, &def);
+		if (id == -1)
+			continue;
+		MLX5_ASSERT(id < (int)tp->devx_fp->num_samples);
+		if (id >= (int)tp->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return -1;
+		if (tp->devx_fp->sample_ids[id].format_select_dw * 4 == fc->byte_off) {
+			val |= RTE_BE32(mlx5_flex_get_bitfield(flex, pos, map->width, map->shift)) &
+			       (is_mask ? def : UINT32_MAX);
+		}
+		pos += map->width;
+	}
+	return val;
+}
+
+static void
+mlx5dr_definer_flex_parser_set(struct mlx5dr_definer_fc *fc,
+			       const void *item,
+			       uint8_t *tag)
+{
+	uint32_t val;
+
+	val = mlx5dr_definer_flex_parser_common(fc, item, false);
+	DR_SET_BE32(tag, (val & fc->bit_mask), fc->byte_off, 0, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_flex_parser_mask_set(struct mlx5dr_definer_fc *fc,
+				    const void *item,
+				    uint8_t *tag)
+{
+	uint32_t mask;
+
+	mask = mlx5dr_definer_flex_parser_common(fc, item, true);
+	DR_SET_BE32(tag, (mask), fc->byte_off, 0, UINT32_MAX);
+	fc->bit_mask = mask;
+}
+
 static void
 mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,
 			   const void *item_spec,
@@ -1674,6 +1725,91 @@  mlx5dr_definer_conv_item_ipsec_syndrome(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static struct mlx5dr_definer_fc *
+mlx5dr_definer_get_flex_parser_fc(struct mlx5dr_definer_conv_data *cd,
+				  struct mlx5_ext_sample_id reg, int item_idx)
+{
+	enum mlx5dr_definer_fname i = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+	struct mlx5dr_definer_fc *fc;
+
+	for (; i <= MLX5DR_DEFINER_FNAME_FLEX_PARSER_7; i++) {
+		fc = &cd->fc[i];
+		switch (i) {
+		case MLX5DR_DEFINER_FNAME_FLEX_PARSER_0:
+			DR_CALC_SET_HDR(fc, flex_parser, flex_parser_0);
+			break;
+		case MLX5DR_DEFINER_FNAME_FLEX_PARSER_1:
+			DR_CALC_SET_HDR(fc, flex_parser, flex_parser_1);
+			break;
+		case MLX5DR_DEFINER_FNAME_FLEX_PARSER_2:
+			DR_CALC_SET_HDR(fc, flex_parser, flex_parser_2);
+			break;
+		case MLX5DR_DEFINER_FNAME_FLEX_PARSER_3:
+			DR_CALC_SET_HDR(fc, flex_parser, flex_parser_3);
+			break;
+		case MLX5DR_DEFINER_FNAME_FLEX_PARSER_4:
+			DR_CALC_SET_HDR(fc, flex_parser, flex_parser_4);
+			break;
+		case MLX5DR_DEFINER_FNAME_FLEX_PARSER_5:
+			DR_CALC_SET_HDR(fc, flex_parser, flex_parser_5);
+			break;
+		case MLX5DR_DEFINER_FNAME_FLEX_PARSER_6:
+			DR_CALC_SET_HDR(fc, flex_parser, flex_parser_6);
+			break;
+		case MLX5DR_DEFINER_FNAME_FLEX_PARSER_7:
+		default:
+			DR_CALC_SET_HDR(fc, flex_parser, flex_parser_7);
+			break;
+		}
+		if (fc->byte_off == reg.format_select_dw * 4)
+			break;
+	}
+	if (i > MLX5DR_DEFINER_FNAME_FLEX_PARSER_7) {
+		rte_errno = ENOTSUP;
+		return NULL;
+	}
+	fc->item_idx = item_idx;
+	fc->tag_set = &mlx5dr_definer_flex_parser_set;
+	fc->fname = i;
+	fc->tag_mask_set = &mlx5dr_definer_flex_parser_mask_set;
+	fc->bit_off = cd->tunnel;
+	return fc;
+}
+
+static int
+mlx5dr_definer_conv_item_flex(struct mlx5dr_definer_conv_data *cd,
+			      struct rte_flow_item *item,
+			      int item_idx)
+{
+	const struct rte_flow_item_flex *v, *m;
+	struct mlx5_flex_pattern_field *map;
+	bool is_inner = cd->tunnel;
+	struct mlx5_flex_item *tp;
+	uint32_t i, mask, def;
+	uint32_t pos;
+	int id;
+
+	MLX5_ASSERT(item->spec && item->mask);
+	m = item->mask;
+	v = item->spec;
+	tp = (struct mlx5_flex_item *)v->handle;
+	for (i = 0, pos = 0; i < tp->mapnum && pos < m->length * CHAR_BIT; i++) {
+		map = tp->map + i;
+		id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner, &def);
+		if (id == -1)
+			continue;
+		MLX5_ASSERT(id < (int)tp->devx_fp->num_samples);
+		if (id >= (int)tp->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return -1;
+		mask = mlx5_flex_get_bitfield(m, pos, map->width, map->shift);
+		if (def & RTE_BE32(mask) &&
+		    !mlx5dr_definer_get_flex_parser_fc(cd, tp->devx_fp->sample_ids[id], item_idx))
+			return rte_errno;
+		pos += map->width;
+	}
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 				struct mlx5dr_match_template *mt,
@@ -1807,6 +1943,11 @@  mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			ret = mlx5dr_definer_conv_item_ipsec_syndrome(&cd, items, i);
 			item_flags |= MLX5_FLOW_ITEM_IPSEC_SYNDROME;
 			break;
+		case RTE_FLOW_ITEM_TYPE_FLEX:
+			ret = mlx5dr_definer_conv_item_flex(&cd, items, i);
+			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+				      MLX5_FLOW_ITEM_OUTER_FLEX;
+			break;
 		default:
 			DR_LOG(ERR, "Unsupported item type %d", items->type);
 			rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index fe9897f83d..0791a6a155 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -974,11 +974,13 @@  int
 mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
 	struct mlx5_ecpri_parser_profile *prf =	&priv->sh->ecpri_parser;
 	struct mlx5_devx_graph_node_attr node = {
 		.modify_field_select = 0,
 	};
-	uint32_t ids[8];
+	struct mlx5_ext_sample_id ids[8];
+	uint8_t anchor_id;
 	int ret;
 
 	if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
@@ -1014,15 +1016,20 @@  mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
 		return (rte_errno == 0) ? -ENODEV : -rte_errno;
 	}
 	prf->num = 2;
-	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num);
+	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num, &anchor_id);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to query sample IDs.");
 		return (rte_errno == 0) ? -ENODEV : -rte_errno;
 	}
 	prf->offset[0] = 0x0;
 	prf->offset[1] = sizeof(uint32_t);
-	prf->ids[0] = ids[0];
-	prf->ids[1] = ids[1];
+	if (attr->ext_sample_id) {
+		prf->ids[0] = ids[0].sample_id;
+		prf->ids[1] = ids[1].sample_id;
+	} else {
+		prf->ids[0] = ids[0].id;
+		prf->ids[1] = ids[1].id;
+	}
 	return 0;
 }
 
@@ -1037,7 +1044,7 @@  static void
 mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ecpri_parser_profile *prf =	&priv->sh->ecpri_parser;
+	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
 
 	if (prf->obj)
 		mlx5_devx_cmd_destroy(prf->obj);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 761b5ac572..86a4c0a457 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1327,9 +1327,10 @@  struct mlx5_lag {
 struct mlx5_flex_parser_devx {
 	struct mlx5_list_entry entry;  /* List element at the beginning. */
 	uint32_t num_samples;
+	uint8_t anchor_id;
 	void *devx_obj;
 	struct mlx5_devx_graph_node_attr devx_conf;
-	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
+	struct mlx5_ext_sample_id sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
 };
 
 /* Pattern field descriptor - how to translate flex pattern into samples. */
@@ -2346,6 +2347,11 @@  void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
 void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, void *matcher,
 				   void *key, const struct rte_flow_item *item,
 				   bool is_inner);
+int mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+			    uint32_t idx, uint32_t *pos,
+			    bool is_inner, uint32_t *def);
+uint32_t mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
+				uint32_t pos, uint32_t width, uint32_t shift);
 int mlx5_flex_acquire_index(struct rte_eth_dev *dev,
 			    struct rte_flow_item_flex_handle *handle,
 			    bool acquire);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 7148c10e96..82b5a4a81f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1280,6 +1280,7 @@  struct rte_flow_pattern_template {
 	 * tag pattern item for representor matching.
 	 */
 	bool implicit_tag;
+	uint8_t flex_item; /* flex item index. */
 };
 
 /* Flow action template struct. */
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index bec07b13c1..affec62384 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -113,7 +113,7 @@  mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
 	}
 }
 
-static uint32_t
+uint32_t
 mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
 		       uint32_t pos, uint32_t width, uint32_t shift)
 {
@@ -198,6 +198,50 @@  mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
 	}
 #undef SET_FP_MATCH_SAMPLE_ID
 }
+
+/**
+ * Get the flex parser sample id and corresponding mask
+ * per shift and width information.
+ *
+ * @param[in] tp
+ *   Mlx5 flex item sample mapping handle.
+ * @param[in] idx
+ *   Mapping index.
+ * @param[in, out] pos
+ *   Where to search the value and mask.
+ * @param[in] is_inner
+ *   For inner matching or not.
+ * @param[out] def
+ *   Mask generated by mapping shift and width.
+ *
+ * @return
+ *   The sample id on success, -1 to skip the sample (DUMMY field).
+ */
+int
+mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+			uint32_t idx, uint32_t *pos,
+			bool is_inner, uint32_t *def)
+{
+	const struct mlx5_flex_pattern_field *map = tp->map + idx;
+	uint32_t id = map->reg_id;
+
+	*def = (RTE_BIT64(map->width) - 1) << map->shift;
+	/* Skip placeholders for DUMMY fields. */
+	if (id == MLX5_INVALID_SAMPLE_REG_ID) {
+		*pos += map->width;
+		return -1;
+	}
+	MLX5_ASSERT(map->width);
+	MLX5_ASSERT(id < tp->devx_fp->num_samples);
+	if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
+		uint32_t num_samples = tp->devx_fp->num_samples / 2;
+
+		MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
+		MLX5_ASSERT(id < num_samples);
+		id += num_samples;
+	}
+	return id;
+}
 /**
  * Translate item pattern into matcher fields according to translation
  * array.
@@ -226,40 +270,38 @@  mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
 	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
 				     misc_parameters_4);
 	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
 	struct mlx5_flex_item *tp;
 	uint32_t i, pos = 0;
+	uint32_t sample_id;
 
 	RTE_SET_USED(dev);
 	MLX5_ASSERT(item->spec && item->mask);
 	spec = item->spec;
 	mask = item->mask;
 	tp = (struct mlx5_flex_item *)spec->handle;
-	MLX5_ASSERT(mlx5_flex_index(dev->data->dev_private, tp) >= 0);
+	MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
 	for (i = 0; i < tp->mapnum; i++) {
 		struct mlx5_flex_pattern_field *map = tp->map + i;
-		uint32_t id = map->reg_id;
-		uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
-		uint32_t val, msk;
+		uint32_t val, msk, def;
+		int id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner, &def);
 
-		/* Skip placeholders for DUMMY fields. */
-		if (id == MLX5_INVALID_SAMPLE_REG_ID) {
-			pos += map->width;
+		if (id == -1)
 			continue;
-		}
+		MLX5_ASSERT(id < (int)tp->devx_fp->num_samples);
+		if (id >= (int)tp->devx_fp->num_samples ||
+		    id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return;
 		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
 		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
-		MLX5_ASSERT(map->width);
-		MLX5_ASSERT(id < tp->devx_fp->num_samples);
-		if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
-			uint32_t num_samples = tp->devx_fp->num_samples / 2;
-
-			MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
-			MLX5_ASSERT(id < num_samples);
-			id += num_samples;
-		}
+		if (attr->ext_sample_id)
+			sample_id = tp->devx_fp->sample_ids[id].sample_id;
+		else
+			sample_id = tp->devx_fp->sample_ids[id].id;
 		mlx5_flex_set_match_sample(misc4_m, misc4_v,
 					   def, msk & def, val & msk & def,
-					   tp->devx_fp->sample_ids[id], id);
+					   sample_id, id);
 		pos += map->width;
 	}
 }
@@ -1317,7 +1359,8 @@  mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
 	/* Query the firmware assigned sample ids. */
 	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
 						fp->sample_ids,
-						fp->num_samples);
+						fp->num_samples,
+						&fp->anchor_id);
 	if (ret)
 		goto error;
 	DRV_LOG(DEBUG, "DEVx flex parser %p created, samples num: %u",
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b9c7459646..56f1e699fa 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4649,6 +4649,36 @@  flow_hw_actions_template_replace_container(const
 	}                                                             \
 })
 
+static int
+flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
+			  struct rte_flow_item_flex_handle *handle,
+			  uint8_t *flex_item)
+{
+	int index = mlx5_flex_acquire_index(dev, handle, false);
+
+	MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
+	if (index < 0)
+		return -1;
+	if (!(*flex_item & RTE_BIT32(index))) {
+		/* Don't count same flex item again. */
+		if (mlx5_flex_acquire_index(dev, handle, true) != index)
+			MLX5_ASSERT(false);
+		*flex_item |= (uint8_t)RTE_BIT32(index);
+	}
+	return 0;
+}
+
+static void
+flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
+{
+	while (*flex_item) {
+		int index = rte_bsf32(*flex_item);
+
+		mlx5_flex_release_index(dev, index);
+		*flex_item &= ~(uint8_t)RTE_BIT32(index);
+	}
+}
+
 /**
  * Create flow action template.
  *
@@ -5037,6 +5067,7 @@  flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_QUOTA:
 		case RTE_FLOW_ITEM_TYPE_ESP:
 		case MLX5_FLOW_ITEM_TYPE_IPSEC_SYNDROME:
+		case RTE_FLOW_ITEM_TYPE_FLEX:
 			break;
 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
 			/*
@@ -5114,6 +5145,7 @@  flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 		.mask = &tag_m,
 		.last = NULL
 	};
+	unsigned int i = 0;
 
 	if (flow_hw_pattern_validate(dev, attr, items, error))
 		return NULL;
@@ -5173,6 +5205,19 @@  flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 			it->implicit_tag = true;
 		mlx5_free(copied_items);
 	}
+	for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i) {
+		if (items[i].type == RTE_FLOW_ITEM_TYPE_FLEX) {
+			const struct rte_flow_item_flex *spec =
+				(const struct rte_flow_item_flex *)items[i].spec;
+			struct rte_flow_item_flex_handle *handle = spec->handle;
+
+			if (flow_hw_flex_item_acquire(dev, handle, &it->flex_item)) {
+				claim_zero(mlx5dr_match_template_destroy(it->mt));
+				mlx5_free(it);
+				return NULL;
+			}
+		}
+	}
 	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
 	return it;
@@ -5192,7 +5237,7 @@  flow_hw_pattern_template_create(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
+flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
 			      struct rte_flow_pattern_template *template,
 			      struct rte_flow_error *error __rte_unused)
 {
@@ -5205,6 +5250,7 @@  flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
 				   "item template in using");
 	}
 	LIST_REMOVE(template, next);
+	flow_hw_flex_item_release(dev, &template->flex_item);
 	claim_zero(mlx5dr_match_template_destroy(template->mt));
 	mlx5_free(template);
 	return 0;