diff mbox series

[v2,12/14] net/mlx5: translate flex item configuration

Message ID 20211001193415.23288-13-viacheslavo@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Headers show
Series ethdev: introduce configurable flexible item | expand

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Viacheslav Ovsiienko Oct. 1, 2021, 7:34 p.m. UTC
RTE Flow flex item configuration should be translated
into actual hardware settings:

  - translate header length and next protocol field samplings
  - translate data field sampling, the similar fields with the
    same mode and matching related parameters are relocated
    and grouped to be covered with minimal amount of hardware
    sampling registers (each register can cover arbitrary
    neighbour 32 bits (aligned to byte boundary) in the packet
    and we can combine the fields with smaller lengths or
    segments of bigger fields)
  - input and output links translation
  - preparing data for parsing flex item pattern on flow creation

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.h           |  16 +-
 drivers/net/mlx5/mlx5_flow_flex.c | 748 +++++++++++++++++++++++++++++-
 2 files changed, 762 insertions(+), 2 deletions(-)
diff mbox series

Patch

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 629ff6ebfe..d4fa946485 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -52,6 +52,9 @@ 
 /* Maximal number of flex items created on the port.*/
 #define MLX5_PORT_FLEX_ITEM_NUM			4
 
+/* Maximal number of field/field parts to map into sample registers. */
+#define MLX5_FLEX_ITEM_MAPPING_NUM		32
+
 enum mlx5_ipool_index {
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
@@ -1124,10 +1127,21 @@  struct mlx5_flex_parser_devx {
 	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
 };
 
+/* Pattern field descriptor - how to translate flex pattern into samples. */
+__extension__
+struct mlx5_flex_pattern_field {
+	uint16_t width:6;
+	uint16_t shift:5;
+	uint16_t reg_id:5;
+};
+
 /* Port flex item context. */
 struct mlx5_flex_item {
 	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
-	uint32_t refcnt; /**< Atomically accessed refcnt by flows. */
+	uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+	uint32_t tunnel:1; /* Flex item presents tunnel protocol. */
+	uint32_t mapnum; /* Number of pattern translation entries. */
+	struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
 };
 
 /*
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index b8a091e259..56b91da839 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -113,6 +113,750 @@  mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
 	}
 }
 
+static int
+mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *devx,
+			   struct rte_flow_error *error)
+{
+	const struct rte_flow_item_flex_field *field = &conf->next_header;
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	uint32_t len_width;
+
+	if (field->field_base % CHAR_BIT)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "not byte aligned header length field");
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "invalid header length field mode (DUMMY)");
+	case FIELD_MODE_FIXED:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (FIXED)");
+		if (attr->header_length_mask_width < field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field width exceeds limit");
+		if (field->offset_shift < 0 ||
+		    field->offset_shift > attr->header_length_mask_width)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid header length field shift (FIXED)");
+		if (field->field_base < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "negative header length field base (FIXED)");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
+		break;
+	case FIELD_MODE_OFFSET:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (OFFSET)");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
+		if (field->offset_mask == 0 ||
+		    !rte_is_power_of_2(field->offset_mask + 1))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid length field offset mask (OFFSET)");
+		len_width = rte_fls_u32(field->offset_mask);
+		if (len_width > attr->header_length_mask_width)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "length field offset mask too wide (OFFSET)");
+		node->header_length_field_mask = field->offset_mask;
+		break;
+	case FIELD_MODE_BITMASK:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (BITMASK)");
+		if (attr->header_length_mask_width < field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field width exceeds limit");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
+		node->header_length_field_mask = field->offset_mask;
+		break;
+	default:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown header length field mode");
+	}
+	if (field->field_base / CHAR_BIT >= 0 &&
+	    field->field_base / CHAR_BIT > attr->max_base_header_length)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "header length field base exceeds limit");
+	node->header_length_base_value = field->field_base / CHAR_BIT;
+	if (field->field_mode == FIELD_MODE_OFFSET ||
+	    field->field_mode == FIELD_MODE_BITMASK) {
+		if (field->offset_shift > 15 || field->offset_shift < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field shift exceeds limit");
+		node->header_length_field_shift	= field->offset_shift;
+		node->header_length_field_offset = field->offset_base;
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct mlx5_flex_parser_devx *devx,
+			 struct rte_flow_error *error)
+{
+	const struct rte_flow_item_flex_field *field = &conf->next_protocol;
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		if (conf->output_num)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "next protocol field is required (DUMMY)");
+		return 0;
+	case FIELD_MODE_FIXED:
+		break;
+	case FIELD_MODE_OFFSET:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field mode (OFFSET)");
+		break;
+	case FIELD_MODE_BITMASK:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field mode (BITMASK)");
+	default:
+		return rte_flow_error_set
+			(error, EINVAL,
+			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown next protocol field mode");
+	}
+	MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
+	if (attr->max_next_header_offset < field->field_base)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "next protocol field base exceeds limit");
+	if (field->offset_shift)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field shift");
+	node->next_header_field_offset = field->field_base;
+	node->next_header_field_size = field->field_size;
+	return 0;
+}
+
+/* Helper structure to handle field bit intervals. */
+struct mlx5_flex_field_cover {
+	uint16_t num;
+	int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
+	int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
+	uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
+};
+
+static void
+mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
+		       uint16_t num, int32_t start, int32_t end)
+{
+	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
+	MLX5_ASSERT(num <= cover->num);
+	if (num < cover->num) {
+		memmove(&cover->start[num + 1],	&cover->start[num],
+			(cover->num - num) * sizeof(int32_t));
+		memmove(&cover->end[num + 1],	&cover->end[num],
+			(cover->num - num) * sizeof(int32_t));
+	}
+	cover->start[num] = start;
+	cover->end[num] = end;
+	cover->num++;
+}
+
+static void
+mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
+{
+	uint32_t i, del = 0;
+	int32_t end;
+
+	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
+	MLX5_ASSERT(num < (cover->num - 1));
+	end = cover->end[num];
+	for (i = num + 1; i < cover->num; i++) {
+		if (end < cover->start[i])
+			break;
+		del++;
+		if (end <= cover->end[i]) {
+			cover->end[num] = cover->end[i];
+			break;
+		}
+	}
+	if (del) {
+		MLX5_ASSERT(del < (cover->num - 1u - num));
+		cover->num -= del;
+		MLX5_ASSERT(cover->num > num);
+		if ((cover->num - num) > 1) {
+			memmove(&cover->start[num + 1],
+				&cover->start[num + 1 + del],
+				(cover->num - num - 1) * sizeof(int32_t));
+			memmove(&cover->end[num + 1],
+				&cover->end[num + 1 + del],
+				(cover->num - num - 1) * sizeof(int32_t));
+		}
+	}
+}
+
+/*
+ * Validate the sample field and update interval array
+ * if parameters match with the 'match' field.
+ * Returns:
+ *    < 0  - error
+ *    == 0 - no match, interval array not updated
+ *    > 0  - match, interval array updated
+ */
+static int
+mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
+		       struct rte_flow_item_flex_field *field,
+		       struct rte_flow_item_flex_field *match,
+		       struct mlx5_hca_flex_attr *attr,
+		       struct rte_flow_error *error)
+{
+	int32_t start, end;
+	uint32_t i;
+
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		return 0;
+	case FIELD_MODE_FIXED:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (FIXED)");
+		if (field->offset_shift)
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid sample field shift (FIXED)");
+		if (field->field_base < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid sample field base (FIXED)");
+		if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "sample field base exceeds limit (FIXED)");
+		break;
+	case FIELD_MODE_OFFSET:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (OFFSET)");
+		if (field->field_base / CHAR_BIT >= 0 &&
+		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"sample field base exceeds limit");
+		break;
+	case FIELD_MODE_BITMASK:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (BITMASK)");
+		if (field->field_base / CHAR_BIT >= 0 &&
+		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"sample field base exceeds limit");
+		break;
+	default:
+		return rte_flow_error_set
+			(error, EINVAL,
+			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown data sample field mode");
+	}
+	if (!match) {
+		if (!field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"zero sample field width");
+		if (field->rss_hash)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"unsupported RSS hash over flex item fields");
+		if (field->tunnel_count != FLEX_TUNNEL_MODE_FIRST &&
+		    field->tunnel_count != FLEX_TUNNEL_MODE_OUTER &&
+		    field->tunnel_count != FLEX_TUNNEL_MODE_INNER)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"unsupported sample field tunnel mode");
+		if (field->field_id)
+			DRV_LOG(DEBUG, "sample field id hint ignored\n");
+	} else {
+		if (field->field_mode != match->field_mode ||
+		    field->rss_hash != match->rss_hash ||
+		    field->tunnel_count != match->tunnel_count ||
+		    field->offset_base != match->offset_base ||
+		    field->offset_mask != match->offset_mask ||
+		    field->offset_shift != match->offset_shift)
+			return 0;
+	}
+	start = field->field_base;
+	end = start + field->field_size;
+	/* Add the new or similar field to interval array. */
+	if (!cover->num) {
+		cover->start[cover->num] = start;
+		cover->end[cover->num] = end;
+		cover->num = 1;
+		return 1;
+	}
+	for (i = 0; i < cover->num; i++) {
+		if (start > cover->end[i]) {
+			if (i >= (cover->num - 1u)) {
+				mlx5_flex_insert_field(cover, cover->num,
+						       start, end);
+				break;
+			}
+			continue;
+		}
+		if (end < cover->start[i]) {
+			mlx5_flex_insert_field(cover, i, start, end);
+			break;
+		}
+		if (start < cover->start[i])
+			cover->start[i] = start;
+		if (end > cover->end[i]) {
+			cover->end[i] = end;
+			if (i < (cover->num - 1u))
+				mlx5_flex_merge_field(cover, i);
+		}
+		break;
+	}
+	return 1;
+}
+
+static void
+mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
+		       struct rte_flow_item_flex_field *field)
+{
+	memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
+	na->flow_match_sample_en = 1;
+	switch (field->field_mode) {
+	case FIELD_MODE_FIXED:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
+		break;
+	case FIELD_MODE_OFFSET:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
+		na->flow_match_sample_field_offset = field->offset_base;
+		na->flow_match_sample_field_offset_mask = field->offset_mask;
+		na->flow_match_sample_field_offset_shift = field->offset_shift;
+		break;
+	case FIELD_MODE_BITMASK:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
+		na->flow_match_sample_field_offset = field->offset_base;
+		na->flow_match_sample_field_offset_mask = field->offset_mask;
+		na->flow_match_sample_field_offset_shift = field->offset_shift;
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+	switch (field->tunnel_count) {
+	case FLEX_TUNNEL_MODE_FIRST:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
+		break;
+	case FLEX_TUNNEL_MODE_OUTER:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
+		break;
+	case FLEX_TUNNEL_MODE_INNER:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+}
+
+/* Map specified field to set/subset of allocated sample registers. */
+static int
+mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
+		     struct mlx5_flex_parser_devx *parser,
+		     struct mlx5_flex_item *item,
+		     struct rte_flow_error *error)
+{
+	struct mlx5_devx_match_sample_attr node;
+	int32_t start = field->field_base;
+	int32_t end = start + field->field_size;
+	uint32_t i, done_bits = 0;
+
+	mlx5_flex_config_sample(&node, field);
+	for (i = 0; i < parser->num_samples; i++) {
+		struct mlx5_devx_match_sample_attr *sample =
+			&parser->devx_conf.sample[i];
+		int32_t reg_start, reg_end;
+		int32_t cov_start, cov_end;
+		struct mlx5_flex_pattern_field *trans;
+
+		MLX5_ASSERT(sample->flow_match_sample_en);
+		if (!sample->flow_match_sample_en)
+			break;
+		node.flow_match_sample_field_base_offset =
+			sample->flow_match_sample_field_base_offset;
+		if (memcmp(&node, sample, sizeof(node)))
+			continue;
+		reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
+		reg_start *= CHAR_BIT;
+		reg_end = reg_start + 32;
+		if (end <= reg_start || start >= reg_end)
+			continue;
+		cov_start = RTE_MAX(reg_start, start);
+		cov_end = RTE_MIN(reg_end, end);
+		MLX5_ASSERT(cov_end > cov_start);
+		done_bits += cov_end - cov_start;
+		if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "too many flex item pattern translations");
+		trans = &item->map[item->mapnum];
+		item->mapnum++;
+		trans->reg_id = i;
+		trans->shift = cov_start - reg_start;
+		trans->width = cov_end - cov_start;
+	}
+	if (done_bits != field->field_size) {
+		MLX5_ASSERT(false);
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "failed to map field to sample register");
+	}
+	return 0;
+}
+
+/* Allocate sample registers for the specified field type and interval array. */
+static int
+mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
+		       struct mlx5_flex_parser_devx *parser,
+		       struct rte_flow_item_flex_field *field,
+		       struct mlx5_hca_flex_attr *attr,
+		       struct rte_flow_error *error)
+{
+	struct mlx5_devx_match_sample_attr node;
+	uint32_t idx = 0;
+
+	mlx5_flex_config_sample(&node, field);
+	while (idx < cover->num) {
+		int32_t start, end;
+
+		/* Sample base offsets are in bytes, should align. */
+		start = RTE_ALIGN_FLOOR(cover->start[idx], CHAR_BIT);
+		node.flow_match_sample_field_base_offset =
+						(start / CHAR_BIT) & 0xFF;
+		/* Allocate sample register. */
+		if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
+		    parser->num_samples >= attr->max_num_sample ||
+		    parser->num_samples >= attr->max_num_prog_sample)
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "no sample registers to handle all flex item fields");
+		parser->devx_conf.sample[parser->num_samples] = node;
+		parser->num_samples++;
+		/* Remove or update covered intervals. */
+		end = start + 32;
+		while (idx < cover->num) {
+			if (end >= cover->end[idx]) {
+				idx++;
+				continue;
+			}
+			if (end > cover->start[idx])
+				cover->start[idx] = end;
+			break;
+		}
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *parser,
+			   struct mlx5_flex_item *item,
+			   struct rte_flow_error *error)
+{
+	struct mlx5_flex_field_cover cover;
+	uint32_t i, j;
+	int ret;
+
+	if (conf->sample_num > MLX5_FLEX_ITEM_MAPPING_NUM)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "sample field number exceeds limit");
+	/*
+	 * The application can specify fields smaller or bigger than 32 bits
+	 * covered with single sample register and it can specify field
+	 * offsets in any order.
+	 *
+	 * Gather all similar fields together, build array of bit intervals
+	 * in ascending order and try to cover with the smallest set of sample
+	 * registers.
+	 */
+	memset(&cover, 0, sizeof(cover));
+	for (i = 0; i < conf->sample_num; i++) {
+		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
+
+		/* Check whether field was covered in the previous iteration. */
+		if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
+			continue;
+		if (fl->field_mode == FIELD_MODE_DUMMY)
+			continue;
+		/* Build an interval array for the field and similar ones */
+		cover.num = 0;
+		/* Add the first field to array unconditionally. */
+		ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
+		if (ret < 0)
+			return ret;
+		MLX5_ASSERT(ret > 0);
+		cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
+		for (j = i + 1; j < conf->sample_num; j++) {
+			struct rte_flow_item_flex_field *ft;
+
+			/* Add field to array if its type matches. */
+			ft = conf->sample_data + j;
+			ret = mlx5_flex_cover_sample(&cover, ft, fl,
+						     attr, error);
+			if (ret < 0)
+				return ret;
+			if (!ret)
+				continue;
+			cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
+		}
+		/* Allocate sample registers to cover array of intervals. */
+		ret = mlx5_flex_alloc_sample(&cover, parser, fl, attr, error);
+		if (ret)
+			return ret;
+	}
+	/* Build the item pattern translating data on flow creation. */
+	item->mapnum = 0;
+	memset(&item->map, 0, sizeof(item->map));
+	for (i = 0; i < conf->sample_num; i++) {
+		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
+
+		ret = mlx5_flex_map_sample(fl, parser, item, error);
+		if (ret) {
+			MLX5_ASSERT(false);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
+{
+	switch (type) {
+	case RTE_FLOW_ITEM_TYPE_ETH:
+		return  MLX5_GRAPH_ARC_NODE_MAC;
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
+	case RTE_FLOW_ITEM_TYPE_UDP:
+		return MLX5_GRAPH_ARC_NODE_UDP;
+	case RTE_FLOW_ITEM_TYPE_TCP:
+		return MLX5_GRAPH_ARC_NODE_TCP;
+	case RTE_FLOW_ITEM_TYPE_MPLS:
+		return MLX5_GRAPH_ARC_NODE_MPLS;
+	case RTE_FLOW_ITEM_TYPE_GRE:
+		return MLX5_GRAPH_ARC_NODE_GRE;
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
+		return MLX5_GRAPH_ARC_NODE_GENEVE;
+	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+		return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int
+mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
+		     struct rte_flow_error *error)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };
+
+	if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid eth item mask");
+	}
+	return rte_be_to_cpu_16(spec->hdr.ether_type);
+}
+
+static int
+mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
+		     struct rte_flow_error *error)
+{
+	const struct rte_flow_item_udp *spec = item->spec;
+	const struct rte_flow_item_udp *mask = item->mask;
+	struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };
+
+	if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid udp item mask");
+	}
+	return rte_be_to_cpu_16(spec->hdr.dst_port);
+}
+
+static int
+mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *devx,
+			   struct mlx5_flex_item *item,
+			   struct rte_flow_error *error)
+{
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	uint32_t i;
+
+	RTE_SET_USED(item);
+	if (conf->input_num > attr->max_num_arc_in)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "too many input links");
+	for (i = 0; i < conf->input_num; i++) {
+		struct mlx5_devx_graph_arc_attr *arc = node->in + i;
+		struct rte_flow_item_flex_link *link = conf->input_link + i;
+		const struct rte_flow_item *rte_item = &link->item;
+		int arc_type;
+		int ret;
+
+		if (!rte_item->spec || !rte_item->mask || rte_item->last)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid flex item IN arc format");
+		arc_type = mlx5_flex_arc_type(rte_item->type, true);
+		if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item IN arc type");
+		arc->arc_parse_graph_node = arc_type;
+		arc->start_inner_tunnel = link->tunnel ? 1 : 0;
+		/*
+		 * Configure arc IN condition value. The value location depends
+		 * on protocol. Current FW version supports IP & UDP for IN
+		 * arcs only, and locations for these protocols are defined.
+		 * Add more protocols when available.
+		 */
+		switch (rte_item->type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			ret = mlx5_flex_arc_in_eth(rte_item, error);
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			ret = mlx5_flex_arc_in_udp(rte_item, error);
+			break;
+		default:
+			MLX5_ASSERT(false);
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item IN arc type");
+		}
+		if (ret < 0)
+			return ret;
+		arc->compare_condition_value = (uint16_t)ret;
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
+			    const struct rte_flow_item_flex_conf *conf,
+			    struct mlx5_flex_parser_devx *devx,
+			    struct mlx5_flex_item *item,
+			    struct rte_flow_error *error)
+{
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	uint32_t i;
+
+	if (conf->output_num > attr->max_num_arc_out)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "too many output links");
+	for (i = 0; i < conf->output_num; i++) {
+		struct mlx5_devx_graph_arc_attr *arc = node->out + i;
+		struct rte_flow_item_flex_link *link = conf->output_link + i;
+		const struct rte_flow_item *rte_item = &link->item;
+		int arc_type;
+
+		if (rte_item->spec || rte_item->mask || rte_item->last)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "flex node: invalid OUT arc format");
+		arc_type = mlx5_flex_arc_type(rte_item->type, false);
+		if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item OUT arc type");
+		arc->arc_parse_graph_node = arc_type;
+		arc->start_inner_tunnel = link->tunnel ? 1 : 0;
+		arc->compare_condition_value = link->next;
+		if (link->tunnel)
+			item->tunnel = 1;
+	}
+	return 0;
+}
+
+/* Translate RTE flex item API configuration into flex parser settings. */
+static int
+mlx5_flex_translate_conf(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct mlx5_flex_parser_devx *devx,
+			 struct mlx5_flex_item *item,
+			 struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hca_flex_attr *attr = &priv->config.hca_attr.flex;
+	int ret;
+
+	ret = mlx5_flex_translate_length(attr, conf, devx, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_next(attr, conf, devx, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	return 0;
+}
+
 /**
  * Create the flex item with specified configuration over the Ethernet device.
  *
@@ -145,6 +889,8 @@  flow_dv_item_create(struct rte_eth_dev *dev,
 				   "too many flex items created on the port");
 		return NULL;
 	}
+	if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
+		goto error;
 	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
 	if (!ent) {
 		rte_flow_error_set(error, ENOMEM,
@@ -153,7 +899,6 @@  flow_dv_item_create(struct rte_eth_dev *dev,
 		goto error;
 	}
 	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
-	RTE_SET_USED(conf);
 	/* Mark initialized flex item valid. */
 	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
 	return (struct rte_flow_item_flex_handle *)flex;
@@ -278,6 +1023,7 @@  mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
 	RTE_SET_USED(list_ctx);
 	MLX5_ASSERT(fp->devx_obj);
 	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
+	DRV_LOG(DEBUG, "DEVx flex parser %p destroyed\n", (const void *)fp);
 	mlx5_free(entry);
 }