[v2,13/14] net/mlx5: translate flex item pattern into matcher

Message ID 20211001193415.23288-14-viacheslavo@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series ethdev: introduce configurable flexible item

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Slava Ovsiienko Oct. 1, 2021, 7:34 p.m. UTC
  The matcher is a steering engine entity that presents the flow
pattern to the hardware for matching. In order to match on the flex
item pattern, the appropriate matcher fields must be configured with
the corresponding values and masks.

The flex item related matcher fields are an array of eight 32-bit
fields matching the data captured by the sample registers of the
configured flex parser. One packet field presented in the item
pattern can be split between several sample registers, and multiple
fields can be combined into a single sample register to optimize
hardware resource usage (the number of sample registers is limited),
depending on the field modes, widths and offsets. The actual mapping
is non-trivial and is controlled by special translation data built
by the PMD on flex item creation.
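
To illustrate, below is a minimal sketch of the translation walk (not
part of the patch; it assumes the mlx5 internal types introduced by
this series, and set_sample() is a hypothetical placeholder for the
misc4 update done by mlx5_flex_set_match_sample() in the code below):

    static void
    translate_sketch(const struct rte_flow_item_flex *spec,
                     const struct rte_flow_item_flex *mask,
                     const struct mlx5_flex_item *tp)
    {
            uint32_t i, pos = 0;

            for (i = 0; i < tp->mapnum; i++) {
                    const struct mlx5_flex_pattern_field *map = tp->map + i;
                    /* Cut map->width bits at bit offset pos from the pattern. */
                    uint32_t val = mlx5_flex_get_bitfield(spec, pos, map->width);
                    uint32_t msk = mlx5_flex_get_bitfield(mask, pos, map->width);

                    /* Write the bits at map->shift into register map->reg_id. */
                    set_sample(map->reg_id, val << map->shift, msk << map->shift);
                    pos += map->width;
            }
    }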

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.h           |   8 ++
 drivers/net/mlx5/mlx5_flow_flex.c | 209 ++++++++++++++++++++++++++++++
 2 files changed, 217 insertions(+)
  

Patch

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index d4fa946485..5cca704977 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1871,6 +1871,14 @@  int flow_dv_item_release(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error);
 int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
 void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
+void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
+				   void *matcher, void *key,
+				   const struct rte_flow_item *item);
+int mlx5_flex_acquire_index(struct rte_eth_dev *dev,
+			    struct rte_flow_item_flex_handle *handle,
+			    bool acquire);
+int mlx5_flex_release_index(struct rte_eth_dev *dev, int index);
+
 /* Flex parser list callbacks. */
 struct mlx5_list_entry *mlx5_flex_parser_create_cb(void *list_ctx, void *ctx);
 int mlx5_flex_parser_match_cb(void *list_ctx,
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 56b91da839..f695198833 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -113,6 +113,215 @@  mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
 	}
 }
 
+__rte_always_inline static uint32_t
+mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
+		       uint32_t pos, uint32_t width)
+{
+	const uint8_t *ptr = item->pattern;
+	uint32_t val;
+
+	/* Process the bitfield start byte. */
+	MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT);
+	if (item->length <= pos / CHAR_BIT)
+		return 0;
+	val = ptr[pos / CHAR_BIT] >> (pos % CHAR_BIT);
+	if (width <= CHAR_BIT - pos % CHAR_BIT)
+		return val;
+	width -= CHAR_BIT - pos % CHAR_BIT;
+	pos += CHAR_BIT - pos % CHAR_BIT;
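+	/* Process the bitfield intermediate bytes. */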
+	while (width >= CHAR_BIT) {
+		val <<= CHAR_BIT;
+		if (pos / CHAR_BIT < item->length)
+			val |= ptr[pos / CHAR_BIT];
+		width -= CHAR_BIT;
+		pos += CHAR_BIT;
+	}
+	/* Process the bitfield end byte. */
+	if (width) {
+		val <<= width;
+		if (pos / CHAR_BIT < item->length)
+			val |= ptr[pos / CHAR_BIT] & (RTE_BIT32(width) - 1);
+	}
+	return val;
+}
+
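+/*
+ * Merge the value and mask of one sample register into the misc4 match
+ * parameters, preserving the bits set there by other map entries that
+ * share the same register.
+ */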
+#define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \
+	do { \
+		uint32_t tmp, out = (def); \
+		tmp = MLX5_GET(fte_match_set_misc4, misc4_m, \
+			       prog_sample_field_value_##x); \
+		tmp = (tmp & ~out) | (msk); \
+		MLX5_SET(fte_match_set_misc4, misc4_m, \
+			 prog_sample_field_value_##x, tmp); \
+		tmp = MLX5_GET(fte_match_set_misc4, misc4_v, \
+			       prog_sample_field_value_##x); \
+		tmp = (tmp & ~out) | (val); \
+		MLX5_SET(fte_match_set_misc4, misc4_v, \
+			 prog_sample_field_value_##x, tmp); \
+		tmp = sid; \
+		MLX5_SET(fte_match_set_misc4, misc4_v, \
+			 prog_sample_field_id_##x, tmp);\
+		MLX5_SET(fte_match_set_misc4, misc4_m, \
+			 prog_sample_field_id_##x, tmp); \
+	} while (0)
+
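+/*
+ * The misc4 sample fields are addressed by distinct compile-time names,
+ * so the runtime register index is dispatched via a switch statement.
+ */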
+__rte_always_inline static void
+mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
+			   uint32_t def, uint32_t mask, uint32_t value,
+			   uint32_t sample_id, uint32_t id)
+{
+	switch (id) {
+	case 0:
+		SET_FP_MATCH_SAMPLE_ID(0, def, mask, value, sample_id);
+		break;
+	case 1:
+		SET_FP_MATCH_SAMPLE_ID(1, def, mask, value, sample_id);
+		break;
+	case 2:
+		SET_FP_MATCH_SAMPLE_ID(2, def, mask, value, sample_id);
+		break;
+	case 3:
+		SET_FP_MATCH_SAMPLE_ID(3, def, mask, value, sample_id);
+		break;
+	case 4:
+		SET_FP_MATCH_SAMPLE_ID(4, def, mask, value, sample_id);
+		break;
+	case 5:
+		SET_FP_MATCH_SAMPLE_ID(5, def, mask, value, sample_id);
+		break;
+	case 6:
+		SET_FP_MATCH_SAMPLE_ID(6, def, mask, value, sample_id);
+		break;
+	case 7:
+		SET_FP_MATCH_SAMPLE_ID(7, def, mask, value, sample_id);
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+#undef SET_FP_MATCH_SAMPLE_ID
+}
+
+/**
+ * Translate item pattern into matcher fields according to translation
+ * array.
+ *
+ * @param dev
+ *   Ethernet device to translate flex item on.
+ * @param[in, out] matcher
+ *   Flow matcher to configure.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ */
+void
+mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
+			      void *matcher, void *key,
+			      const struct rte_flow_item *item)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item_flex *spec, *mask;
+	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
+				     misc_parameters_4);
+	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
+	struct mlx5_flex_item *tp;
+	uint32_t i, pos = 0;
+
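+	/* The item must carry both spec and mask to be translated. */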
+	MLX5_ASSERT(item->spec && item->mask);
+	spec = item->spec;
+	mask = item->mask;
+	tp = (struct mlx5_flex_item *)spec->handle;
+	MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
+	for (i = 0; i < tp->mapnum; i++) {
+		struct mlx5_flex_pattern_field *map = tp->map + i;
+		uint32_t id = map->reg_id;
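+		/* Mask of the sample register bits covered by this entry. */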
+		uint32_t def = (RTE_BIT32(map->width) - 1) << map->shift;
+		uint32_t val = mlx5_flex_get_bitfield(spec, pos, map->width);
+		uint32_t msk = mlx5_flex_get_bitfield(mask, pos, map->width);
+
+		MLX5_ASSERT(map->width);
+		MLX5_ASSERT(id < tp->devx_fp->num_samples);
+		pos += map->width;
+		val <<= map->shift;
+		msk <<= map->shift;
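+		/* Merge into the register, keeping other entries' bits. */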
+		mlx5_flex_set_match_sample(misc4_m, misc4_v,
+					   def, msk & def, val & msk & def,
+					   tp->devx_fp->sample_ids[id], id);
+	}
+}
+
+/**
+ * Convert flex item handle (from the RTE flow) to flex item index on port.
+ * Optionally increments the flex item reference count.
+ *
+ * @param dev
+ *   Ethernet device to acquire flex item on.
+ * @param[in] handle
+ *   Flow item handle from item spec.
+ * @param[in] acquire
+ *   If set - increment reference counter.
+ *
+ * @return
+ *   >=0 - index on success, a negative errno value otherwise
+ *         and rte_errno is set.
+ */
+int
+mlx5_flex_acquire_index(struct rte_eth_dev *dev,
+			struct rte_flow_item_flex_handle *handle,
+			bool acquire)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex = (struct mlx5_flex_item *)handle;
+	int ret = mlx5_flex_index(priv, flex);
+
+	if (ret < 0) {
+		errno = EINVAL;
+		rte_errno = EINVAL;
+		return ret;
+	}
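+	/* Hold a reference so the flex item is not destroyed while in use. */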
+	if (acquire)
+		__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
+	return ret;
+}
+
+/**
+ * Release flex item index on port - decrements the reference counter
+ * of the flex item referenced by the index.
+ *
+ * @param dev
+ *   Ethernet device to release flex item on.
+ * @param[in] index
+ *   Flow item index.
+ *
+ * @return
+ *   0 - on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flex_release_index(struct rte_eth_dev *dev,
+			int index)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex;
+
+	if (index < 0 || index >= MLX5_PORT_FLEX_ITEM_NUM ||
+	    !(priv->flex_item_map & (1u << index))) {
+		errno = EINVAL;
+		rte_errno = EINVAL;
+		return -EINVAL;
+	}
+	flex = priv->flex_item + index;
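+	/* Refcnt 1 means only the creation reference is held. */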
+	if (flex->refcnt <= 1) {
+		MLX5_ASSERT(false);
+		errno = EINVAL;
+		rte_errno = EINVAL;
+		return -EINVAL;
+	}
+	__atomic_sub_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
+	return 0;
+}
+
 static int
 mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
 			   const struct rte_flow_item_flex_conf *conf,