@@ -173,6 +173,10 @@ enum mlx5_feature_name {
/* Conntrack item. */
#define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 35)
+/* Flex item */
+#define MLX5_FLOW_ITEM_FLEX (UINT64_C(1) << 36)
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 37)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -187,7 +191,8 @@ enum mlx5_feature_name {
(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
- MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)
+ MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+ MLX5_FLOW_ITEM_FLEX_TUNNEL)
/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
@@ -686,6 +691,7 @@ struct mlx5_flow_handle {
uint32_t is_meter_flow_id:1; /**< Indate if flow_id is for meter. */
uint32_t mark:1; /**< Metadate rxq mark flag. */
uint32_t fate_action:3; /**< Fate action type. */
+ uint32_t flex_item; /**< referenced Flex Item bitmask. */
union {
uint32_t rix_hrxq; /**< Hash Rx queue object index. */
uint32_t rix_jump; /**< Index to the jump action resource. */
@@ -6825,6 +6825,38 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
return 0;
}
+static int
+flow_dv_validate_item_flex(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ uint64_t *last_item,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_flex *spec = item->spec;
+ const struct rte_flow_item_flex *mask = item->mask;
+ struct mlx5_flex_item *flex;
+
+ if (spec == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "flex flow item spec cannot be NULL");
+ if (mask == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "flex flow item mask cannot be NULL");
+ if (item->last != NULL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "flex flow item last not supported");
+ if (mlx5_flex_acquire_index(dev, spec->handle, false) < 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "invalid flex flow item handle");
+ flex = (struct mlx5_flex_item *)spec->handle;
+ *last_item = flex->tunnel ? MLX5_FLOW_ITEM_FLEX_TUNNEL :
+ MLX5_FLOW_ITEM_FLEX;
+ return 0;
+}
+
/**
* Internal validation function. For validating both actions and items.
*
@@ -7266,6 +7298,16 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 * list it here as a supported type
 */
break;
+ case RTE_FLOW_ITEM_TYPE_FLEX:
+ if (item_flags & MLX5_FLOW_ITEM_FLEX)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple flex items not supported");
+ ret = flow_dv_validate_item_flex(dev, items,
+ &last_item, error);
+ if (ret < 0)
+ return ret;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -10123,6 +10163,25 @@ flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
reg_value, reg_mask);
}
+static void
+flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
+ const struct rte_flow_item *item,
+ struct mlx5_flow *dev_flow)
+{
+ const struct rte_flow_item_flex *spec =
+ (const struct rte_flow_item_flex *)item->spec;
+ int index = mlx5_flex_acquire_index(dev, spec->handle, false);
+
+ MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
+ if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
+ /* Don't count both inner and outer flex items in one rule. */
+ if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
+ MLX5_ASSERT(false);
+ dev_flow->handle->flex_item |= RTE_BIT32(index);
+ }
+ mlx5_flex_flow_translate_item(dev, matcher, key, item);
+}
+
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
#define HEADER_IS_ZERO(match_criteria, headers) \
@@ -13520,6 +13579,11 @@ flow_dv_translate(struct rte_eth_dev *dev,
flow_dv_translate_item_aso_ct(dev, match_mask,
match_value, items);
break;
+ case RTE_FLOW_ITEM_TYPE_FLEX:
+ flow_dv_translate_item_flex(dev, match_mask,
+ match_value, items,
+ dev_flow);
+ break;
default:
break;
}
@@ -14393,6 +14456,12 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
if (!dev_handle)
return;
flow->dev_handles = dev_handle->next.next;
+ while (dev_handle->flex_item) {
+ int index = rte_bsf32(dev_handle->flex_item);
+
+ mlx5_flex_release_index(dev, index);
+ dev_handle->flex_item &= ~RTE_BIT32(index);
+ }
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
if (dev_handle->dvh.rix_sample)
@@ -18014,5 +18083,6 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
.item_create = flow_dv_item_create,
.item_release = flow_dv_item_release,
};
+
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
@@ -1164,8 +1164,8 @@ flow_dv_item_release(struct rte_eth_dev *dev,
&flex->devx_fp->entry);
flex->devx_fp = NULL;
mlx5_flex_free(priv, flex);
- if (rc)
- return rte_flow_error_set(error, rc,
+ if (rc < 0)
+ return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
"flex item release failure");
return 0;