@@ -1027,6 +1027,9 @@ struct mlx5_devx_obj *
attr->flow.tunnel_header_0_1 = MLX5_GET
(flow_table_nic_cap, hcattr,
ft_field_support_2_nic_receive.tunnel_header_0_1);
+ attr->flow.tunnel_header_2_3 = MLX5_GET
+ (flow_table_nic_cap, hcattr,
+ ft_field_support_2_nic_receive.tunnel_header_2_3);
attr->pkt_integrity_match = mlx5_devx_query_pkt_integrity_match(hcattr);
attr->inner_ipv4_ihl = MLX5_GET
(flow_table_nic_cap, hcattr,
@@ -1385,6 +1385,8 @@
}
if (config->hca_attr.flow.tunnel_header_0_1)
sh->tunnel_header_0_1 = 1;
+ if (config->hca_attr.flow.tunnel_header_2_3)
+ sh->tunnel_header_2_3 = 1;
#endif
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
if (config->hca_attr.flow_hit_aso &&
@@ -1150,6 +1150,7 @@ struct mlx5_dev_ctx_shared {
uint32_t meter_aso_en:1; /* Flow Meter ASO is supported. */
uint32_t ct_aso_en:1; /* Connection Tracking ASO is supported. */
uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */
+ uint32_t tunnel_header_2_3:1; /* tunnel_header_2_3 is supported. */
uint32_t misc5_cap:1; /* misc5 matcher parameter is supported. */
uint32_t reclaim_mode:1; /* Reclaim memory. */
uint32_t dr_drop_action_en:1; /* Use DR drop action. */
@@ -2802,6 +2802,107 @@ struct mlx5_flow_tunnel_info {
}
/**
+ * Validate GRE optional item.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit flags to mark detected items.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] gre_item
+ *   Pointer to the preceding GRE item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
+				   const struct rte_flow_item *item,
+				   uint64_t item_flags,
+				   const struct rte_flow_attr *attr,
+				   const struct rte_flow_item *gre_item,
+				   struct rte_flow_error *error)
+{
+	const struct rte_flow_item_gre *gre_spec = gre_item->spec;
+	const struct rte_flow_item_gre *gre_mask = gre_item->mask;
+	const struct rte_gre_hdr_option *spec = item->spec;
+	const struct rte_gre_hdr_option *mask = item->mask;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	/* By default every option field is matched exactly. */
+	struct rte_gre_hdr_option gre_option_default_mask = {
+		.checksum = 0xffff,
+		.key = 0xffffffff,
+		.sequence = 0xffffffff,
+	};
+
+	if (!(item_flags & MLX5_FLOW_LAYER_GRE))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "No preceding GRE header");
+	if (item_flags & MLX5_FLOW_LAYER_INNER)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "GRE option following a wrong item");
+	if (!gre_mask)
+		gre_mask = &rte_flow_item_gre_mask;
+	if (!mask)
+		mask = &gre_option_default_mask;
+	/*
+	 * An option field may only be matched when the corresponding
+	 * C/K/S flag in the GRE header is not explicitly matched as 0.
+	 */
+	if (spec && mask->checksum)
+		if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x8000)) &&
+		    !(gre_spec->c_rsvd0_ver & RTE_BE16(0x8000)))
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "Checksum bit must be on");
+	if (spec && mask->key)
+		if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
+		    !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "Key bit must be on");
+	if (spec && mask->sequence)
+		if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x1000)) &&
+		    !(gre_spec->c_rsvd0_ver & RTE_BE16(0x1000)))
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "Sequence bit must be on");
+	/*
+	 * Checksum/sequence matching needs misc5 tunnel_header support:
+	 * unavailable with ConnectX-5 steering; FDB/non-root tables need
+	 * misc5_cap, the NIC root table needs the tunnel_header_0_3 caps.
+	 */
+	if (spec && (mask->checksum || mask->sequence)) {
+		if (priv->sh->steering_format_version ==
+		    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
+		    ((attr->group || attr->transfer) &&
+		     !priv->sh->misc5_cap) ||
+		    (!(priv->sh->tunnel_header_0_1 &&
+		       priv->sh->tunnel_header_2_3) &&
+		     !attr->group && !attr->transfer))
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "Checksum/Sequence not supported");
+	}
+	return mlx5_flow_item_acceptable
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&gre_option_default_mask,
+		 sizeof(struct rte_gre_hdr_option),
+		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
+}
+
+/**
* Validate GRE item.
*
* @param[in] item
@@ -1523,6 +1523,12 @@ int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
uint64_t item_flags,
const struct rte_flow_item *gre_item,
struct rte_flow_error *error);
+int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ uint64_t item_flags,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *gre_item,
+ struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
uint64_t item_flags,
uint64_t last_item,
@@ -7112,6 +7112,13 @@ struct mlx5_list_entry *
gre_item = items;
last_item = MLX5_FLOW_LAYER_GRE;
break;
+ case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
+ ret = mlx5_flow_validate_item_gre_option(dev, items, item_flags,
+ attr, gre_item, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GRE;
+ break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
ret = mlx5_flow_validate_item_nvgre(items, item_flags,
next_protocol,
@@ -8833,6 +8840,120 @@ struct mlx5_list_entry *
}
/**
+ * Add GRE optional items to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] gre_item
+ *   Pointer to the preceding GRE item.
+ * @param[in] pattern_flags
+ *   Accumulated pattern flags.
+ */
+static void
+flow_dv_translate_item_gre_option(void *matcher, void *key,
+				  const struct rte_flow_item *item,
+				  const struct rte_flow_item *gre_item,
+				  uint64_t pattern_flags)
+{
+	void *misc5_m = MLX5_ADDR_OF(fte_match_param, matcher,
+				     misc_parameters_5);
+	void *misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
+	const struct rte_gre_hdr_option *option_m = item->mask;
+	const struct rte_gre_hdr_option *option_v = item->spec;
+	const struct rte_flow_item_gre *gre_m = gre_item->mask;
+	const struct rte_flow_item_gre *gre_v = gre_item->spec;
+	static const struct rte_flow_item_gre empty_gre = {0};
+	struct rte_flow_item gre_key_item;
+	uint16_t c_rsvd0_ver_m, c_rsvd0_ver_v;
+	uint16_t protocol_m, protocol_v;
+	uint32_t *tunnel_header_v[4];
+	uint32_t *tunnel_header_m[4];
+	/* By default every option field is matched exactly. */
+	struct rte_gre_hdr_option gre_option_default_mask = {
+		.checksum = 0xffff,
+		.key = 0xffffffff,
+		.sequence = 0xffffffff,
+	};
+
+	if (!option_v)
+		return;
+	if (!option_m)
+		option_m = &gre_option_default_mask;
+	/*
+	 * Key-only matching does not need misc5 tunnel headers:
+	 * reuse the plain GRE and GRE key translations instead.
+	 */
+	if (!(option_m->sequence || option_m->checksum)) {
+		flow_dv_translate_item_gre(matcher, key, gre_item,
+					   pattern_flags);
+		gre_key_item.spec = &option_v->key;
+		gre_key_item.mask = &option_m->key;
+		flow_dv_translate_item_gre_key(matcher, key, &gre_key_item);
+		return;
+	}
+	if (!gre_v) {
+		gre_v = &empty_gre;
+		gre_m = &empty_gre;
+	} else if (!gre_m) {
+		gre_m = &rte_flow_item_gre_mask;
+	}
+	tunnel_header_v[0] = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
+						      misc5_v, tunnel_header_0);
+	tunnel_header_m[0] = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
+						      misc5_m, tunnel_header_0);
+	tunnel_header_v[1] = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
+						      misc5_v, tunnel_header_1);
+	tunnel_header_m[1] = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
+						      misc5_m, tunnel_header_1);
+	tunnel_header_v[2] = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
+						      misc5_v, tunnel_header_2);
+	tunnel_header_m[2] = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
+						      misc5_m, tunnel_header_2);
+	tunnel_header_v[3] = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
+						      misc5_v, tunnel_header_3);
+	tunnel_header_m[3] = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
+						      misc5_m, tunnel_header_3);
+	protocol_v = gre_v->protocol;
+	protocol_m = gre_m->protocol;
+	if (!protocol_m) {
+		/* Force next protocol to prevent matchers duplication */
+		uint16_t ether_type =
+			mlx5_translate_tunnel_etypes(pattern_flags);
+		if (ether_type) {
+			protocol_v = rte_cpu_to_be_16(ether_type);
+			protocol_m = 0xFFFF;
+		}
+	}
+	c_rsvd0_ver_v = gre_v->c_rsvd0_ver;
+	c_rsvd0_ver_m = gre_m->c_rsvd0_ver;
+	/* A matched option field forces the matching C/K/S header bit. */
+	if (option_m->sequence) {
+		c_rsvd0_ver_v |= RTE_BE16(0x1000);
+		c_rsvd0_ver_m |= RTE_BE16(0x1000);
+	}
+	if (option_m->key) {
+		c_rsvd0_ver_v |= RTE_BE16(0x2000);
+		c_rsvd0_ver_m |= RTE_BE16(0x2000);
+	}
+	if (option_m->checksum) {
+		c_rsvd0_ver_v |= RTE_BE16(0x8000);
+		c_rsvd0_ver_m |= RTE_BE16(0x8000);
+	}
+	*tunnel_header_v[0] = c_rsvd0_ver_v | protocol_v << 16;
+	*tunnel_header_m[0] = c_rsvd0_ver_m | protocol_m << 16;
+	*tunnel_header_v[1] = option_v->checksum;
+	*tunnel_header_m[1] = option_m->checksum;
+	*tunnel_header_v[2] = option_v->key;
+	*tunnel_header_m[2] = option_m->key;
+	*tunnel_header_v[3] = option_v->sequence;
+	*tunnel_header_m[3] = option_m->sequence;
+}
+
+/**
* Add NVGRE item to matcher and to the value.
*
* @param[in, out] matcher
@@ -12708,6 +12831,7 @@ struct mlx5_list_entry *
};
const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
const struct rte_flow_item *tunnel_item = NULL;
+ const struct rte_flow_item *gre_item = NULL;
if (!wks)
return rte_flow_error_set(error, ENOMEM,
@@ -13480,12 +13604,18 @@ struct mlx5_list_entry *
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
tunnel_item = items;
+ gre_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
flow_dv_translate_item_gre_key(match_mask,
match_value, items);
last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
+ case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
+ break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
@@ -13645,6 +13775,9 @@ struct mlx5_list_entry *
else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
flow_dv_translate_item_nvgre(match_mask, match_value,
tunnel_item, item_flags);
+ else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION)
+ flow_dv_translate_item_gre_option(match_mask, match_value,
+ tunnel_item, gre_item, item_flags);
else
MLX5_ASSERT(false);
}