@@ -7108,7 +7108,7 @@ mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)
struct rte_flow_item_port_id port_spec = {
.id = MLX5_PORT_ESW_MGR,
};
- struct mlx5_rte_flow_item_tx_queue txq_spec = {
+ struct mlx5_rte_flow_item_sq txq_spec = {
.queue = txq,
};
struct rte_flow_item pattern[] = {
@@ -7118,7 +7118,7 @@ mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)
},
{
.type = (enum rte_flow_item_type)
- MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+ MLX5_RTE_FLOW_ITEM_TYPE_SQ,
.spec = &txq_spec,
},
{
@@ -7504,16 +7504,16 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
.egress = 1,
.priority = 0,
};
- struct mlx5_rte_flow_item_tx_queue queue_spec = {
+ struct mlx5_rte_flow_item_sq queue_spec = {
.queue = queue,
};
- struct mlx5_rte_flow_item_tx_queue queue_mask = {
+ struct mlx5_rte_flow_item_sq queue_mask = {
.queue = UINT32_MAX,
};
struct rte_flow_item items[] = {
{
.type = (enum rte_flow_item_type)
- MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+ MLX5_RTE_FLOW_ITEM_TYPE_SQ,
.spec = &queue_spec,
.last = NULL,
.mask = &queue_mask,
@@ -28,7 +28,7 @@
enum mlx5_rte_flow_item_type {
MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
MLX5_RTE_FLOW_ITEM_TYPE_TAG,
- MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+ MLX5_RTE_FLOW_ITEM_TYPE_SQ,
MLX5_RTE_FLOW_ITEM_TYPE_VLAN,
MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL,
};
@@ -95,7 +95,7 @@ struct mlx5_flow_action_copy_mreg {
};
/* Matches on source queue. */
-struct mlx5_rte_flow_item_tx_queue {
+struct mlx5_rte_flow_item_sq {
uint32_t queue;
};
@@ -159,7 +159,7 @@ enum mlx5_feature_name {
#define MLX5_FLOW_LAYER_GENEVE (1u << 26)
/* Queue items. */
-#define MLX5_FLOW_ITEM_TX_QUEUE (1u << 27)
+#define MLX5_FLOW_ITEM_SQ (1u << 27)
/* Pattern tunnel Layer bits (continued). */
#define MLX5_FLOW_LAYER_GTP (1u << 28)
@@ -196,6 +196,9 @@ enum mlx5_feature_name {
#define MLX5_FLOW_ITEM_PORT_REPRESENTOR (UINT64_C(1) << 41)
#define MLX5_FLOW_ITEM_REPRESENTED_PORT (UINT64_C(1) << 42)
+/* Meter color item. */
+#define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -1009,6 +1012,18 @@ flow_items_to_tunnel(const struct rte_flow_item items[])
return items[0].spec;
}
+/* HW steering flow attributes. */
+struct mlx5_flow_attr {
+ uint32_t port_id; /* Port index. */
+ uint32_t group; /* Flow group. */
+ uint32_t priority; /* Original Priority. */
+ /* rss level, used by priority adjustment. */
+ uint32_t rss_level;
+ /* Action flags, used by priority adjustment. */
+ uint32_t act_flags;
+ uint32_t tbl_type; /* Flow table type. */
+};
+
/* Flow structure. */
struct rte_flow {
uint32_t dev_handles;
@@ -1769,6 +1784,32 @@ mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error);
+
+/*
+ * Convert rte_color (RTE meter color) to mlx5 color.
+ *
+ * @param[in] rcol
+ *   rte_color value.
+ *
+ * @return
+ * mlx5 color.
+ */
+static inline int
+rte_col_2_mlx5_col(enum rte_color rcol)
+{
+ switch (rcol) {
+ case RTE_COLOR_GREEN:
+ return MLX5_FLOW_COLOR_GREEN;
+ case RTE_COLOR_YELLOW:
+ return MLX5_FLOW_COLOR_YELLOW;
+ case RTE_COLOR_RED:
+ return MLX5_FLOW_COLOR_RED;
+ default:
+ break;
+ }
+ return MLX5_FLOW_COLOR_UNDEFINED;
+}
+
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
@@ -2128,4 +2169,9 @@ int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
bool *all_ports,
struct rte_flow_error *error);
+int flow_dv_translate_items_hws(const struct rte_flow_item *items,
+ struct mlx5_flow_attr *attr, void *key,
+ uint32_t key_type, uint64_t *item_flags,
+ uint8_t *match_criteria,
+ struct rte_flow_error *error);
#endif /* RTE_PMD_MLX5_FLOW_H_ */
@@ -212,31 +212,6 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
attr->valid = 1;
}
-/*
- * Convert rte_mtr_color to mlx5 color.
- *
- * @param[in] rcol
- * rte_mtr_color.
- *
- * @return
- * mlx5 color.
- */
-static inline int
-rte_col_2_mlx5_col(enum rte_color rcol)
-{
- switch (rcol) {
- case RTE_COLOR_GREEN:
- return MLX5_FLOW_COLOR_GREEN;
- case RTE_COLOR_YELLOW:
- return MLX5_FLOW_COLOR_YELLOW;
- case RTE_COLOR_RED:
- return MLX5_FLOW_COLOR_RED;
- default:
- break;
- }
- return MLX5_FLOW_COLOR_UNDEFINED;
-}
-
struct field_modify_info {
uint32_t size; /* Size of field in protocol header, in bytes. */
uint32_t offset; /* Offset of field in protocol header, in bytes. */
@@ -7338,8 +7313,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = MLX5_FLOW_ITEM_TAG;
break;
- case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
- last_item = MLX5_FLOW_ITEM_TX_QUEUE;
+ case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
+ last_item = MLX5_FLOW_ITEM_SQ;
break;
case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
break;
@@ -8225,7 +8200,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
* work due to metadata regC0 mismatch.
*/
if ((!attr->transfer && attr->egress) && priv->representor &&
- !(item_flags & MLX5_FLOW_ITEM_TX_QUEUE))
+ !(item_flags & MLX5_FLOW_ITEM_SQ))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL,
@@ -11244,9 +11219,9 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
const struct rte_flow_item *item,
uint32_t key_type)
{
- const struct mlx5_rte_flow_item_tx_queue *queue_m;
- const struct mlx5_rte_flow_item_tx_queue *queue_v;
- const struct mlx5_rte_flow_item_tx_queue queue_mask = {
+ const struct mlx5_rte_flow_item_sq *queue_m;
+ const struct mlx5_rte_flow_item_sq *queue_v;
+ const struct mlx5_rte_flow_item_sq queue_mask = {
.queue = UINT32_MAX,
};
void *misc_v =
@@ -13231,9 +13206,9 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
flow_dv_translate_mlx5_item_tag(dev, key, items, key_type);
last_item = MLX5_FLOW_ITEM_TAG;
break;
- case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
flow_dv_translate_item_tx_queue(dev, key, items, key_type);
- last_item = MLX5_FLOW_ITEM_TX_QUEUE;
+ last_item = MLX5_FLOW_ITEM_SQ;
break;
case RTE_FLOW_ITEM_TYPE_GTP:
flow_dv_translate_item_gtp(key, items, tunnel, key_type);
@@ -13273,6 +13248,99 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Fill the HW steering flow with DV spec.
+ * @param[in] items
+ *   Pointer to the list of items.
+ * @param[in] attr
+ *   Pointer to the flow attributes.
+ * @param[in] key
+ *   Pointer to the flow matcher key.
+ * @param[in] key_type
+ *   Key type.
+ * @param[in, out] item_flags
+ *   Pointer to the flow item flags.
+ * @param[out] match_criteria
+ *   Pointer to the match criteria enable bits, filled when non-NULL.
+ * @param[out] error
+ *   Pointer to the error structure.
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dv_translate_items_hws(const struct rte_flow_item *items,
+ struct mlx5_flow_attr *attr, void *key,
+ uint32_t key_type, uint64_t *item_flags,
+ uint8_t *match_criteria,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_rss_desc rss_desc = { .level = attr->rss_level };
+ struct rte_flow_attr rattr = {
+ .group = attr->group,
+ .priority = attr->priority,
+ .ingress = !!(attr->tbl_type == MLX5DR_TABLE_TYPE_NIC_RX),
+ .egress = !!(attr->tbl_type == MLX5DR_TABLE_TYPE_NIC_TX),
+ .transfer = !!(attr->tbl_type == MLX5DR_TABLE_TYPE_FDB),
+ };
+ struct mlx5_dv_matcher_workspace wks = {
+ .action_flags = attr->act_flags,
+ .item_flags = item_flags ? *item_flags : 0,
+ .external = 0,
+ .next_protocol = 0xff,
+ .attr = &rattr,
+ .rss_desc = &rss_desc,
+ };
+ int ret;
+
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ if (!mlx5_flow_os_item_supported(items->type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ ret = flow_dv_translate_items(&rte_eth_devices[attr->port_id],
+ items, &wks, key, key_type, NULL);
+ if (ret)
+ return ret;
+ }
+ if (wks.item_flags & MLX5_FLOW_LAYER_VXLAN_GPE) {
+ flow_dv_translate_item_vxlan_gpe(key,
+ wks.tunnel_item,
+ wks.item_flags,
+ key_type);
+ } else if (wks.item_flags & MLX5_FLOW_LAYER_GENEVE) {
+ flow_dv_translate_item_geneve(key,
+ wks.tunnel_item,
+ wks.item_flags,
+ key_type);
+ } else if (wks.item_flags & MLX5_FLOW_LAYER_GRE) {
+ if (wks.tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE) {
+ flow_dv_translate_item_gre(key,
+ wks.tunnel_item,
+ wks.item_flags,
+ key_type);
+ } else if (wks.tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION) {
+ flow_dv_translate_item_gre_option(key,
+ wks.tunnel_item,
+ wks.gre_item,
+ wks.item_flags,
+ key_type);
+ } else if (wks.tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
+ flow_dv_translate_item_nvgre(key,
+ wks.tunnel_item,
+ wks.item_flags,
+ key_type);
+ } else {
+ MLX5_ASSERT(false);
+ }
+ }
+
+ if (match_criteria)
+ *match_criteria = flow_dv_matcher_enable(key);
+ if (item_flags)
+ *item_flags = wks.item_flags;
+ return 0;
+}
+
/**
* Fill the SW steering flow with DV spec.
*