@@ -1590,6 +1590,8 @@ struct mlx5_priv {
void *root_drop_action; /* Pointer to root drop action. */
rte_spinlock_t hw_ctrl_lock;
LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows;
+ struct mlx5dr_action *hw_push_vlan[MLX5DR_TABLE_TYPE_MAX];
+ struct mlx5dr_action *hw_pop_vlan[MLX5DR_TABLE_TYPE_MAX];
struct mlx5dr_action **hw_vport;
struct rte_flow_template_table *hw_esw_sq_miss_root_tbl;
struct rte_flow_template_table *hw_esw_sq_miss_tbl;
@@ -2434,4 +2434,8 @@ int mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
struct rte_flow_error *error);
int flow_hw_table_update(struct rte_eth_dev *dev,
struct rte_flow_error *error);
+int mlx5_flow_item_field_width(struct rte_eth_dev *dev,
+ enum rte_flow_field_id field, int inherit,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
#endif /* RTE_PMD_MLX5_FLOW_H_ */
@@ -1326,7 +1326,7 @@ flow_dv_convert_action_modify_ipv6_dscp
MLX5_MODIFICATION_TYPE_SET, error);
}
-static int
+int
mlx5_flow_item_field_width(struct rte_eth_dev *dev,
enum rte_flow_field_id field, int inherit,
const struct rte_flow_attr *attr,
@@ -48,12 +48,22 @@
/* Lowest priority for HW non-root table. */
#define MLX5_HW_LOWEST_PRIO_NON_ROOT (UINT32_MAX)
+#define MLX5_HW_VLAN_PUSH_TYPE_IDX 0
+#define MLX5_HW_VLAN_PUSH_VID_IDX 1
+#define MLX5_HW_VLAN_PUSH_PCP_IDX 2
+
static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
static int flow_hw_translate_group(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *cfg,
uint32_t group,
uint32_t *table_group,
struct rte_flow_error *error);
+static __rte_always_inline int
+flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
+ struct mlx5_hw_q_job *job,
+ struct mlx5_action_construct_data *act_data,
+ const struct mlx5_hw_actions *hw_acts,
+ const struct rte_flow_action *action);
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
@@ -1039,6 +1049,52 @@ flow_hw_cnt_compile(struct rte_eth_dev *dev, uint32_t start_pos,
return 0;
}
+static __rte_always_inline bool
+is_of_vlan_pcp_present(const struct rte_flow_action *actions)
+{
+ /*
+ * Mandatory order is OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ],
+ * so a PCP action, when present, always sits at PCP_IDX from OF_PUSH_VLAN.
+ */
+ return actions[MLX5_HW_VLAN_PUSH_PCP_IDX].type ==
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP;
+}
+
+static __rte_always_inline bool
+is_template_masked_push_vlan(const struct rte_flow_action_of_push_vlan *mask)
+{
+ /*
+ * Non-zero ethertype mask means the whole VLAN push group is masked.
+ */
+ return mask && mask->ethertype != 0;
+}
+
+static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
+{
+/*
+ * OpenFlow Switch Specification defines 802.1Q VID as 12+1 bits.
+ */
+ rte_be32_t type, vid, pcp;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ rte_be32_t vid_lo, vid_hi;
+#endif
+
+ type = ((const struct rte_flow_action_of_push_vlan *)
+ actions[MLX5_HW_VLAN_PUSH_TYPE_IDX].conf)->ethertype;
+ vid = ((const struct rte_flow_action_of_set_vlan_vid *)
+ actions[MLX5_HW_VLAN_PUSH_VID_IDX].conf)->vlan_vid;
+ pcp = is_of_vlan_pcp_present(actions) ?
+ ((const struct rte_flow_action_of_set_vlan_pcp *)
+ actions[MLX5_HW_VLAN_PUSH_PCP_IDX].conf)->vlan_pcp : 0;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ vid_hi = vid & 0xff; /* low byte of be16 VID = network-order high byte */
+ vid_lo = vid >> 8; /* high byte of be16 VID = network-order low byte */
+ return (((vid_lo << 8) | (pcp << 5) | vid_hi) << 16) | type; /* TPID|TCI memory layout */
+#else
+ return (type << 16) | (pcp << 13) | vid;
+#endif
+}
+
/**
* Translate rte_flow actions to DR action.
*
@@ -1141,6 +1197,26 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
priv->hw_tag[!!attr->group];
flow_hw_rxq_flag_set(dev, true);
break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ action_pos = at->actions_off[actions - at->actions];
+ acts->rule_acts[action_pos].action =
+ priv->hw_push_vlan[type];
+ if (is_template_masked_push_vlan(masks->conf))
+ acts->rule_acts[action_pos].push_vlan.vlan_hdr =
+ vlan_hdr_to_be32(actions);
+ else if (__flow_hw_act_data_general_append
+ (priv, acts, actions->type,
+ actions - action_start, action_pos))
+ goto err;
+ actions += is_of_vlan_pcp_present(actions) ?
+ MLX5_HW_VLAN_PUSH_PCP_IDX :
+ MLX5_HW_VLAN_PUSH_VID_IDX;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ action_pos = at->actions_off[actions - at->actions];
+ acts->rule_acts[action_pos].action =
+ priv->hw_pop_vlan[type];
+ break;
case RTE_FLOW_ACTION_TYPE_JUMP:
action_pos = at->actions_off[actions - action_start];
if (masks->conf &&
@@ -1746,8 +1822,17 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
cnt_id_t cnt_id;
action = &actions[act_data->action_src];
- MLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT ||
- (int)action->type == act_data->type);
+ /*
+ * action template construction replaces
+ * OF_SET_VLAN_VID with MODIFY_FIELD
+ */
+ if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
+ MLX5_ASSERT(act_data->type ==
+ RTE_FLOW_ACTION_TYPE_MODIFY_FIELD);
+ else
+ MLX5_ASSERT(action->type ==
+ RTE_FLOW_ACTION_TYPE_INDIRECT ||
+ (int)action->type == act_data->type);
switch (act_data->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
@@ -1763,6 +1848,10 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
(action->conf))->id);
rule_acts[act_data->action_dst].tag.value = tag;
break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ rule_acts[act_data->action_dst].push_vlan.vlan_hdr =
+ vlan_hdr_to_be32(action);
+ break;
case RTE_FLOW_ACTION_TYPE_JUMP:
jump_group = ((const struct rte_flow_action_jump *)
action->conf)->group;
@@ -1814,10 +1903,16 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
act_data->encap.len);
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
- ret = flow_hw_modify_field_construct(job,
- act_data,
- hw_acts,
- action);
+ if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
+ ret = flow_hw_set_vlan_vid_construct(dev, job,
+ act_data,
+ hw_acts,
+ action);
+ else
+ ret = flow_hw_modify_field_construct(job,
+ act_data,
+ hw_acts,
+ action);
if (ret)
return -1;
break;
@@ -2841,6 +2936,56 @@ flow_hw_action_meta_copy_insert(const struct rte_flow_action actions[],
return 0;
}
+static int
+flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_actions_template_attr *attr,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct rte_flow_error *error)
+{
+#define X_FIELD(ptr, t, f) ((t *)((ptr)->conf))->f
+ /*
+ * 1. Mandatory actions order:
+ * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
+ * 2. All actions either masked or not (no mixing within the group).
+ */
+ const bool masked_action = action[MLX5_HW_VLAN_PUSH_TYPE_IDX].conf &&
+ X_FIELD(action + MLX5_HW_VLAN_PUSH_TYPE_IDX,
+ const struct rte_flow_action_of_push_vlan,
+ ethertype) != 0;
+ bool masked_param;
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(attr);
+ RTE_SET_USED(mask); /* NOTE(review): masking is judged from action[], not mask[] - confirm intended */
+ if (action[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "OF_PUSH_VLAN: invalid actions order");
+ masked_param = action[MLX5_HW_VLAN_PUSH_VID_IDX].conf &&
+ X_FIELD(action + MLX5_HW_VLAN_PUSH_VID_IDX,
+ const struct rte_flow_action_of_set_vlan_vid, vlan_vid);
+ if (!(masked_action & masked_param)) /* NOTE(review): also rejects the all-unmasked case - verify against the unmasked translate path */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "OF_SET_VLAN_VID: template mask does not match OF_PUSH_VLAN");
+ if (is_of_vlan_pcp_present(action)) {
+ masked_param = action[MLX5_HW_VLAN_PUSH_PCP_IDX].conf &&
+ X_FIELD(action + MLX5_HW_VLAN_PUSH_PCP_IDX,
+ const struct rte_flow_action_of_set_vlan_pcp,
+ vlan_pcp);
+ if (!(masked_action & masked_param))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "OF_SET_VLAN_PCP: template mask does not match OF_PUSH_VLAN");
+ }
+
+ return 0;
+#undef X_FIELD
+}
+
static int
flow_hw_actions_validate(struct rte_eth_dev *dev,
const struct rte_flow_actions_template_attr *attr,
@@ -2931,6 +3076,18 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
/* TODO: Validation logic */
break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ ret = flow_hw_validate_action_push_vlan
+ (dev, attr, action, mask, error);
+ if (ret != 0)
+ return ret;
+ i += is_of_vlan_pcp_present(action) ?
+ MLX5_HW_VLAN_PUSH_PCP_IDX :
+ MLX5_HW_VLAN_PUSH_VID_IDX;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
@@ -2958,6 +3115,8 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,
[RTE_FLOW_ACTION_TYPE_COUNT] = MLX5DR_ACTION_TYP_CTR,
[RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
+ [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,
+ [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,
};
static int
@@ -3074,6 +3233,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
goto err_actions_num;
action_types[curr_off++] = MLX5DR_ACTION_TYP_FT;
break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ type = mlx5_hw_dr_action_types[at->actions[i].type];
+ at->actions_off[i] = curr_off;
+ action_types[curr_off++] = type;
+ i += is_of_vlan_pcp_present(at->actions + i) ?
+ MLX5_HW_VLAN_PUSH_PCP_IDX :
+ MLX5_HW_VLAN_PUSH_VID_IDX;
+ break;
default:
type = mlx5_hw_dr_action_types[at->actions[i].type];
at->actions_off[i] = curr_off;
@@ -3101,6 +3268,95 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
return NULL;
}
+static void
+flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ const struct rte_flow_action *masks,
+ struct rte_flow_action *ra, struct rte_flow_action *rm,
+ struct rte_flow_action_modify_field *spec,
+ struct rte_flow_action_modify_field *mask,
+ uint32_t act_num, int set_vlan_vid_ix)
+{
+ struct rte_flow_error error;
+ const bool masked = masks[set_vlan_vid_ix].conf &&
+ (((const struct rte_flow_action_of_set_vlan_vid *)
+ masks[set_vlan_vid_ix].conf)->vlan_vid != 0);
+ const struct rte_flow_action_of_set_vlan_vid *conf =
+ actions[set_vlan_vid_ix].conf;
+ rte_be16_t vid = masked ? conf->vlan_vid : 0; /* big-endian as supplied by the app */
+ int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
+ NULL, &error);
+ if (actions == ra) { /* NOTE(review): self-copy when ra aliases actions; writes below then mutate caller arrays - confirm intended */
+ size_t copy_sz = sizeof(ra[0]) * act_num;
+ rte_memcpy(ra, actions, copy_sz);
+ rte_memcpy(rm, masks, copy_sz);
+ }
+ *spec = (typeof(*spec)) {
+ .operation = RTE_FLOW_MODIFY_SET,
+ .dst = {
+ .field = RTE_FLOW_FIELD_VLAN_ID,
+ .level = 0, .offset = 0,
+ },
+ .src = {
+ .field = RTE_FLOW_FIELD_VALUE,
+ .level = vid, /* immediate value carried via 'level' - TODO confirm vs rte_flow modify-field ABI */
+ .offset = 0,
+ },
+ .width = width,
+ };
+ *mask = (typeof(*mask)) {
+ .operation = RTE_FLOW_MODIFY_SET,
+ .dst = {
+ .field = RTE_FLOW_FIELD_VLAN_ID,
+ .level = 0xffffffff, .offset = 0xffffffff,
+ },
+ .src = {
+ .field = RTE_FLOW_FIELD_VALUE,
+ .level = masked ? (1U << width) - 1 : 0, /* full-width mask only when template-masked */
+ .offset = 0,
+ },
+ .width = 0xffffffff,
+ };
+ ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
+ ra[set_vlan_vid_ix].conf = spec;
+ rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
+ rm[set_vlan_vid_ix].conf = mask;
+}
+
+static __rte_always_inline int
+flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
+ struct mlx5_hw_q_job *job,
+ struct mlx5_action_construct_data *act_data,
+ const struct mlx5_hw_actions *hw_acts,
+ const struct rte_flow_action *action)
+{
+ struct rte_flow_error error;
+ rte_be16_t vid = ((const struct rte_flow_action_of_set_vlan_vid *)
+ action->conf)->vlan_vid;
+ int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
+ NULL, &error);
+ struct rte_flow_action_modify_field conf = {
+ .operation = RTE_FLOW_MODIFY_SET,
+ .dst = {
+ .field = RTE_FLOW_FIELD_VLAN_ID,
+ .level = 0, .offset = 0,
+ },
+ .src = {
+ .field = RTE_FLOW_FIELD_VALUE,
+ .level = vid, /* immediate value carried via 'level' - TODO confirm vs rte_flow modify-field ABI */
+ .offset = 0,
+ },
+ .width = width,
+ };
+ struct rte_flow_action modify_action = {
+ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
+ .conf = &conf
+ };
+ /* Reuse the generic modify-field construct path for VID. */
+ return flow_hw_modify_field_construct(job, act_data, hw_acts,
+ &modify_action);
+}
+
/**
* Create flow action template.
*
@@ -3132,8 +3388,11 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
uint16_t pos = MLX5_HW_MAX_ACTS;
struct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];
struct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];
- const struct rte_flow_action *ra;
- const struct rte_flow_action *rm;
+ struct rte_flow_action *ra = (void *)(uintptr_t)actions;
+ struct rte_flow_action *rm = (void *)(uintptr_t)masks;
+ int set_vlan_vid_ix = -1;
+ struct rte_flow_action_modify_field set_vlan_vid_spec = {0, };
+ struct rte_flow_action_modify_field set_vlan_vid_mask = {0, };
const struct rte_flow_action_modify_field rx_mreg = {
.operation = RTE_FLOW_MODIFY_SET,
.dst = {
@@ -3173,22 +3432,42 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
return NULL;
if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
priv->sh->config.dv_esw_en) {
+ /* Application should make sure only one Q/RSS exist in one rule. */
if (flow_hw_action_meta_copy_insert(actions, masks, &rx_cpy, &rx_cpy_mask,
tmp_action, tmp_mask, &pos)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"Failed to concatenate new action/mask");
return NULL;
+ } else if (pos != MLX5_HW_MAX_ACTS) {
+ ra = tmp_action;
+ rm = tmp_mask;
}
}
- /* Application should make sure only one Q/RSS exist in one rule. */
- if (pos == MLX5_HW_MAX_ACTS) {
- ra = actions;
- rm = masks;
- } else {
- ra = tmp_action;
- rm = tmp_mask;
+ for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
+ switch (ra[i].type) {
+ /* OF_PUSH_VLAN *MUST* come before OF_SET_VLAN_VID */
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ i += is_of_vlan_pcp_present(ra + i) ?
+ MLX5_HW_VLAN_PUSH_PCP_IDX :
+ MLX5_HW_VLAN_PUSH_VID_IDX;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ set_vlan_vid_ix = i;
+ break;
+ default:
+ break;
+ }
}
+ /* Count flow actions to allocate required space for storing DR offsets. */
+ act_num = i;
+ if (act_num >= MLX5_HW_MAX_ACTS)
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Too many actions");
+ if (set_vlan_vid_ix != -1)
+ flow_hw_set_vlan_vid(dev, actions, masks, ra, rm,
+ &set_vlan_vid_spec, &set_vlan_vid_mask,
+ act_num, set_vlan_vid_ix);
act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
if (act_len <= 0)
return NULL;
@@ -3197,10 +3476,6 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
return NULL;
len += RTE_ALIGN(mask_len, 16);
- /* Count flow actions to allocate required space for storing DR offsets. */
- act_num = 0;
- for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i)
- act_num++;
len += RTE_ALIGN(act_num * sizeof(*at->actions_off), 16);
at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
RTE_CACHE_LINE_SIZE, rte_socket_id());
@@ -4719,6 +4994,48 @@ flow_hw_ct_pool_create(struct rte_eth_dev *dev,
return NULL;
}
+static void
+flow_hw_destroy_vlan(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5dr_table_type i;
+ /* Release pop/push VLAN DR actions for every HWS table type. */
+ for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
+ if (priv->hw_pop_vlan[i]) {
+ mlx5dr_action_destroy(priv->hw_pop_vlan[i]);
+ priv->hw_pop_vlan[i] = NULL; /* allow idempotent re-entry */
+ }
+ if (priv->hw_push_vlan[i]) {
+ mlx5dr_action_destroy(priv->hw_push_vlan[i]);
+ priv->hw_push_vlan[i] = NULL; /* allow idempotent re-entry */
+ }
+ }
+}
+
+static int
+flow_hw_create_vlan(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5dr_table_type i;
+ const enum mlx5dr_action_flags flags[MLX5DR_TABLE_TYPE_MAX] = {
+ MLX5DR_ACTION_FLAG_HWS_RX,
+ MLX5DR_ACTION_FLAG_HWS_TX,
+ MLX5DR_ACTION_FLAG_HWS_FDB
+ };
+ /* Create pop/push VLAN DR actions per table type; caller cleans up on error. */
+ for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
+ priv->hw_pop_vlan[i] =
+ mlx5dr_action_create_pop_vlan(priv->dr_ctx, flags[i]);
+ if (!priv->hw_pop_vlan[i])
+ return -ENOENT;
+ priv->hw_push_vlan[i] =
+ mlx5dr_action_create_push_vlan(priv->dr_ctx, flags[i]);
+ if (!priv->hw_push_vlan[i]) /* fix: was hw_pop_vlan, missing push failure */
+ return -ENOENT;
+ }
+ return 0;
+}
+
/**
* Configure port HWS resources.
*
@@ -4915,6 +5232,9 @@ flow_hw_configure(struct rte_eth_dev *dev,
if (priv->hws_cpool == NULL)
goto err;
}
+ ret = flow_hw_create_vlan(dev);
+ if (ret)
+ goto err;
return 0;
err:
if (priv->hws_ctpool) {
@@ -4928,6 +5248,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
if (priv->hw_tag[i])
mlx5dr_action_destroy(priv->hw_tag[i]);
}
+ flow_hw_destroy_vlan(dev);
if (dr_ctx)
claim_zero(mlx5dr_context_close(dr_ctx));
mlx5_free(priv->hw_q);
@@ -4986,6 +5307,7 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
if (priv->hw_tag[i])
mlx5dr_action_destroy(priv->hw_tag[i]);
}
+ flow_hw_destroy_vlan(dev);
flow_hw_free_vport_actions(priv);
if (priv->acts_ipool) {
mlx5_ipool_destroy(priv->acts_ipool);