@@ -512,6 +512,17 @@ static int i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
const struct rte_flow_action *actions,
struct rte_flow_error *error);
+static struct i40e_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+
+/* Driver-private flow handle returned by i40e_flow_create().
+ *
+ * @filter_type: which i40e filter list @rule lives on
+ *               (ethertype / FDIR / MAC-VLAN / tunnel).
+ * @rule: pointer to the list entry appended for this flow -- the
+ *        concrete type depends on @filter_type, hence void *.
+ * @vsi: owning VSI; set only for MAC/VLAN filters, NULL otherwise
+ *       (rte_zmalloc zero-fills the struct).
+ */
+struct i40e_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+ struct i40e_vsi *vsi;
+};
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
@@ -609,6 +620,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
static const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
+	/* NOTE(review): the (void *) cast silences a pointer-type
+	 * mismatch -- i40e_flow_create() returns struct i40e_flow *
+	 * while the .create op expects struct rte_flow *.  Confirm the
+	 * two handle types are intentionally interchangeable; calling
+	 * through an incompatible function-pointer type is UB in C. */
+	.create = (void *)i40e_flow_create,
};
/* store statistics names and its offset in stats structure */
@@ -11572,3 +11584,84 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
return ret;
}
+
+/*
+ * rte_flow .create callback: try each supported filter parser in turn
+ * (ethertype, flow director, MAC/VLAN, tunnel) and program the first
+ * one that accepts the pattern into the hardware.  On success the
+ * returned handle records the filter type and the list entry just
+ * appended (TAILQ_LAST of the matching per-PF list) so the rule can
+ * be located again on destroy.
+ *
+ * Returns the new flow handle, or NULL on allocation/programming
+ * failure; the parse helpers fill in @error when no parser matches.
+ */
+static struct i40e_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_mac_filter macvlan_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+ struct i40e_flow *flow = NULL;
+ int ret;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct i40e_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return flow;
+ }
+
+ /* Ethertype filter */
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = cons_parse_ethertype_filter(attr, pattern, actions,
+ &ethertype_filter, error);
+ if (!ret) {
+ ret = i40e_ethertype_filter_set(pf, &ethertype_filter, 1);
+ if (ret)
+ goto free;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ /* filter_set appends, so the new rule is at the tail */
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ return flow;
+ }
+
+ /* Flow director filter */
+ memset(&fdir_filter, 0, sizeof(struct rte_eth_fdir_filter));
+ ret = i40e_parse_fdir_filter(attr, pattern, actions,
+ &fdir_filter, error);
+ if (!ret) {
+ ret = i40e_add_del_fdir_filter(dev, &fdir_filter, 1);
+ if (ret)
+ goto free;
+ flow->filter_type = RTE_ETH_FILTER_FDIR;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ return flow;
+ }
+
+ /* MAC/VLAN filter -- also records the owning VSI for destroy */
+ memset(&macvlan_filter, 0, sizeof(struct rte_eth_mac_filter));
+ ret = i40e_parse_macvlan_filter(attr, pattern, actions,
+ &macvlan_filter, error);
+ if (!ret) {
+ struct i40e_vsi *vsi;
+
+ ret = i40e_vf_mac_filter_set(pf, &macvlan_filter, 1);
+ if (ret)
+ goto free;
+ flow->filter_type = RTE_ETH_FILTER_MACVLAN;
+ vsi = pf->vfs[macvlan_filter.dst_id].vsi;
+ flow->vsi = vsi;
+ flow->rule = TAILQ_LAST(&vsi->mac_list, i40e_mac_filter_list);
+ return flow;
+ }
+
+ /* Tunnel filter */
+ memset(&tunnel_filter, 0, sizeof(struct rte_eth_tunnel_filter_conf));
+ ret = i40e_parse_tunnel_filter(attr, pattern, actions,
+ &tunnel_filter, error);
+ if (!ret) {
+ ret = i40e_dev_tunnel_filter_set(pf, &tunnel_filter, 1);
+ if (ret)
+ goto free;
+ flow->filter_type = RTE_ETH_FILTER_TUNNEL;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ return flow;
+ }
+
+free:
+ /* reached when no parser matched (error already set by the last
+ * parse helper) or when hardware programming failed */
+ rte_free(flow);
+ return NULL;
+}
@@ -684,6 +684,9 @@ int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
void i40e_fdir_filter_restore(struct i40e_pf *pf);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
@@ -1093,7 +1093,7 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)