[v5] net/ice: enable switch filter

Message ID 1561105343-37821-1-git-send-email-wei.zhao1@intel.com (mailing list archive)
State Superseded, archived
Series [v5] net/ice: enable switch filter

Checks

Context               Check    Description
ci/checkpatch         warning  coding style issues
ci/Intel-compilation  fail     Compilation issues

Commit Message

Zhao1, Wei June 21, 2019, 8:22 a.m. UTC
  From: wei zhao <wei.zhao1@intel.com>

The patch enables the backend of rte_flow. It translates
rte_flow_xxx items and actions into the device-specific data
structures and configures the packet processing engine's
binary classifier (switch) accordingly.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.c        |  18 ++
 drivers/net/ice/ice_ethdev.h        |   7 +
 drivers/net/ice/ice_switch_filter.c | 521 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   3 +-
 6 files changed, 573 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h
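
For context (not part of the patch), a minimal sketch of the kind of rule this
backend is meant to serve, built with the public rte_flow API. The port id,
IPv4 address and queue index are arbitrary, and it assumes the generic flow
framework of this series is in place to dispatch the request to the switch
filter:

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Forward IPv4 packets with destination 192.168.0.1 arriving on port 0
 * to Rx queue 3; all values here are illustrative only.
 */
static struct rte_flow *
switch_filter_example(struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(0xc0a80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(0, &attr, pattern, actions, err);
}

A DROP rule would use RTE_FLOW_ACTION_TYPE_DROP in place of the queue action,
matching the two actions the action parser in this patch accepts.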
  

Comments

Stillwell Jr, Paul M June 21, 2019, 2:49 p.m. UTC | #1
> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Wei Zhao
> Sent: Friday, June 21, 2019 1:22 AM
> To: dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>
> Subject: [dpdk-dev] [PATCH v5] net/ice: enable switch filter
> 
> From: wei zhao <wei.zhao1@intel.com>
> 
> The patch enables the backend of rte_flow. It transfers rte_flow_xxx to
> device specific data structure and configures packet process engine's binary
> classifier (switch) properly.
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> [...]
> 
> +	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> +		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
> +			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
> +			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
> +			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
> +			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
> +			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
> +			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
> +			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
> +			item_num++;
> +		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
> +			tun_type = ICE_SW_TUN_VXLAN;
> +		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
> +			tun_type = ICE_SW_TUN_NVGRE;

Should the line

		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)

be an else if, since if it matched VXLAN it shouldn't match NVGRE?
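
I.e., something along these lines (only a sketch of the suggestion, not a
committed change):

-		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+		else if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
 			tun_type = ICE_SW_TUN_NVGRE;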

> [...]
> 
> +/* By now ice switch filter action code implement only

Minor suggestion: By now => For now

> +  * supports QUEUE or DROP.
> +  */
> [...]
  

Patch

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@  ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index d1e15ab..0382521 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1357,6 +1357,21 @@  static int ice_load_pkg(struct rte_eth_dev *dev)
 	return err;
 }
 
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+	uint32_t reg;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
+					" index");
+	}
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -1453,6 +1468,9 @@  ice_dev_init(struct rte_eth_dev *dev)
 	/* enable uio intr after callback register */
 	rte_intr_enable(intr_handle);
 
+	/* get base queue pairs index  in the device */
+	ice_base_queue_get(pf);
+
 	return 0;
 
 err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..50b966c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@  struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store flow created. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+	void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -252,6 +258,7 @@  struct ice_pf {
 	uint16_t hash_lut_size; /* The size of hash lookup table */
 	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+	uint16_t base_queue; /* The base queue pairs index  in the device */
 	struct ice_hw_port_stats stats_offset;
 	struct ice_hw_port_stats stats;
 	/* internal packet statistics, it should be excluded from the total */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..41752fa
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,521 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
+static int
+ice_parse_switch_filter(const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_rule_info *rule_info,
+			struct ice_adv_lkup_elem **lkup_list,
+			uint16_t *lkups_num)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
+	struct ice_adv_lkup_elem *list;
+	uint16_t j, t = 0;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+	uint16_t tunnel_valid = 0;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
+			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
+			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
+			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			item_num++;
+		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+			tun_type = ICE_SW_TUN_VXLAN;
+		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			tun_type = ICE_SW_TUN_NVGRE;
+	}
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "No memory for PMD internal items");
+		goto out;
+	}
+	*lkup_list = list;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				struct ice_ether_hdr *h;
+				struct ice_ether_hdr *m;
+				h = &list[t].h_u.eth_hdr;
+				m = &list[t].m_u.eth_hdr;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						m->src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						m->dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+					}
+				}
+				if (eth_mask->type == UINT16_MAX) {
+					h->ethtype_id =
+					rte_be_to_cpu_16(eth_spec->type);
+					m->ethtype_id = UINT16_MAX;
+				}
+				t++;
+			} else if (!eth_spec && !eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[t].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[t].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[t].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[t].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				struct ice_ipv6_hdr *f;
+				struct ice_ipv6_hdr *s;
+				f = &list[t].h_u.ice_ipv6_ofos_hdr;
+				s = &list[t].m_u.ice_ipv6_ofos_hdr;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						f->src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						s->src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						f->dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						s->dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					f->next_hdr =
+						ipv6_spec->hdr.proto;
+					s->next_hdr = UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					f->hop_limit =
+						ipv6_spec->hdr.hop_limits;
+					s->hop_limit = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				if (tun_type == ICE_SW_TUN_VXLAN &&
+						tunnel_valid == 0)
+					list[t].type = ICE_UDP_OF;
+				else
+					list[t].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!udp_spec && !udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[t].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[t].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tunnel_valid = 1;
+			if (vxlan_spec && vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[t].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[2] << 16) |
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[t].m_u.tnl_hdr.vni =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tunnel_valid = 1;
+			if (nvgre_spec && nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[t].h_u.nvgre_hdr.tni_flow =
+						(nvgre_spec->tni[2] << 16) |
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[t].m_u.nvgre_hdr.tni_flow =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	rule_info->tun_type = tun_type;
+	*lkups_num = t;
+
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* By now ice switch filter action code implement only
+  * supports QUEUE or DROP.
+  */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct ice_adv_rule_info *rule_info)
+{
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	base_queue = pf->base_queue;
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			act_q = action->conf;
+			rule_info->sw_act.fltr_act =
+				ICE_FWD_TO_Q;
+			rule_info->sw_act.fwd_id.q_id =
+				base_queue + act_q->index;
+			if (act_q->index >=
+				pf->dev_data->nb_rx_queues) {
+				rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "Invalid queue ID"
+					" for switch filter.");
+				return -rte_errno;
+			}
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act =
+				ICE_DROP_PACKET;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				actions,
+				"Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+
+	return 0;
+}
+
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"item number too large for rule");
+		return -rte_errno;
+	}
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"lookup list should not be NULL");
+		return -rte_errno;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			&rule_info, &list, &lkups_num);
+	if (ret)
+		goto error;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto error;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
+	if (ret)
+		goto error;
+
+	rte_free(list);
+	return 0;
+
+error:
+	rte_free(list);
+
+	return -rte_errno;
+}
+
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+
+	if (!filter_ptr) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"no such flow"
+			" create by switch filter");
+		return -rte_errno;
+	}
+
+	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"fail to destroy switch filter rule");
+		return -rte_errno;
+	}
+
+	rte_free(filter_ptr);
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..cea4799
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,24 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@  objs = [base_objs]
 
 sources = files(
 	'ice_ethdev.c',
-	'ice_rxtx.c'
+	'ice_rxtx.c',
+	'ice_switch_filter.c'
 	)
 
 deps += ['hash']
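
As a teardown sketch under the same assumptions as the creation example near
the top of this page (the series' flow framework routes the request to
ice_destroy_switch_filter(); the port id and the hypothetical
switch_filter_example() helper are illustrative):

static void
switch_filter_example_teardown(void)
{
	struct rte_flow_error err;
	struct rte_flow *flow = switch_filter_example(&err);

	if (flow != NULL)
		rte_flow_destroy(0, flow, &err);
}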