[v20.11.7.5] net/iavf: support flow rule with raw pattern

Message ID 20240312082243.981075-1-mingjinx.ye@intel.com (mailing list archive)
State Not Applicable, archived
Series [v20.11.7.5] net/iavf: support flow rule with raw pattern

Checks

Context                    Check     Description
ci/loongarch-compilation   warning   apply patch failure
ci/checkpatch              warning   coding style issues
ci/Intel-compilation       warning   apply issues

Commit Message

Mingjin Ye March 12, 2024, 8:22 a.m. UTC
Add raw pattern support to VF, including FDIR/RSS flow rules.

This patch is based on DPDK v20.11.7.5 and is intended for customer cherry-pick.
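
For reference, a minimal sketch (not part of the patch) of how an application could exercise the raw pattern support through the generic rte_flow API. Port id, queue index and the hex strings below are placeholder values; per the parser added in iavf_fdir_parse_pattern(), the RAW item carries an ASCII hex string where each character pair encodes one packet byte, and spec and mask must have the same length. An RSS rule would use the same RAW item with an RSS action instead of the queue action.

#include <rte_flow.h>

/* Hypothetical helper: create an FDIR rule from a raw hex pattern. */
static struct rte_flow *
create_raw_flow(uint16_t port_id, struct rte_flow_error *err)
{
	/* Placeholder hex strings; spec and mask lengths must match. */
	static const uint8_t spec[] = "00000000000100000000000208004500";
	static const uint8_t mask[] = "0000000000000000000000000000ffff";
	struct rte_flow_item_raw raw_spec = {
		.length = (uint16_t)(sizeof(spec) - 1),
		.pattern = spec,
	};
	struct rte_flow_item_raw raw_mask = {
		.length = (uint16_t)(sizeof(mask) - 1),
		.pattern = mask,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
		  .spec = &raw_spec, .mask = &raw_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}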

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
 doc/guides/nics/features/ice_dcf.ini    |    1 +
 drivers/common/iavf/virtchnl.h          |   38 +-
 drivers/net/iavf/iavf.h                 |   22 +-
 drivers/net/iavf/iavf_ethdev.c          |  266 ++++--
 drivers/net/iavf/iavf_fdir.c            |  229 ++++-
 drivers/net/iavf/iavf_generic_flow.c    | 1021 ++++++++++++++++++++---
 drivers/net/iavf/iavf_generic_flow.h    |  137 +++
 drivers/net/iavf/iavf_hash.c            |  498 ++++++++---
 drivers/net/iavf/iavf_rxtx.h            |    4 +-
 drivers/net/iavf/iavf_rxtx_vec_avx2.c   |    8 +-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |    9 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c    |    7 +-
 drivers/net/iavf/iavf_vchnl.c           |   85 +-
 drivers/net/ice/ice_dcf_ethdev.c        |   69 ++
 drivers/net/ice/ice_dcf_ethdev.h        |    6 +
 15 files changed, 2091 insertions(+), 309 deletions(-)
  

Patch

diff --git a/doc/guides/nics/features/ice_dcf.ini b/doc/guides/nics/features/ice_dcf.ini
index e2b5659..ec2e0b4 100644
--- a/doc/guides/nics/features/ice_dcf.ini
+++ b/doc/guides/nics/features/ice_dcf.ini
@@ -12,6 +12,7 @@  Flow API             = Y
 CRC offload          = Y
 L3 checksum offload  = P
 L4 checksum offload  = P
+MTU update           = Y
 Basic stats          = Y
 Linux UIO            = Y
 Linux VFIO           = Y
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index b931da6..8ee2f41 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -828,6 +828,7 @@  enum virtchnl_vfr_states {
 };
 
 #define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
+#define VIRTCHNL_MAX_SIZE_RAW_PACKET	1024
 #define PROTO_HDR_SHIFT			5
 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
 					(proto_hdr_type << PROTO_HDR_SHIFT)
@@ -890,6 +891,13 @@  enum virtchnl_proto_hdr_type {
 	VIRTCHNL_PROTO_HDR_AH,
 	VIRTCHNL_PROTO_HDR_PFCP,
 	VIRTCHNL_PROTO_HDR_GTPC,
+	/* IPv4 and IPv6 Fragment header types are only associated to
+	 * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively,
+	 * cannot be used independently.
+	 */
+	VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
+	VIRTCHNL_PROTO_HDR_GRE,
 };
 
 /* Protocol header field within a protocol header. */
@@ -970,6 +978,17 @@  enum virtchnl_proto_hdr_field {
 	/* GTPC */
 	VIRTCHNL_PROTO_HDR_GTPC_TEID =
 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
+	/* GTPU_DWN/UP */
+	VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
+	VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
+	/* IPv4 Dummy Fragment */
+	VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
+	/* IPv6 Extension Fragment */
+	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
 };
 
 struct virtchnl_proto_hdr {
@@ -989,13 +1008,26 @@  struct virtchnl_proto_hdrs {
 	u8 tunnel_level;
 	/**
 	 * specify where protocol header start from.
+	 * must be 0 when sending a raw packet request.
 	 * 0 - from the outer layer
 	 * 1 - from the first inner layer
 	 * 2 - from the second inner layer
 	 * ....
-	 **/
-	int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */
-	struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+	 */
+	int count;
+	/**
+	 * number of proto layers, must < VIRTCHNL_MAX_NUM_PROTO_HDRS
+	 * must be 0 for a raw packet request.
+	 */
+	union {
+		struct virtchnl_proto_hdr
+			proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct {
+			u16 pkt_len;
+			u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+			u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+		} raw;
+	};
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 1c0ebc7..72a0ffc 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -46,11 +46,18 @@ 
 	VIRTCHNL_VF_OFFLOAD_RX_POLLING)
 
 #define IAVF_RSS_OFFLOAD_ALL ( \
+	ETH_RSS_IPV4 | \
 	ETH_RSS_FRAG_IPV4 |         \
 	ETH_RSS_NONFRAG_IPV4_TCP |  \
 	ETH_RSS_NONFRAG_IPV4_UDP |  \
 	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER)
+	ETH_RSS_NONFRAG_IPV4_OTHER | \
+	ETH_RSS_IPV6 | \
+	ETH_RSS_FRAG_IPV6 | \
+	ETH_RSS_NONFRAG_IPV6_TCP | \
+	ETH_RSS_NONFRAG_IPV6_UDP | \
+	ETH_RSS_NONFRAG_IPV6_SCTP | \
+	ETH_RSS_NONFRAG_IPV6_OTHER)
 
 #define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
 #define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
@@ -66,6 +73,7 @@ 
 #define IAVF_VLAN_TAG_SIZE               4
 #define IAVF_ETH_OVERHEAD \
 	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2)
+#define IAVF_ETH_MAX_LEN (RTE_ETHER_MTU + IAVF_ETH_OVERHEAD)
 
 #define IAVF_32_BIT_WIDTH (CHAR_BIT * 4)
 #define IAVF_48_BIT_WIDTH (CHAR_BIT * 6)
@@ -153,6 +161,7 @@  struct iavf_info {
 
 	uint8_t *rss_lut;
 	uint8_t *rss_key;
+	uint64_t rss_hf;
 	uint16_t nb_msix;   /* number of MSI-X interrupts on Rx */
 	uint16_t msix_base; /* msix vector base from */
 	uint16_t max_rss_qregion; /* max RSS queue region supported by PF */
@@ -165,6 +174,8 @@  struct iavf_info {
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
 	bool lv_enabled;
+
+	struct rte_eth_dev *eth_dev;
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -193,7 +204,7 @@  struct iavf_devargs {
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
-	struct rte_eth_dev *eth_dev;
+	struct rte_eth_dev_data *dev_data;
 	struct iavf_info vf;
 
 	bool rx_bulk_alloc_allowed;
@@ -219,8 +230,6 @@  struct iavf_adapter {
 	(&(((struct iavf_vsi *)vsi)->adapter->hw))
 #define IAVF_VSI_TO_VF(vsi) \
 	(&(((struct iavf_vsi *)vsi)->adapter->vf))
-#define IAVF_VSI_TO_ETH_DEV(vsi) \
-	(((struct iavf_vsi *)vsi)->adapter->eth_dev)
 
 static inline void
 iavf_init_adminq_parameter(struct iavf_hw *hw)
@@ -321,9 +330,12 @@  int iavf_fdir_check(struct iavf_adapter *adapter,
 		struct iavf_fdir_conf *filter);
 int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 			 struct virtchnl_rss_cfg *rss_cfg, bool add);
+int iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps);
+int iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena);
+int iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add);
 int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
 			uint32_t mc_addrs_num, bool add);
-int iavf_request_queues(struct iavf_adapter *adapter, uint16_t num);
+int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
 int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 70b5049..42f9d1b 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -242,6 +242,107 @@  iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 	return err;
 }
 
+static int
+iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
+{
+	static const uint64_t map_hena_rss[] = {
+		/* IPv4 */
+		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+				ETH_RSS_NONFRAG_IPV4_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+				ETH_RSS_NONFRAG_IPV4_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
+				ETH_RSS_NONFRAG_IPV4_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+				ETH_RSS_NONFRAG_IPV4_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
+				ETH_RSS_NONFRAG_IPV4_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+				ETH_RSS_NONFRAG_IPV4_SCTP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+				ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+
+		/* IPv6 */
+		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+				ETH_RSS_NONFRAG_IPV6_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+				ETH_RSS_NONFRAG_IPV6_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
+				ETH_RSS_NONFRAG_IPV6_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+				ETH_RSS_NONFRAG_IPV6_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
+				ETH_RSS_NONFRAG_IPV6_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+				ETH_RSS_NONFRAG_IPV6_SCTP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+				ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+
+		/* L2 Payload */
+		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+	};
+
+	const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
+				  ETH_RSS_NONFRAG_IPV4_TCP |
+				  ETH_RSS_NONFRAG_IPV4_SCTP |
+				  ETH_RSS_NONFRAG_IPV4_OTHER |
+				  ETH_RSS_FRAG_IPV4;
+
+	const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
+				  ETH_RSS_NONFRAG_IPV6_TCP |
+				  ETH_RSS_NONFRAG_IPV6_SCTP |
+				  ETH_RSS_NONFRAG_IPV6_OTHER |
+				  ETH_RSS_FRAG_IPV6;
+
+	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
+	uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
+	uint32_t i;
+	int ret;
+
+	ret = iavf_get_hena_caps(adapter, &caps);
+	if (ret)
+		return ret;
+	/**
+	 * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+	 * generalizations of all other IPv4 and IPv6 RSS types.
+	 */
+	if (rss_hf & ETH_RSS_IPV4)
+		rss_hf |= ipv4_rss;
+
+	if (rss_hf & ETH_RSS_IPV6)
+		rss_hf |= ipv6_rss;
+
+	RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
+
+	for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
+		uint64_t bit = BIT_ULL(i);
+
+		if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
+			valid_rss_hf |= map_hena_rss[i];
+			hena |= bit;
+		}
+	}
+
+	ret = iavf_set_hena(adapter, hena);
+	if (ret)
+		return ret;
+
+	if (valid_rss_hf & ipv4_rss)
+		valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+
+	if (valid_rss_hf & ipv6_rss)
+		valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+
+	if (rss_hf & ~valid_rss_hf)
+		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
+			    rss_hf & ~valid_rss_hf);
+
+	vf->rss_hf = valid_rss_hf;
+	return 0;
+}
+
 static int
 iavf_init_rss(struct iavf_adapter *adapter)
 {
@@ -250,26 +351,14 @@  iavf_init_rss(struct iavf_adapter *adapter)
 	uint16_t i, j, nb_q;
 	int ret;
 
-	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
-	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
+	rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
+	nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
 		       vf->max_rss_qregion);
 
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
 		return -ENOTSUP;
 	}
-	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
-		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
-		/* set all lut items to default queue */
-		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
-			vf->rss_lut[i] = 0;
-		ret = iavf_configure_rss_lut(adapter);
-		return ret;
-	}
-
-	/* In IAVF, RSS enablement is set by PF driver. It is not supported
-	 * to set based on rss_conf->rss_hf.
-	 */
 
 	/* configure RSS key */
 	if (!rss_conf->rss_key) {
@@ -295,6 +384,19 @@  iavf_init_rss(struct iavf_adapter *adapter)
 	if (ret)
 		return ret;
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
+		/* Set RSS hash configuration based on rss_conf->rss_hf. */
+		ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "fail to set default RSS");
+			return ret;
+		}
+	} else {
+		ret = iavf_config_rss_hf(adapter, rss_conf->rss_hf);
+		if (ret != -ENOTSUP)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -306,7 +408,7 @@  iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(ad);
 	int ret;
 
-	ret = iavf_request_queues(ad, num);
+	ret = iavf_request_queues(dev, num);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "request queues from PF failed");
 		return ret;
@@ -414,29 +516,14 @@  iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
 	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
 
-	/* Check if the jumbo frame and maximum packet length are set
-	 * correctly.
-	 */
-	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-		if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
-		    max_pkt_len > IAVF_FRAME_SIZE_MAX) {
-			PMD_DRV_LOG(ERR, "maximum packet length must be "
-				    "larger than %u and smaller than %u, "
-				    "as jumbo frame is enabled",
-				    (uint32_t)RTE_ETHER_MAX_LEN,
-				    (uint32_t)IAVF_FRAME_SIZE_MAX);
-			return -EINVAL;
-		}
-	} else {
-		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
-		    max_pkt_len > RTE_ETHER_MAX_LEN) {
-			PMD_DRV_LOG(ERR, "maximum packet length must be "
-				    "larger than %u and smaller than %u, "
-				    "as jumbo frame is disabled",
-				    (uint32_t)RTE_ETHER_MIN_LEN,
-				    (uint32_t)RTE_ETHER_MAX_LEN);
-			return -EINVAL;
-		}
+	/* Check if maximum packet length is set correctly.  */
+	if (max_pkt_len <= RTE_ETHER_MIN_LEN ||
+	    max_pkt_len > IAVF_FRAME_SIZE_MAX) {
+		PMD_DRV_LOG(ERR, "maximum packet length must be "
+			    "larger than %u and smaller than %u",
+			    (uint32_t)IAVF_ETH_MAX_LEN,
+			    (uint32_t)IAVF_FRAME_SIZE_MAX);
+		return -EINVAL;
 	}
 
 	rxq->max_pkt_len = max_pkt_len;
@@ -1102,33 +1189,85 @@  iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
 }
 
 static int
-iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
-			struct rte_eth_rss_conf *rss_conf)
+iavf_set_rss_key(struct iavf_adapter *adapter, uint8_t *key, uint8_t key_len)
 {
-	struct iavf_adapter *adapter =
-		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 
-	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
-		return -ENOTSUP;
-
 	/* HENA setting, it is enabled by default, no change */
-	if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
+	if (!key || key_len == 0) {
 		PMD_DRV_LOG(DEBUG, "No key to be configured");
 		return 0;
-	} else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
+	} else if (key_len != vf->vf_res->rss_key_size) {
 		PMD_DRV_LOG(ERR, "The size of hash key configured "
 			"(%d) doesn't match the size of hardware can "
-			"support (%d)", rss_conf->rss_key_len,
+			"support (%d)", key_len,
 			vf->vf_res->rss_key_size);
 		return -EINVAL;
 	}
 
-	rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);
+	rte_memcpy(vf->rss_key, key, key_len);
 
 	return iavf_configure_rss_key(adapter);
 }
 
+static int
+iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_conf *rss_conf)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	int ret;
+
+	adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
+
+	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+		return -ENOTSUP;
+
+	/* Set hash key. */
+	ret = iavf_set_rss_key(adapter, rss_conf->rss_key,
+			       rss_conf->rss_key_len);
+	if (ret)
+		return ret;
+
+	if (rss_conf->rss_hf == 0) {
+		vf->rss_hf = 0;
+		ret = iavf_set_hena(adapter, 0);
+
+		/* It is a workaround, temporarily allow error to be returned
+		 * due to possible lack of PF handling for hena = 0.
+		 */
+		if (ret)
+			PMD_DRV_LOG(WARNING, "fail to clean existing RSS, lack PF support");
+		return 0;
+	}
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
+		/* Clear existing RSS. */
+		ret = iavf_set_hena(adapter, 0);
+
+		/* It is a workaround, temporarily allow error to be returned
+		 * due to possible lack of PF handling for hena = 0.
+		 */
+		if (ret)
+			PMD_DRV_LOG(WARNING, "fail to clean existing RSS,"
+				    "lack PF support");
+
+		/* Set new RSS configuration. */
+		ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "fail to set new RSS");
+			return ret;
+		}
+	} else {
+		ret = iavf_config_rss_hf(adapter, rss_conf->rss_hf);
+		if (ret != -ENOTSUP)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int
 iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 			  struct rte_eth_rss_conf *rss_conf)
@@ -1140,8 +1279,7 @@  iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
-	 /* Just set it to default value now. */
-	rss_conf->rss_hf = IAVF_RSS_OFFLOAD_ALL;
+	rss_conf->rss_hf = vf->rss_hf;
 
 	if (!rss_conf->rss_key)
 		return 0;
@@ -1167,7 +1305,7 @@  iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EBUSY;
 	}
 
-	if (frame_size > RTE_ETHER_MAX_LEN)
+	if (frame_size > IAVF_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads |=
 				DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
@@ -1787,6 +1925,8 @@  iavf_init_vf(struct rte_eth_dev *dev)
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
+	vf->eth_dev = dev;
+
 	err = iavf_parse_devargs(dev);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Failed to parse devargs");
@@ -1939,6 +2079,24 @@  iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
 	return ret;
 }
 
+static void
+iavf_default_rss_disable(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	int ret = 0;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+		/* Set hena = 0 to ask PF to cleanup all existing RSS. */
+		ret = iavf_set_hena(adapter, 0);
+		if (ret)
+			/* It is a workaround, temporarily allow error to be
+			 * returned due to possible lack of PF handling for
+			 * hena = 0.
+			 */
+			PMD_INIT_LOG(WARNING, "fail to disable default RSS,"
+				    "lack PF support");
+	}
+}
 
 static int
 iavf_dev_init(struct rte_eth_dev *eth_dev)
@@ -1981,7 +2139,7 @@  iavf_dev_init(struct rte_eth_dev *eth_dev)
 	hw->bus.func = pci_dev->addr.function;
 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
 	hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
-	adapter->eth_dev = eth_dev;
+	adapter->dev_data = eth_dev->data;
 	adapter->stopped = 1;
 
 	if (iavf_init_vf(eth_dev) != 0) {
@@ -2027,6 +2185,8 @@  iavf_dev_init(struct rte_eth_dev *eth_dev)
 		return ret;
 	}
 
+	iavf_default_rss_disable(adapter);
+
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 7054bde..f9d7b81 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -25,6 +25,9 @@ 
 #define IAVF_FDIR_IPV6_TC_OFFSET 20
 #define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
 
+#define IAVF_GTPU_EH_DWLINK 0
+#define IAVF_GTPU_EH_UPLINK 1
+
 #define IAVF_FDIR_INSET_ETH (\
 	IAVF_INSET_ETHERTYPE)
 
@@ -72,6 +75,19 @@ 
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID)
 
+#define IAVF_FDIR_INSET_GTPU_IPV4 (\
+	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
+	IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
+	IAVF_INSET_TUN_IPV4_TTL)
+
+#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
+	IAVF_FDIR_INSET_GTPU_IPV4 | \
+	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
+	IAVF_FDIR_INSET_GTPU_IPV4 | \
+	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)
+
 #define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
@@ -80,6 +96,19 @@ 
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_GTPU_TEID)
 
+#define IAVF_FDIR_INSET_GTPU_IPV6 (\
+	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
+	IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
+	IAVF_INSET_TUN_IPV6_HOP_LIMIT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
+	IAVF_FDIR_INSET_GTPU_IPV6 | \
+	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
+	IAVF_FDIR_INSET_GTPU_IPV6 | \
+	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)
+
 #define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
@@ -105,6 +134,7 @@ 
 	IAVF_INSET_PFCP_S_FIELD)
 
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
+	{iavf_pattern_raw,			 IAVF_INSET_NONE,		IAVF_INSET_NONE},
 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_udp,		IAVF_FDIR_INSET_ETH_IPV4_UDP,		IAVF_INSET_NONE},
@@ -115,7 +145,19 @@  static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_IPV4_GTPU,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4,	 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp,	 IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp,	 IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6,	 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp,	 IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp,	 IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_IPV4_GTPU_EH,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4,	 IAVF_FDIR_INSET_GTPU_IPV4,	IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP,	IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP,	IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6,	 IAVF_FDIR_INSET_GTPU_IPV6,	IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP,	IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP,	IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu,		IAVF_FDIR_INSET_IPV6_GTPU,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_gtpu_eh,		IAVF_FDIR_INSET_IPV6_GTPU_EH,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
@@ -284,7 +326,7 @@  iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
 		}
 	}
 
-	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
+	if (rss->queue[rss->queue_num - 1] >= ad->dev_data->nb_rx_queues) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION, act,
 				"Invalid queue region indexes.");
@@ -364,7 +406,7 @@  iavf_fdir_parse_action(struct iavf_adapter *ad,
 			filter_action->act_conf.queue.index = act_q->index;
 
 			if (filter_action->act_conf.queue.index >=
-				ad->eth_dev->data->nb_rx_queues) {
+				ad->dev_data->nb_rx_queues) {
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION,
 					actions, "Invalid queue for FDIR.");
@@ -448,15 +490,68 @@  iavf_fdir_parse_action(struct iavf_adapter *ad,
 	return 0;
 }
 
+static bool
+iavf_fdir_refine_input_set(const uint64_t input_set,
+			   const uint64_t input_set_mask,
+			   struct iavf_fdir_conf *filter)
+{
+	struct virtchnl_proto_hdr *hdr, *hdr_last;
+	struct rte_flow_item_ipv4 ipv4_spec;
+	struct rte_flow_item_ipv6 ipv6_spec;
+	int last_layer;
+	uint8_t proto_id;
+
+	if (input_set & ~input_set_mask)
+		return false;
+	else if (input_set)
+		return true;
+
+	last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
+	/* Last layer of TCP/UDP pattern isn't less than 2. */
+	if (last_layer < 2)
+		return false;
+	hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
+	if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
+		proto_id = 6;
+	else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
+		proto_id = 17;
+	else
+		return false;
+
+	hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
+	switch (hdr->type) {
+	case VIRTCHNL_PROTO_HDR_IPV4:
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
+		memset(&ipv4_spec, 0, sizeof(ipv4_spec));
+		ipv4_spec.hdr.next_proto_id = proto_id;
+		rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
+			   sizeof(ipv4_spec.hdr));
+		return true;
+	case VIRTCHNL_PROTO_HDR_IPV6:
+		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
+		memset(&ipv6_spec, 0, sizeof(ipv6_spec));
+		ipv6_spec.hdr.proto = proto_id;
+		rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
+			   sizeof(ipv6_spec.hdr));
+		return true;
+	default:
+		return false;
+	}
+}
+
 static int
 iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 			const struct rte_flow_item pattern[],
+			const uint64_t input_set_mask,
 			struct rte_flow_error *error,
 			struct iavf_fdir_conf *filter)
 {
+	struct virtchnl_proto_hdrs *hdrs =
+		&filter->add_fltr.rule_cfg.proto_hdrs;
 	const struct rte_flow_item *item = pattern;
 	enum rte_flow_item_type item_type;
 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
 	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
@@ -473,7 +568,8 @@  iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 
 	enum rte_flow_item_type next_type;
 	uint16_t ether_type;
-
+	uint8_t item_num = 0;
+	u8 tun_inner = 0;
 	int layer = 0;
 	struct virtchnl_proto_hdr *hdr;
 
@@ -490,8 +586,72 @@  iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 		}
 
 		item_type = item->type;
+		item_num++;
 
 		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_RAW: {
+				raw_spec = item->spec;
+				raw_mask = item->mask;
+
+				if (item_num != 1)
+					return -rte_errno;
+
+				if (raw_spec->length != raw_mask->length)
+					return -rte_errno;
+
+				uint16_t pkt_len = 0;
+				uint16_t tmp_val = 0;
+				uint8_t tmp = 0;
+				int i, j;
+
+				pkt_len = raw_spec->length;
+
+				for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+					tmp = raw_spec->pattern[i];
+					if (tmp >= 'a' && tmp <= 'f')
+						tmp_val = tmp - 'a' + 10;
+					if (tmp >= 'A' && tmp <= 'F')
+						tmp_val = tmp - 'A' + 10;
+					if (tmp >= '0' && tmp <= '9')
+						tmp_val = tmp - '0';
+
+					tmp_val *= 16;
+					tmp = raw_spec->pattern[i + 1];
+					if (tmp >= 'a' && tmp <= 'f')
+						tmp_val += (tmp - 'a' + 10);
+					if (tmp >= 'A' && tmp <= 'F')
+						tmp_val += (tmp - 'A' + 10);
+					if (tmp >= '0' && tmp <= '9')
+						tmp_val += (tmp - '0');
+
+					hdrs->raw.spec[j] = tmp_val;
+
+					tmp = raw_mask->pattern[i];
+					if (tmp >= 'a' && tmp <= 'f')
+						tmp_val = tmp - 'a' + 10;
+					if (tmp >= 'A' && tmp <= 'F')
+						tmp_val = tmp - 'A' + 10;
+					if (tmp >= '0' && tmp <= '9')
+						tmp_val = tmp - '0';
+
+					tmp_val *= 16;
+					tmp = raw_mask->pattern[i + 1];
+					if (tmp >= 'a' && tmp <= 'f')
+						tmp_val += (tmp - 'a' + 10);
+					if (tmp >= 'A' && tmp <= 'F')
+						tmp_val += (tmp - 'A' + 10);
+					if (tmp >= '0' && tmp <= '9')
+						tmp_val += (tmp - '0');
+
+					hdrs->raw.mask[j] = tmp_val;
+				}
+
+				hdrs->raw.pkt_len = pkt_len / 2;
+				hdrs->tunnel_level = 0;
+				hdrs->count = 0;
+				return 0;
+			}
+
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			eth_spec = item->spec;
 			eth_mask = item->mask;
@@ -590,6 +750,11 @@  iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_IPV4_OUTER;
+					input_set |= IAVF_PROT_IPV4_INNER;
+				}
+
 				rte_memcpy(hdr->buffer,
 					&ipv4_spec->hdr,
 					sizeof(ipv4_spec->hdr));
@@ -642,6 +807,11 @@  iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_IPV6_OUTER;
+					input_set |= IAVF_PROT_IPV6_INNER;
+				}
+
 				rte_memcpy(hdr->buffer,
 					&ipv6_spec->hdr,
 					sizeof(ipv6_spec->hdr));
@@ -676,6 +846,11 @@  iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_UDP_OUTER;
+					input_set |= IAVF_PROT_UDP_INNER;
+				}
+
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 					rte_memcpy(hdr->buffer,
 						&udp_spec->hdr,
@@ -720,6 +895,11 @@  iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
 				}
 
+				if (tun_inner) {
+					input_set &= ~IAVF_PROT_TCP_OUTER;
+					input_set |= IAVF_PROT_TCP_INNER;
+				}
+
 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 					rte_memcpy(hdr->buffer,
 						&tcp_spec->hdr,
@@ -798,6 +978,8 @@  iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 					gtp_spec, sizeof(*gtp_spec));
 			}
 
+			tun_inner = 1;
+
 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
 			break;
 
@@ -807,12 +989,29 @@  iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 
 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
 
-			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+			if (!gtp_psc_spec)
+				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+			else if ((gtp_psc_mask->qfi) && !(gtp_psc_mask->pdu_type))
+				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+			else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_UPLINK)
+				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
+			else if (gtp_psc_spec->pdu_type == IAVF_GTPU_EH_DWLINK)
+				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);
 
 			if (gtp_psc_spec && gtp_psc_mask) {
 				if (gtp_psc_mask->qfi == UINT8_MAX) {
 					input_set |= IAVF_INSET_GTPU_QFI;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
+					if (gtp_psc_spec->pdu_type ==
+								IAVF_GTPU_EH_UPLINK)
+						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
+										 GTPU_UP, QFI);
+					else if (gtp_psc_spec->pdu_type ==
+								IAVF_GTPU_EH_DWLINK)
+						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
+										 GTPU_DWN, QFI);
+					else
+						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
+										 GTPU_EH, QFI);
 				}
 
 				rte_memcpy(hdr->buffer, gtp_psc_spec,
@@ -924,6 +1123,13 @@  iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 		return -rte_errno;
 	}
 
+	if (!iavf_fdir_refine_input_set(input_set, input_set_mask, filter)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
 	filter->input_set = input_set;
 
 	return 0;
@@ -941,7 +1147,6 @@  iavf_fdir_parse(struct iavf_adapter *ad,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
 	struct iavf_fdir_conf *filter = &vf->fdir.conf;
 	struct iavf_pattern_match_item *item = NULL;
-	uint64_t input_set;
 	int ret;
 
 	memset(filter, 0, sizeof(*filter));
@@ -950,19 +1155,11 @@  iavf_fdir_parse(struct iavf_adapter *ad,
 	if (!item)
 		return -rte_errno;
 
-	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
+	ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
+				      error, filter);
 	if (ret)
 		goto error;
 
-	input_set = filter->input_set;
-	if (!input_set || input_set & ~item->input_set_mask) {
-		rte_flow_error_set(error, EINVAL,
-				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
-				"Invalid input set");
-		ret = -rte_errno;
-		goto error;
-	}
-
 	ret = iavf_fdir_parse_action(ad, actions, error, filter);
 	if (ret)
 		goto error;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index 00e7f15..3d2c304 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -48,6 +48,12 @@  const struct rte_flow_ops iavf_flow_ops = {
 	.query = iavf_flow_query,
 };
 
+/* raw */
+enum rte_flow_item_type iavf_pattern_raw[] = {
+	RTE_FLOW_ITEM_TYPE_RAW,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 /* empty */
 enum rte_flow_item_type iavf_pattern_empty[] = {
 	RTE_FLOW_ITEM_TYPE_END,
@@ -219,6 +225,30 @@  enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+enum rte_flow_item_type iavf_pattern_eth_ipv6_frag_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_frag_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_frag_ext[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
@@ -409,377 +439,1166 @@  enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4_icmp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-/* IPV4 GTPU IPv6 */
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_udp[] = {
+/* IPV4 GRE IPv4 UDP GTPU IPv4*/
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_tcp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_TCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_icmp[] = {
+/* IPV4 GRE IPv4 UDP GTPU IPv6*/
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
 	RTE_FLOW_ITEM_TYPE_IPV6,
-	RTE_FLOW_ITEM_TYPE_ICMP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-/* IPV6 GTPU IPv4 */
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
-	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_udp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
-	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_tcp[] = {
+/* IPV4 GRE IPv6 UDP GTPU IPv4*/
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
 	RTE_FLOW_ITEM_TYPE_IPV4,
-	RTE_FLOW_ITEM_TYPE_TCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_icmp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
 	RTE_FLOW_ITEM_TYPE_IPV4,
-	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-/* IPV6 GTPU IPv6 */
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_udp[] = {
+/* IPV4 GRE IPv6 UDP GTPU IPv6*/
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
 	RTE_FLOW_ITEM_TYPE_IPV6,
-	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_tcp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
 	RTE_FLOW_ITEM_TYPE_IPV6,
-	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_icmp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
 	RTE_FLOW_ITEM_TYPE_IPV6,
-	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_TCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-/* IPV4 GTPU EH IPv4 */
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
-	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
+/* IPV6 GRE IPv4 UDP GTPU IPv4*/
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV4,
-	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV4,
-	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV4,
-	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_TCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-/* IPV4 GTPU EH IPv6 */
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6[] = {
+/* IPV4 GRE IPv4 UDP GTPU IPv6*/
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_TCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_icmp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
-	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
-	RTE_FLOW_ITEM_TYPE_IPV6,
-	RTE_FLOW_ITEM_TYPE_ICMP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-/* IPV6 GTPU EH IPv4 */
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4[] = {
+/* IPV6 GRE IPv6 UDP GTPU IPv4*/
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_udp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_tcp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_TCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_icmp[] = {
+/* IPV6 GRE IPv6 UDP GTPU IPv6*/
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
-	RTE_FLOW_ITEM_TYPE_IPV4,
-	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-/* IPV6 GTPU EH IPv6 */
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_udp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
-	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_tcp[] = {
+/* IPV4 GRE IPv4 UDP GTPU EH IPv4*/
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
-	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
 	RTE_FLOW_ITEM_TYPE_GTP_PSC,
-	RTE_FLOW_ITEM_TYPE_IPV6,
-	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_icmp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
-	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_GTPU,
 	RTE_FLOW_ITEM_TYPE_GTP_PSC,
-	RTE_FLOW_ITEM_TYPE_IPV6,
-	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-/* ESP */
-enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
-	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[] = {
+/* IPV4 GRE IPv4 UDP GTPU IPv6*/
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
-	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV6,
-	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_TCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
-	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-/* AH */
-enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[] = {
+/* IPV4 GRE IPv6 UDP GTPU EH IPv4*/
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
-	RTE_FLOW_ITEM_TYPE_AH,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV6,
-	RTE_FLOW_ITEM_TYPE_AH,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-/* L2TPV3 */
-enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
-	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[] = {
+/* IPV4 GRE IPv6 UDP GTPU EH IPv6*/
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
 	RTE_FLOW_ITEM_TYPE_IPV6,
-	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-/* PFCP */
-enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
-	RTE_FLOW_ITEM_TYPE_PFCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[] = {
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPV6 GRE IPv4 UDP GTPU EH IPv4*/
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPV4 GRE IPv4 UDP GTPU EH IPv6*/
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPV6 GRE IPv6 UDP GTPU EH IPv4*/
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPV6 GRE IPv6 UDP GTPU EH IPv6*/
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPV4 GTPU IPv6 */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPV6 GTPU IPv4 */
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPV6 GTPU IPv6 */
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPV4 GTPU EH IPv4 */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPV4 GTPU EH IPv6 */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPV6 GTPU EH IPv4 */
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPV6 GTPU EH IPv6 */
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* ESP */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* AH */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_AH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_AH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* L2TPV3 */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* PFCP */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_PFCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_PFCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* GRE */
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
 	RTE_FLOW_ITEM_TYPE_IPV6,
 	RTE_FLOW_ITEM_TYPE_UDP,
-	RTE_FLOW_ITEM_TYPE_PFCP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index efc7f22..5368a5a 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -93,6 +93,27 @@ 
 #define IAVF_INSET_IPV6_TC \
 	(IAVF_PROT_IPV6_OUTER | IAVF_IP_TOS)
 
+#define IAVF_INSET_TUN_IPV4_SRC \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_SRC)
+#define IAVF_INSET_TUN_IPV4_DST \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_DST)
+#define IAVF_INSET_TUN_IPV4_TOS \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_TOS)
+#define IAVF_INSET_TUN_IPV4_PROTO \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_PROTO)
+#define IAVF_INSET_TUN_IPV4_TTL \
+	(IAVF_PROT_IPV4_INNER | IAVF_IP_TTL)
+#define IAVF_INSET_TUN_IPV6_SRC \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_SRC)
+#define IAVF_INSET_TUN_IPV6_DST \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_DST)
+#define IAVF_INSET_TUN_IPV6_NEXT_HDR \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_PROTO)
+#define IAVF_INSET_TUN_IPV6_HOP_LIMIT \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_TTL)
+#define IAVF_INSET_TUN_IPV6_TC \
+	(IAVF_PROT_IPV6_INNER | IAVF_IP_TOS)
+
 #define IAVF_INSET_TCP_SRC_PORT \
 	(IAVF_PROT_TCP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_TCP_DST_PORT \
@@ -101,6 +122,16 @@ 
 	(IAVF_PROT_UDP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_UDP_DST_PORT \
 	(IAVF_PROT_UDP_OUTER | IAVF_DPORT)
+
+#define IAVF_INSET_TUN_TCP_SRC_PORT \
+	(IAVF_PROT_TCP_INNER | IAVF_SPORT)
+#define IAVF_INSET_TUN_TCP_DST_PORT \
+	(IAVF_PROT_TCP_INNER | IAVF_DPORT)
+#define IAVF_INSET_TUN_UDP_SRC_PORT \
+	(IAVF_PROT_UDP_INNER | IAVF_SPORT)
+#define IAVF_INSET_TUN_UDP_DST_PORT \
+	(IAVF_PROT_UDP_INNER | IAVF_DPORT)
+
 #define IAVF_INSET_SCTP_SRC_PORT \
 	(IAVF_PROT_SCTP_OUTER | IAVF_SPORT)
 #define IAVF_INSET_SCTP_DST_PORT \
@@ -136,6 +167,8 @@ 
 #define IAVF_INSET_PFCP_SEID \
 	(IAVF_PROT_PFCP | IAVF_PFCP_S_FIELD | IAVF_PFCP_SEID)
 
+/* raw pattern */
+extern enum rte_flow_item_type iavf_pattern_raw[];
 
 /* empty pattern */
 extern enum rte_flow_item_type iavf_pattern_empty[];
@@ -169,6 +202,9 @@  extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[];
 extern enum rte_flow_item_type iavf_pattern_eth_ipv6[];
 extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[];
 extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_frag_ext[];
+extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_frag_ext[];
+extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_frag_ext[];
 extern enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[];
 extern enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[];
 extern enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[];
@@ -202,6 +238,94 @@  extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4_udp[];
 extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4_tcp[];
 extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4_icmp[];
 
+/* IPv4 GRE IPv4 UDP GTPU IPv4 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_tcp[];
+
+/* IPv4 GRE IPv4 UDP GTPU IPv6 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_tcp[];
+
+/* IPv4 GRE IPv6 UDP GTPU IPv4 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_tcp[];
+
+/* IPv4 GRE IPv6 UDP GTPU IPv6 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_tcp[];
+
+/* IPv6 GRE IPv4 UDP GTPU IPv4 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_tcp[];
+
+/* IPv6 GRE IPv4 UDP GTPU IPv6 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_tcp[];
+
+/* IPv6 GRE IPv6 UDP GTPU IPv4 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_tcp[];
+
+/* IPv6 GRE IPv6 UDP GTPU IPv6 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_tcp[];
+
+/* IPv4 GRE IPv4 UDP GTPU EH IPv4 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_tcp[];
+
+/* IPv4 GRE IPv4 UDP GTPU EH IPv6 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_tcp[];
+
+/* IPv4 GRE IPv6 UDP GTPU EH IPv4 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_tcp[];
+
+/* IPv4 GRE IPv6 UDP GTPU EH IPv6 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_tcp[];
+
+/* IPv6 GRE IPv4 UDP GTPU EH IPv4 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_tcp[];
+
+/* IPv6 GRE IPv4 UDP GTPU EH IPv6 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_tcp[];
+
+/* IPv6 GRE IPv6 UDP GTPU EH IPv4 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_tcp[];
+
+/* IPv6 GRE IPv6 UDP GTPU EH IPv6 */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_tcp[];
+
 /* IPv4 GTPU IPv6 */
 extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6[];
 extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_udp[];
@@ -262,6 +386,19 @@  extern enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[];
 extern enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[];
 extern enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[];
 
+/* GRE */
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_tcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_tcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv4_gre_ipv6_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_tcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_tcp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv4_udp[];
+extern enum rte_flow_item_type iavf_pattern_eth_ipv6_gre_ipv6_udp[];
 
 extern const struct rte_flow_ops iavf_flow_ops;
 
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index c4c73e6..0c2a790 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -30,6 +30,13 @@ 
 #define	IAVF_PHINT_GTPU_EH_UP			BIT_ULL(3)
 #define IAVF_PHINT_OUTER_IPV4			BIT_ULL(4)
 #define IAVF_PHINT_OUTER_IPV6			BIT_ULL(5)
+#define IAVF_PHINT_GRE				BIT_ULL(6)
+/* the second IP header of GTPoGRE */
+#define IAVF_PHINT_MID_IPV4			BIT_ULL(7)
+#define IAVF_PHINT_MID_IPV6			BIT_ULL(8)
+
+/* Raw */
+#define IAVF_PHINT_RAW				BIT_ULL(11)
 
 #define IAVF_PHINT_GTPU_MSK	(IAVF_PHINT_GTPU	| \
 				 IAVF_PHINT_GTPU_EH	| \
@@ -51,6 +58,7 @@  struct iavf_hash_match_type {
 struct iavf_rss_meta {
 	struct virtchnl_proto_hdrs proto_hdrs;
 	enum virtchnl_rss_algorithm rss_algorithm;
+	bool raw_ena;
 };
 
 struct iavf_hash_flow_cfg {
@@ -112,6 +120,10 @@  iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | \
 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), {BUFF_NOUSED} }
 
+#define proto_hdr_ipv6_frag { \
+	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, \
+	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID), {BUFF_NOUSED} }
+
 #define proto_hdr_ipv6_with_prot { \
 	VIRTCHNL_PROTO_HDR_IPV6, \
 	FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | \
@@ -158,140 +170,171 @@  iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
 /* proto_hdrs template */
 struct virtchnl_proto_hdrs outer_ipv4_tmplt = {
 	TUNNEL_LEVEL_OUTER, 4,
-	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4}
+	{{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv4_udp_tmplt = {
 	TUNNEL_LEVEL_OUTER, 5,
-	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+	{{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
 	 proto_hdr_ipv4_with_prot,
-	 proto_hdr_udp}
+	 proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv4_tcp_tmplt = {
 	TUNNEL_LEVEL_OUTER, 5,
-	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+	{{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
 	 proto_hdr_ipv4_with_prot,
-	 proto_hdr_tcp}
+	 proto_hdr_tcp}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv4_sctp_tmplt = {
 	TUNNEL_LEVEL_OUTER, 5,
-	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4,
-	 proto_hdr_sctp}
+	{{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4,
+	 proto_hdr_sctp}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv6_tmplt = {
 	TUNNEL_LEVEL_OUTER, 4,
-	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6}
+	{{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6}}
+};
+
+struct virtchnl_proto_hdrs outer_ipv6_frag_tmplt = {
+	TUNNEL_LEVEL_OUTER, 5,
+	{{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+	 proto_hdr_ipv6, proto_hdr_ipv6_frag}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv6_udp_tmplt = {
 	TUNNEL_LEVEL_OUTER, 5,
-	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+	{{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
 	 proto_hdr_ipv6_with_prot,
-	 proto_hdr_udp}
+	 proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv6_tcp_tmplt = {
 	TUNNEL_LEVEL_OUTER, 5,
-	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+	{{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
 	 proto_hdr_ipv6_with_prot,
-	 proto_hdr_tcp}
+	 proto_hdr_tcp}}
 };
 
 struct virtchnl_proto_hdrs outer_ipv6_sctp_tmplt = {
 	TUNNEL_LEVEL_OUTER, 5,
-	{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6,
-	 proto_hdr_sctp}
+	{{proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6,
+	 proto_hdr_sctp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv4_tmplt = {
-	TUNNEL_LEVEL_INNER, 1, {proto_hdr_ipv4}
+	TUNNEL_LEVEL_INNER, 1, {{proto_hdr_ipv4}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv4_udp_tmplt = {
-	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot, proto_hdr_udp}
+	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv4_tcp_tmplt = {
-	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot, proto_hdr_tcp}
+	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_tcp}}
+};
+
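+/* Templates for the second inner layer (tunnel level 2), used by the
+ * GTPoGRE patterns where the hashed headers sit behind both the GRE
+ * and GTP-U tunnels.
+ */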
+struct virtchnl_proto_hdrs second_inner_ipv4_tmplt = {
+	2, 1, {{proto_hdr_ipv4}}
+};
+
+struct virtchnl_proto_hdrs second_inner_ipv4_udp_tmplt = {
+	2, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_udp}}
+};
+
+struct virtchnl_proto_hdrs second_inner_ipv4_tcp_tmplt = {
+	2, 2, {{proto_hdr_ipv4_with_prot, proto_hdr_tcp}}
+};
+
+struct virtchnl_proto_hdrs second_inner_ipv6_tmplt = {
+	2, 1, {{proto_hdr_ipv6}}
+};
+
+struct virtchnl_proto_hdrs second_inner_ipv6_udp_tmplt = {
+	2, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_udp}}
+};
+
+struct virtchnl_proto_hdrs second_inner_ipv6_tcp_tmplt = {
+	2, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_tcp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv4_sctp_tmplt = {
-	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4, proto_hdr_sctp}
+	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv4, proto_hdr_sctp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv6_tmplt = {
-	TUNNEL_LEVEL_INNER, 1, {proto_hdr_ipv6}
+	TUNNEL_LEVEL_INNER, 1, {{proto_hdr_ipv6}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv6_udp_tmplt = {
-	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot, proto_hdr_udp}
+	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_udp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv6_tcp_tmplt = {
-	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot, proto_hdr_tcp}
+	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6_with_prot, proto_hdr_tcp}}
 };
 
 struct virtchnl_proto_hdrs inner_ipv6_sctp_tmplt = {
-	TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6, proto_hdr_sctp}
+	TUNNEL_LEVEL_INNER, 2, {{proto_hdr_ipv6, proto_hdr_sctp}}
 };
 
 struct virtchnl_proto_hdrs ipv4_esp_tmplt = {
-	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_esp}
+	TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_esp}}
 };
 
 struct virtchnl_proto_hdrs ipv4_udp_esp_tmplt = {
 	TUNNEL_LEVEL_OUTER, 3,
-	{proto_hdr_ipv4, proto_hdr_udp, proto_hdr_esp}
+	{{proto_hdr_ipv4, proto_hdr_udp, proto_hdr_esp}}
 };
 
 struct virtchnl_proto_hdrs ipv4_ah_tmplt = {
-	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_ah}
+	TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_ah}}
 };
 
 struct virtchnl_proto_hdrs ipv6_esp_tmplt = {
-	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_esp}
+	TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_esp}}
 };
 
 struct virtchnl_proto_hdrs ipv6_udp_esp_tmplt = {
 	TUNNEL_LEVEL_OUTER, 3,
-	{proto_hdr_ipv6, proto_hdr_udp, proto_hdr_esp}
+	{{proto_hdr_ipv6, proto_hdr_udp, proto_hdr_esp}}
 };
 
 struct virtchnl_proto_hdrs ipv6_ah_tmplt = {
-	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_ah}
+	TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_ah}}
 };
 
 struct virtchnl_proto_hdrs ipv4_l2tpv3_tmplt = {
-	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_l2tpv3}
+	TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_l2tpv3}}
 };
 
 struct virtchnl_proto_hdrs ipv6_l2tpv3_tmplt = {
-	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_l2tpv3}
+	TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_l2tpv3}}
 };
 
 struct virtchnl_proto_hdrs ipv4_pfcp_tmplt = {
-	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv4, proto_hdr_pfcp}
+	TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv4, proto_hdr_pfcp}}
 };
 
 struct virtchnl_proto_hdrs ipv6_pfcp_tmplt = {
-	TUNNEL_LEVEL_OUTER, 2, {proto_hdr_ipv6, proto_hdr_pfcp}
+	TUNNEL_LEVEL_OUTER, 2, {{proto_hdr_ipv6, proto_hdr_pfcp}}
 };
 
 struct virtchnl_proto_hdrs ipv4_udp_gtpc_tmplt = {
-	TUNNEL_LEVEL_OUTER, 3, {proto_hdr_ipv4, proto_hdr_udp, proto_hdr_gtpc}
+	TUNNEL_LEVEL_OUTER, 3, {{proto_hdr_ipv4, proto_hdr_udp, proto_hdr_gtpc}}
 };
 
 struct virtchnl_proto_hdrs ipv6_udp_gtpc_tmplt = {
-	TUNNEL_LEVEL_OUTER, 3, {proto_hdr_ipv6, proto_hdr_udp, proto_hdr_gtpc}
+	TUNNEL_LEVEL_OUTER, 3, {{proto_hdr_ipv6, proto_hdr_udp, proto_hdr_gtpc}}
 };
 
 /* rss type super set */
 
 /* IPv4 outer */
-#define IAVF_RSS_TYPE_OUTER_IPV4	(ETH_RSS_ETH | ETH_RSS_IPV4)
+#define IAVF_RSS_TYPE_OUTER_IPV4	(ETH_RSS_ETH | ETH_RSS_IPV4 | \
+					 ETH_RSS_FRAG_IPV4)
 #define IAVF_RSS_TYPE_OUTER_IPV4_UDP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
 					 ETH_RSS_NONFRAG_IPV4_UDP)
 #define IAVF_RSS_TYPE_OUTER_IPV4_TCP	(IAVF_RSS_TYPE_OUTER_IPV4 | \
@@ -300,6 +343,8 @@  struct virtchnl_proto_hdrs ipv6_udp_gtpc_tmplt = {
 					 ETH_RSS_NONFRAG_IPV4_SCTP)
 /* IPv6 outer */
 #define IAVF_RSS_TYPE_OUTER_IPV6	(ETH_RSS_ETH | ETH_RSS_IPV6)
+#define IAVF_RSS_TYPE_OUTER_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6 | \
+					 ETH_RSS_FRAG_IPV6)
 #define IAVF_RSS_TYPE_OUTER_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
 					 ETH_RSS_NONFRAG_IPV6_UDP)
 #define IAVF_RSS_TYPE_OUTER_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6 | \
@@ -318,6 +363,8 @@  struct virtchnl_proto_hdrs ipv6_udp_gtpc_tmplt = {
 /* VLAN IPv6 */
 #define IAVF_RSS_TYPE_VLAN_IPV6		(IAVF_RSS_TYPE_OUTER_IPV6 | \
 					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
+#define IAVF_RSS_TYPE_VLAN_IPV6_FRAG	(IAVF_RSS_TYPE_OUTER_IPV6_FRAG | \
+					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_UDP	(IAVF_RSS_TYPE_OUTER_IPV6_UDP | \
 					 ETH_RSS_S_VLAN | ETH_RSS_C_VLAN)
 #define IAVF_RSS_TYPE_VLAN_IPV6_TCP	(IAVF_RSS_TYPE_OUTER_IPV6_TCP | \
@@ -372,6 +419,7 @@  struct virtchnl_proto_hdrs ipv6_udp_gtpc_tmplt = {
  */
 static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	/* IPv4 */
+	{iavf_pattern_raw,				IAVF_INSET_NONE,		NULL},
 	{iavf_pattern_eth_ipv4,				IAVF_RSS_TYPE_OUTER_IPV4,	&outer_ipv4_tmplt},
 	{iavf_pattern_eth_ipv4_udp,			IAVF_RSS_TYPE_OUTER_IPV4_UDP,	&outer_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv4_tcp,			IAVF_RSS_TYPE_OUTER_IPV4_TCP,	&outer_ipv4_tcp_tmplt},
@@ -393,18 +441,50 @@  static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv6_gtpu_eh_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&inner_ipv4_tmplt},
 	{iavf_pattern_eth_ipv6_gtpu_eh_ipv4_udp,	IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&inner_ipv4_udp_tmplt},
 	{iavf_pattern_eth_ipv6_gtpu_eh_ipv4_tcp,	IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&inner_ipv4_tcp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&second_inner_ipv4_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_udp,		IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&second_inner_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_tcp,		IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&second_inner_ipv4_tcp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&second_inner_ipv4_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_udp,		IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&second_inner_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_tcp,		IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&second_inner_ipv4_tcp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&second_inner_ipv4_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_udp,		IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&second_inner_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_tcp,		IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&second_inner_ipv4_tcp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&second_inner_ipv4_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_udp,		IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&second_inner_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_tcp,		IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&second_inner_ipv4_tcp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&second_inner_ipv4_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_udp,	IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&second_inner_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_tcp,	IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&second_inner_ipv4_tcp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&second_inner_ipv4_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_udp,	IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&second_inner_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_tcp,	IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&second_inner_ipv4_tcp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&second_inner_ipv4_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_udp,	IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&second_inner_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_tcp,	IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&second_inner_ipv4_tcp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4,		IAVF_RSS_TYPE_GTPU_IPV4,	&second_inner_ipv4_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_udp,	IAVF_RSS_TYPE_GTPU_IPV4_UDP,	&second_inner_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_tcp,	IAVF_RSS_TYPE_GTPU_IPV4_TCP,	&second_inner_ipv4_tcp_tmplt},
 	{iavf_pattern_eth_ipv4_esp,			IAVF_RSS_TYPE_IPV4_ESP,		&ipv4_esp_tmplt},
 	{iavf_pattern_eth_ipv4_udp_esp,			IAVF_RSS_TYPE_IPV4_ESP,		&ipv4_udp_esp_tmplt},
 	{iavf_pattern_eth_ipv4_ah,			IAVF_RSS_TYPE_IPV4_AH,		&ipv4_ah_tmplt},
 	{iavf_pattern_eth_ipv4_l2tpv3,			IAVF_RSS_TYPE_IPV4_L2TPV3,	&ipv4_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv4_pfcp,			IAVF_RSS_TYPE_IPV4_PFCP,	&ipv4_pfcp_tmplt},
 	{iavf_pattern_eth_ipv4_gtpc,			ETH_RSS_IPV4,			&ipv4_udp_gtpc_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4,	&inner_ipv4_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4,		IAVF_RSS_TYPE_INNER_IPV4, &inner_ipv4_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_tcp,	IAVF_RSS_TYPE_INNER_IPV4_TCP, &inner_ipv4_tcp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_tcp,	IAVF_RSS_TYPE_INNER_IPV4_TCP, &inner_ipv4_tcp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_udp,	IAVF_RSS_TYPE_INNER_IPV4_UDP, &inner_ipv4_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_udp,	IAVF_RSS_TYPE_INNER_IPV4_UDP, &inner_ipv4_udp_tmplt},
 	/* IPv6 */
 	{iavf_pattern_eth_ipv6,				IAVF_RSS_TYPE_OUTER_IPV6,	&outer_ipv6_tmplt},
+	{iavf_pattern_eth_ipv6_frag_ext,		IAVF_RSS_TYPE_OUTER_IPV6_FRAG,	&outer_ipv6_frag_tmplt},
 	{iavf_pattern_eth_ipv6_udp,			IAVF_RSS_TYPE_OUTER_IPV6_UDP,	&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv6_tcp,			IAVF_RSS_TYPE_OUTER_IPV6_TCP,	&outer_ipv6_tcp_tmplt},
 	{iavf_pattern_eth_ipv6_sctp,			IAVF_RSS_TYPE_OUTER_IPV6_SCTP,	&outer_ipv6_sctp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6,			IAVF_RSS_TYPE_VLAN_IPV6,	&outer_ipv6_tmplt},
+	{iavf_pattern_eth_vlan_ipv6_frag_ext,		IAVF_RSS_TYPE_OUTER_IPV6_FRAG,	&outer_ipv6_frag_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_udp,		IAVF_RSS_TYPE_VLAN_IPV6_UDP,	&outer_ipv6_udp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_tcp,		IAVF_RSS_TYPE_VLAN_IPV6_TCP,	&outer_ipv6_tcp_tmplt},
 	{iavf_pattern_eth_vlan_ipv6_sctp,		IAVF_RSS_TYPE_VLAN_IPV6_SCTP,	&outer_ipv6_sctp_tmplt},
@@ -421,23 +501,42 @@  static struct iavf_pattern_match_item iavf_hash_pattern_list[] = {
 	{iavf_pattern_eth_ipv6_gtpu_eh_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&inner_ipv6_tmplt},
 	{iavf_pattern_eth_ipv6_gtpu_eh_ipv6_udp,	IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&inner_ipv6_udp_tmplt},
 	{iavf_pattern_eth_ipv6_gtpu_eh_ipv6_tcp,	IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&inner_ipv6_tcp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&second_inner_ipv6_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_udp,		IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&second_inner_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_tcp,		IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&second_inner_ipv6_tcp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&second_inner_ipv6_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_udp,		IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&second_inner_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_tcp,		IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&second_inner_ipv6_tcp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&second_inner_ipv6_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_udp,		IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&second_inner_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_tcp,		IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&second_inner_ipv6_tcp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&second_inner_ipv6_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_udp,		IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&second_inner_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_tcp,		IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&second_inner_ipv6_tcp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&second_inner_ipv6_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_udp,	IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&second_inner_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_tcp,	IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&second_inner_ipv6_tcp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&second_inner_ipv6_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_udp,	IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&second_inner_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_tcp,	IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&second_inner_ipv6_tcp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&second_inner_ipv6_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_udp,	IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&second_inner_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_tcp,	IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&second_inner_ipv6_tcp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6,		IAVF_RSS_TYPE_GTPU_IPV6,	&second_inner_ipv6_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_udp,	IAVF_RSS_TYPE_GTPU_IPV6_UDP,	&second_inner_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_tcp,	IAVF_RSS_TYPE_GTPU_IPV6_TCP,	&second_inner_ipv6_tcp_tmplt},
 	{iavf_pattern_eth_ipv6_esp,			IAVF_RSS_TYPE_IPV6_ESP,		&ipv6_esp_tmplt},
 	{iavf_pattern_eth_ipv6_udp_esp,			IAVF_RSS_TYPE_IPV6_ESP,		&ipv6_udp_esp_tmplt},
 	{iavf_pattern_eth_ipv6_ah,			IAVF_RSS_TYPE_IPV6_AH,		&ipv6_ah_tmplt},
 	{iavf_pattern_eth_ipv6_l2tpv3,			IAVF_RSS_TYPE_IPV6_L2TPV3,	&ipv6_l2tpv3_tmplt},
 	{iavf_pattern_eth_ipv6_pfcp,			IAVF_RSS_TYPE_IPV6_PFCP,	&ipv6_pfcp_tmplt},
 	{iavf_pattern_eth_ipv6_gtpc,			ETH_RSS_IPV6,			&ipv6_udp_gtpc_tmplt},
-};
-
-struct virtchnl_proto_hdrs *iavf_hash_default_hdrs[] = {
-	&inner_ipv4_tmplt,
-	&inner_ipv4_udp_tmplt,
-	&inner_ipv4_tcp_tmplt,
-	&inner_ipv4_sctp_tmplt,
-	&inner_ipv6_tmplt,
-	&inner_ipv6_udp_tmplt,
-	&inner_ipv6_tcp_tmplt,
-	&inner_ipv6_sctp_tmplt,
+	{iavf_pattern_eth_ipv4_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6,	&inner_ipv6_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6,		IAVF_RSS_TYPE_INNER_IPV6, &inner_ipv6_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_tcp,	IAVF_RSS_TYPE_INNER_IPV6_TCP, &inner_ipv6_tcp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_tcp,	IAVF_RSS_TYPE_INNER_IPV6_TCP, &inner_ipv6_tcp_tmplt},
+	{iavf_pattern_eth_ipv4_gre_ipv6_udp,	IAVF_RSS_TYPE_INNER_IPV6_UDP, &inner_ipv6_udp_tmplt},
+	{iavf_pattern_eth_ipv6_gre_ipv6_udp,	IAVF_RSS_TYPE_INNER_IPV6_UDP, &inner_ipv6_udp_tmplt},
 };
 
 static struct iavf_flow_engine iavf_hash_engine = {
@@ -458,24 +557,64 @@  static struct iavf_flow_parser iavf_hash_parser = {
 	.stage = IAVF_FLOW_STAGE_RSS,
 };
 
-static int
-iavf_hash_default_set(struct iavf_adapter *ad, bool add)
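+/* Add (add == true) or delete the virtchnl RSS configurations that map
+ * to the rte_eth rss_hf bits requested by the application.
+ */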
+int
+iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add)
 {
-	struct virtchnl_rss_cfg *rss_cfg;
-	uint16_t i;
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct virtchnl_rss_cfg rss_cfg;
+
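+/* rss_hf bits that can be translated into virtchnl RSS configurations;
+ * any other bit is ignored and masked out of vf->rss_hf.
+ */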
+#define IAVF_RSS_HF_ALL ( \
+	ETH_RSS_IPV4 | \
+	ETH_RSS_IPV6 | \
+	ETH_RSS_NONFRAG_IPV4_UDP | \
+	ETH_RSS_NONFRAG_IPV6_UDP | \
+	ETH_RSS_NONFRAG_IPV4_TCP | \
+	ETH_RSS_NONFRAG_IPV6_TCP | \
+	ETH_RSS_NONFRAG_IPV4_SCTP | \
+	ETH_RSS_NONFRAG_IPV6_SCTP)
+
+	rss_cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+	if (rss_hf & ETH_RSS_IPV4) {
+		rss_cfg.proto_hdrs = inner_ipv4_tmplt;
+		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
+	}
 
-	rss_cfg = rte_zmalloc("iavf rss rule",
-			      sizeof(struct virtchnl_rss_cfg), 0);
-	if (!rss_cfg)
-		return -ENOMEM;
+	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+		rss_cfg.proto_hdrs = inner_ipv4_udp_tmplt;
+		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
+	}
+
+	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+		rss_cfg.proto_hdrs = inner_ipv4_tcp_tmplt;
+		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
+	}
 
-	for (i = 0; i < RTE_DIM(iavf_hash_default_hdrs); i++) {
-		rss_cfg->proto_hdrs = *iavf_hash_default_hdrs[i];
-		rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+		rss_cfg.proto_hdrs = inner_ipv4_sctp_tmplt;
+		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
+	}
+
+	if (rss_hf & ETH_RSS_IPV6) {
+		rss_cfg.proto_hdrs = inner_ipv6_tmplt;
+		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
+	}
 
-		iavf_add_del_rss_cfg(ad, rss_cfg, add);
+	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+		rss_cfg.proto_hdrs = inner_ipv6_udp_tmplt;
+		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
 	}
 
+	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+		rss_cfg.proto_hdrs = inner_ipv6_tcp_tmplt;
+		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
+	}
+
+	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+		rss_cfg.proto_hdrs = inner_ipv6_sctp_tmplt;
+		iavf_add_del_rss_cfg(ad, &rss_cfg, add);
+	}
+
+	vf->rss_hf = rss_hf & IAVF_RSS_HF_ALL;
 	return 0;
 }
 
@@ -510,12 +649,6 @@  iavf_hash_init(struct iavf_adapter *ad)
 		return ret;
 	}
 
-	ret = iavf_hash_default_set(ad, true);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "fail to set default RSS");
-		iavf_unregister_parser(parser, ad);
-	}
-
 	return ret;
 }
 
@@ -535,13 +668,20 @@  iavf_hash_parse_pattern(const struct rte_flow_item pattern[], uint64_t *phint,
 		}
 
 		switch (item->type) {
+		case RTE_FLOW_ITEM_TYPE_RAW:
+			*phint |= IAVF_PHINT_RAW;
+			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
-			if (!(*phint & IAVF_PHINT_GTPU_MSK))
+			if (!(*phint & IAVF_PHINT_GTPU_MSK) && !(*phint & IAVF_PHINT_GRE))
 				*phint |= IAVF_PHINT_OUTER_IPV4;
+			if ((*phint & IAVF_PHINT_GRE) && !(*phint & IAVF_PHINT_GTPU_MSK))
+				*phint |= IAVF_PHINT_MID_IPV4;
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
-			if (!(*phint & IAVF_PHINT_GTPU_MSK))
+			if (!(*phint & IAVF_PHINT_GTPU_MSK) && !(*phint & IAVF_PHINT_GRE))
 				*phint |= IAVF_PHINT_OUTER_IPV6;
+			if ((*phint & IAVF_PHINT_GRE) && !(*phint & IAVF_PHINT_GTPU_MSK))
+				*phint |= IAVF_PHINT_MID_IPV6;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GTPU:
 			*phint |= IAVF_PHINT_GTPU;
@@ -556,6 +696,8 @@  iavf_hash_parse_pattern(const struct rte_flow_item pattern[], uint64_t *phint,
 			else if (psc->pdu_type == IAVF_GTPU_EH_DWNLINK)
 				*phint |= IAVF_PHINT_GTPU_EH_DWN;
 			break;
+		case RTE_FLOW_ITEM_TYPE_GRE:
+			*phint |= IAVF_PHINT_GRE;
+			break;
 		default:
 			break;
 		}
@@ -564,6 +706,80 @@  iavf_hash_parse_pattern(const struct rte_flow_item pattern[], uint64_t *phint,
 	return 0;
 }
 
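+/* Convert the hexadecimal spec/mask strings of a RAW flow item into the
+ * binary packet and mask buffers carried in meta->proto_hdrs.raw.
+ */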
+static int
+iavf_hash_parse_raw_pattern(const struct rte_flow_item *item,
+			struct iavf_rss_meta *meta)
+{
+	const struct rte_flow_item_raw *raw_spec, *raw_mask;
+	uint8_t *pkt_buf, *msk_buf;
+	uint16_t spec_len, pkt_len;
+	uint8_t tmp_val = 0;
+	uint8_t tmp_c = 0;
+	int i, j;
+
+	raw_spec = item->spec;
+	raw_mask = item->mask;
+
+	spec_len = strlen((char *)(uintptr_t)raw_spec->pattern);
+	if (strlen((char *)(uintptr_t)raw_mask->pattern) !=
+		spec_len)
+		return -rte_errno;
+
+	pkt_len = spec_len / 2;
+
+	pkt_buf = rte_zmalloc(NULL, pkt_len, 0);
+	if (!pkt_buf)
+		return -ENOMEM;
+
+	msk_buf = rte_zmalloc(NULL, pkt_len, 0);
+	if (!msk_buf) {
+		rte_free(pkt_buf);
+		return -ENOMEM;
+	}
+
+	/* convert the spec and mask hex strings into byte arrays */
+	for (i = 0, j = 0; i < spec_len; i += 2, j++) {
+		tmp_c = raw_spec->pattern[i];
+		if (tmp_c >= 'a' && tmp_c <= 'f')
+			tmp_val = tmp_c - 'a' + 10;
+		if (tmp_c >= 'A' && tmp_c <= 'F')
+			tmp_val = tmp_c - 'A' + 10;
+		if (tmp_c >= '0' && tmp_c <= '9')
+			tmp_val = tmp_c - '0';
+
+		tmp_c = raw_spec->pattern[i + 1];
+		if (tmp_c >= 'a' && tmp_c <= 'f')
+			pkt_buf[j] = tmp_val * 16 + tmp_c - 'a' + 10;
+		if (tmp_c >= 'A' && tmp_c <= 'F')
+			pkt_buf[j] = tmp_val * 16 + tmp_c - 'A' + 10;
+		if (tmp_c >= '0' && tmp_c <= '9')
+			pkt_buf[j] = tmp_val * 16 + tmp_c - '0';
+
+		tmp_c = raw_mask->pattern[i];
+		if (tmp_c >= 'a' && tmp_c <= 'f')
+			tmp_val = tmp_c - 0x57;
+		if (tmp_c >= 'A' && tmp_c <= 'F')
+			tmp_val = tmp_c - 0x37;
+		if (tmp_c >= '0' && tmp_c <= '9')
+			tmp_val = tmp_c - '0';
+
+		tmp_c = raw_mask->pattern[i + 1];
+		if (tmp_c >= 'a' && tmp_c <= 'f')
+			msk_buf[j] = tmp_val * 16 + tmp_c - 'a' + 10;
+		if (tmp_c >= 'A' && tmp_c <= 'F')
+			msk_buf[j] = tmp_val * 16 + tmp_c - 'A' + 10;
+		if (tmp_c >= '0' && tmp_c <= '9')
+			msk_buf[j] = tmp_val * 16 + tmp_c - '0';
+	}
+
+	rte_memcpy(meta->proto_hdrs.raw.spec, pkt_buf, pkt_len);
+	rte_memcpy(meta->proto_hdrs.raw.mask, msk_buf, pkt_len);
+	meta->proto_hdrs.raw.pkt_len = pkt_len;
+
+	rte_free(pkt_buf);
+	rte_free(msk_buf);
+
+	return 0;
+}
+
 #define REFINE_PROTO_FLD(op, fld) \
 	VIRTCHNL_##op##_PROTO_HDR_FIELD(hdr, VIRTCHNL_PROTO_HDR_##fld)
 #define REPALCE_PROTO_FLD(fld_1, fld_2) \
@@ -572,6 +788,29 @@  do { \
 	REFINE_PROTO_FLD(ADD, fld_2);	\
 } while (0)
 
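+/* Make room in the protocol header list and add a dummy IPv4 fragment
+ * header at the given layer so the fragment PKID can be hashed.
+ */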
+static void
+iavf_hash_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
+{
+	struct virtchnl_proto_hdr *hdr1;
+	struct virtchnl_proto_hdr *hdr2;
+	int i;
+
+	if (layer < 0 || layer > hdrs->count)
+		return;
+
+	/* shift headers layer */
+	for (i = hdrs->count; i >= layer; i--) {
+		hdr1 = &hdrs->proto_hdr[i];
+		hdr2 = &hdrs->proto_hdr[i - 1];
+		*hdr1 = *hdr2;
+	}
+
+	/* adding dummy fragment header */
+	hdr1 = &hdrs->proto_hdr[layer];
+	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
+	hdrs->count = ++layer;
+}
+
 /* refine proto hdrs base on l2, l3, l4 rss type */
 static void
 iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
@@ -593,17 +832,19 @@  iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 			break;
 		case VIRTCHNL_PROTO_HDR_IPV4:
 			if (rss_type &
-			    (ETH_RSS_IPV4 |
+			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
 			     ETH_RSS_NONFRAG_IPV4_UDP |
 			     ETH_RSS_NONFRAG_IPV4_TCP |
 			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
-				if (rss_type & ETH_RSS_L3_SRC_ONLY) {
+				if (rss_type & ETH_RSS_FRAG_IPV4) {
+					iavf_hash_add_fragment_hdr(proto_hdrs, i + 1);
+				} else if (rss_type & ETH_RSS_L3_SRC_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
 				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				} else if (rss_type &
-					 (ETH_RSS_L4_SRC_ONLY |
-					  ETH_RSS_L4_DST_ONLY)) {
+					   (ETH_RSS_L4_SRC_ONLY |
+					    ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV4_DST);
 					REFINE_PROTO_FLD(DEL, IPV4_SRC);
 				}
@@ -611,9 +852,21 @@  iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 				hdr->field_selector = 0;
 			}
 			break;
+		case VIRTCHNL_PROTO_HDR_IPV4_FRAG:
+			if (rss_type &
+			    (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+			     ETH_RSS_NONFRAG_IPV4_UDP |
+			     ETH_RSS_NONFRAG_IPV4_TCP |
+			     ETH_RSS_NONFRAG_IPV4_SCTP)) {
+				if (rss_type & ETH_RSS_FRAG_IPV4)
+					REFINE_PROTO_FLD(ADD, IPV4_FRAG_PKID);
+			} else {
+				hdr->field_selector = 0;
+			}
+			break;
 		case VIRTCHNL_PROTO_HDR_IPV6:
 			if (rss_type &
-			    (ETH_RSS_IPV6 |
+			    (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
 			     ETH_RSS_NONFRAG_IPV6_UDP |
 			     ETH_RSS_NONFRAG_IPV6_TCP |
 			     ETH_RSS_NONFRAG_IPV6_SCTP)) {
@@ -622,8 +875,8 @@  iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 				} else if (rss_type & ETH_RSS_L3_DST_ONLY) {
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				} else if (rss_type &
-					 (ETH_RSS_L4_SRC_ONLY |
-					  ETH_RSS_L4_DST_ONLY)) {
+					   (ETH_RSS_L4_SRC_ONLY |
+					    ETH_RSS_L4_DST_ONLY)) {
 					REFINE_PROTO_FLD(DEL, IPV6_DST);
 					REFINE_PROTO_FLD(DEL, IPV6_SRC);
 				}
@@ -638,6 +891,13 @@  iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs *proto_hdrs,
 					REPALCE_PROTO_FLD(IPV6_DST,
 							  IPV6_PREFIX64_DST);
 			}
+			break;
+		case VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG:
+			if (rss_type & ETH_RSS_FRAG_IPV6)
+				REFINE_PROTO_FLD(ADD, IPV6_EH_FRAG_PKID);
+			else
+				hdr->field_selector = 0;
+
 			break;
 		case VIRTCHNL_PROTO_HDR_UDP:
 			if (rss_type &
@@ -747,14 +1007,15 @@  iavf_refine_proto_hdrs_by_pattern(struct virtchnl_proto_hdrs *proto_hdrs,
 	struct virtchnl_proto_hdr *hdr1;
 	struct virtchnl_proto_hdr *hdr2;
 	int i, shift_count = 1;
+	int tun_lvl = proto_hdrs->tunnel_level;
 
-	if (!(phint & IAVF_PHINT_GTPU_MSK))
+	if (!(phint & IAVF_PHINT_GTPU_MSK) && !(phint & IAVF_PHINT_GRE))
 		return;
 
-	if (phint & IAVF_PHINT_LAYERS_MSK)
-		shift_count++;
+	while (tun_lvl) {
+		if (phint & IAVF_PHINT_LAYERS_MSK)
+			shift_count = 2;
 
-	if (proto_hdrs->tunnel_level == TUNNEL_LEVEL_INNER) {
 		/* shift headers layer */
 		for (i = proto_hdrs->count - 1 + shift_count;
 		     i > shift_count - 1; i--) {
@@ -764,36 +1025,52 @@  iavf_refine_proto_hdrs_by_pattern(struct virtchnl_proto_hdrs *proto_hdrs,
 		}
 
 		if (shift_count == 1) {
-			/* adding gtpu header at layer 0 */
+			/* adding tunnel header at layer 0 */
 			hdr1 = &proto_hdrs->proto_hdr[0];
 		} else {
-			/* adding gtpu header and outer ip header */
+			/* adding tunnel header and outer ip header */
 			hdr1 = &proto_hdrs->proto_hdr[1];
 			hdr2 = &proto_hdrs->proto_hdr[0];
 			hdr2->field_selector = 0;
 			proto_hdrs->count++;
-			proto_hdrs->tunnel_level = TUNNEL_LEVEL_OUTER;
+			tun_lvl--;
+
+			if (tun_lvl == TUNNEL_LEVEL_OUTER) {
+				if (phint & IAVF_PHINT_OUTER_IPV4)
+					VIRTCHNL_SET_PROTO_HDR_TYPE(hdr2, IPV4);
+				else if (phint & IAVF_PHINT_OUTER_IPV6)
+					VIRTCHNL_SET_PROTO_HDR_TYPE(hdr2, IPV6);
+			} else if (tun_lvl == TUNNEL_LEVEL_INNER) {
+				if (phint & IAVF_PHINT_MID_IPV4)
+					VIRTCHNL_SET_PROTO_HDR_TYPE(hdr2, IPV4);
+				else if (phint & IAVF_PHINT_MID_IPV6)
+					VIRTCHNL_SET_PROTO_HDR_TYPE(hdr2, IPV6);
+			}
+		}
 
-			if (phint & IAVF_PHINT_OUTER_IPV4)
-				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr2, IPV4);
-			else if (phint & IAVF_PHINT_OUTER_IPV6)
-				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr2, IPV6);
+		hdr1->field_selector = 0;
+		proto_hdrs->count++;
+
+		if (phint & IAVF_PHINT_GTPU_EH_DWN)
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, GTPU_EH_PDU_DWN);
+		else if (phint & IAVF_PHINT_GTPU_EH_UP)
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, GTPU_EH_PDU_UP);
+		else if (phint & IAVF_PHINT_GTPU_EH)
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, GTPU_EH);
+		else if (phint & IAVF_PHINT_GTPU)
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, GTPU_IP);
+
+		if (phint & IAVF_PHINT_GRE) {
+			if (phint & IAVF_PHINT_GTPU) {
+				/* if GTPoGRE, add GRE header at the outer tunnel */
+				if (tun_lvl == TUNNEL_LEVEL_OUTER)
+					VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, GRE);
+			} else {
+				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, GRE);
+			}
 		}
-	} else {
-		hdr1 = &proto_hdrs->proto_hdr[proto_hdrs->count];
 	}
-
-	hdr1->field_selector = 0;
-	proto_hdrs->count++;
-
-	if (phint & IAVF_PHINT_GTPU_EH_DWN)
-		VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, GTPU_EH_PDU_DWN);
-	else if (phint & IAVF_PHINT_GTPU_EH_UP)
-		VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, GTPU_EH_PDU_UP);
-	else if (phint & IAVF_PHINT_GTPU_EH)
-		VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, GTPU_EH);
-	else if (phint & IAVF_PHINT_GTPU)
-		VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, GTPU_IP);
+	proto_hdrs->tunnel_level = tun_lvl;
 }
 
 static void iavf_refine_proto_hdrs(struct virtchnl_proto_hdrs *proto_hdrs,
@@ -825,8 +1102,10 @@  struct rss_attr_type {
 				 ETH_RSS_NONFRAG_IPV6_TCP	| \
 				 ETH_RSS_NONFRAG_IPV6_SCTP)
 
-#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | VALID_RSS_IPV4_L4)
-#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | VALID_RSS_IPV6_L4)
+#define VALID_RSS_IPV4		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+				 VALID_RSS_IPV4_L4)
+#define VALID_RSS_IPV6		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+				 VALID_RSS_IPV6_L4)
 #define VALID_RSS_L3		(VALID_RSS_IPV4 | VALID_RSS_IPV6)
 #define VALID_RSS_L4		(VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
 
@@ -942,6 +1221,10 @@  iavf_hash_parse_action(struct iavf_pattern_match_item *match_item,
 					RTE_FLOW_ERROR_TYPE_ACTION, action,
 					"a non-NULL RSS queue is not supported");
 
+			/* If pattern type is raw, no need to refine rss type */
+			if (pattern_hint == IAVF_PHINT_RAW)
+				break;
+
 			/**
 			 * Check simultaneous use of SRC_ONLY and DST_ONLY
 			 * of the same level.
@@ -1008,6 +1291,17 @@  iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 	if (ret)
 		goto error;
 
+	if (phint == IAVF_PHINT_RAW) {
+		rss_meta_ptr->raw_ena = true;
+		ret = iavf_hash_parse_raw_pattern(pattern, rss_meta_ptr);
+		if (ret) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					   "Parse raw pattern failed");
+			goto error;
+		}
+	}
+
 	ret = iavf_hash_parse_action(pattern_match_item, actions, phint,
 				     (void **)&rss_meta_ptr, error);
 
@@ -1089,6 +1383,7 @@  static void
 iavf_hash_uninit(struct iavf_adapter *ad)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct rte_eth_rss_conf *rss_conf;
 
 	if (vf->vf_reset)
 		return;
@@ -1099,7 +1394,8 @@  iavf_hash_uninit(struct iavf_adapter *ad)
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF))
 		return;
 
-	if (iavf_hash_default_set(ad, false))
+	rss_conf = &ad->dev_data->dev_conf.rx_adv_conf.rss_conf;
+	if (iavf_rss_hash_set(ad, rss_conf->rss_hf, false))
 		PMD_DRV_LOG(ERR, "fail to delete default RSS");
 
 	iavf_unregister_parser(&iavf_hash_parser, ad);
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index d4b4935..7a2ed65 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -540,8 +540,8 @@  void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
 	int i; \
-	for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
-		struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+	for (i = 0; i < (ad)->dev_data->nb_rx_queues; i++) { \
+		struct iavf_rx_queue *rxq = (ad)->dev_data->rx_queues[i]; \
 		if (!rxq) \
 			continue; \
 		rxq->fdir_enabled = on; \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 8f28afc..233e3c4 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -640,7 +640,10 @@  _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 {
 #define IAVF_DESCS_PER_LOOP_AVX 8
 
-	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+	struct iavf_adapter *adapter = rxq->vsi->adapter;
+
+	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+	const uint32_t *type_table = adapter->ptype_tbl;
 
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
 			0, rxq->mbuf_initializer);
@@ -996,8 +999,7 @@  _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+		if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh7 =
 				_mm_load_si128
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 584d12e..6e2484a 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -638,7 +638,11 @@  _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 					struct rte_mbuf **rx_pkts,
 					uint16_t nb_pkts, uint8_t *split_packet)
 {
-	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+	struct iavf_adapter *adapter = rxq->vsi->adapter;
+
+	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+
+	const uint32_t *type_table = adapter->ptype_tbl;
 
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
 						    rxq->mbuf_initializer);
@@ -1011,8 +1015,7 @@  _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-		    DEV_RX_OFFLOAD_RSS_HASH) {
+		if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh7 =
 				_mm_load_si128
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 75c77f9..6b2baf2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -644,7 +644,9 @@  _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 	uint16_t nb_pkts_recd;
 	int pos;
 	uint64_t var;
-	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	struct iavf_adapter *adapter = rxq->vsi->adapter;
+	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+	const uint32_t *ptype_tbl = adapter->ptype_tbl;
 	__m128i crc_adjust = _mm_set_epi16
 				(0, 0, 0,       /* ignore non-length fields */
 				 -rxq->crc_len, /* sub crc on data_len */
@@ -817,8 +819,7 @@  _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+		if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index c001da5..b3ab5bc 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -71,7 +71,6 @@  iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
-	struct rte_eth_dev *dev = adapter->eth_dev;
 	struct iavf_arq_event_info event;
 	enum iavf_aq_result result = IAVF_MSG_NON;
 	enum virtchnl_ops opcode;
@@ -113,7 +112,7 @@  iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 				speed = vpe->event_data.link_event.link_speed;
 				vf->link_speed = iavf_convert_link_speed(speed);
 			}
-			iavf_dev_link_update(dev, 0);
+			iavf_dev_link_update(vf->eth_dev, 0);
 			PMD_DRV_LOG(INFO, "Link status update:%s",
 					vf->link_up ? "up" : "down");
 			break;
@@ -539,8 +538,8 @@  iavf_enable_queues(struct iavf_adapter *adapter)
 	memset(&queue_select, 0, sizeof(queue_select));
 	queue_select.vsi_id = vf->vsi_res->vsi_id;
 
-	queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
-	queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+	queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
+	queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
 
 	args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
 	args.in_args = (u8 *)&queue_select;
@@ -567,8 +566,8 @@  iavf_disable_queues(struct iavf_adapter *adapter)
 	memset(&queue_select, 0, sizeof(queue_select));
 	queue_select.vsi_id = vf->vsi_res->vsi_id;
 
-	queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
-	queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+	queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
+	queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
 
 	args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
 	args.in_args = (u8 *)&queue_select;
@@ -638,12 +637,12 @@  iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
-		adapter->eth_dev->data->nb_tx_queues;
+		adapter->dev_data->nb_tx_queues;
 
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
-		adapter->eth_dev->data->nb_rx_queues;
+		adapter->dev_data->nb_rx_queues;
 
 	args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
 	args.in_args = (u8 *)queue_select;
@@ -682,12 +681,12 @@  iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
-		adapter->eth_dev->data->nb_tx_queues;
+		adapter->dev_data->nb_tx_queues;
 
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
-		adapter->eth_dev->data->nb_rx_queues;
+		adapter->dev_data->nb_rx_queues;
 
 	args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
 	args.in_args = (u8 *)queue_select;
@@ -816,9 +815,9 @@  iavf_configure_queues(struct iavf_adapter *adapter,
 		uint16_t num_queue_pairs, uint16_t index)
 {
 	struct iavf_rx_queue **rxq =
-		(struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
+		(struct iavf_rx_queue **)adapter->dev_data->rx_queues;
 	struct iavf_tx_queue **txq =
-		(struct iavf_tx_queue **)adapter->eth_dev->data->tx_queues;
+		(struct iavf_tx_queue **)adapter->dev_data->tx_queues;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct virtchnl_vsi_queue_config_info *vc_config;
 	struct virtchnl_queue_pair_info *vc_qp;
@@ -842,7 +841,7 @@  iavf_configure_queues(struct iavf_adapter *adapter,
 		vc_qp->txq.queue_id = i;
 
 		/* Virtchnl configures Tx queues in pairs */
-		if (i < adapter->eth_dev->data->nb_tx_queues) {
+		if (i < adapter->dev_data->nb_tx_queues) {
 			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
 			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
 		}
@@ -851,7 +850,7 @@  iavf_configure_queues(struct iavf_adapter *adapter,
 		vc_qp->rxq.queue_id = i;
 		vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
 
-		if (i >= adapter->eth_dev->data->nb_rx_queues)
+		if (i >= adapter->dev_data->nb_rx_queues)
 			continue;
 
 		/* Virtchnl configures Rx queues in pairs */
@@ -920,7 +919,7 @@  iavf_config_irq_map(struct iavf_adapter *adapter)
 		return -ENOMEM;
 
 	map_info->num_vectors = vf->nb_msix;
-	for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+	for (i = 0; i < adapter->dev_data->nb_rx_queues; i++) {
 		vecmap =
 		    &map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
 		vecmap->vsi_id = vf->vsi_res->vsi_id;
@@ -999,7 +998,7 @@  iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		j = 0;
 		len = sizeof(struct virtchnl_ether_addr_list);
 		for (i = begin; i < IAVF_NUM_MACADDR_MAX; i++, next_begin++) {
-			addr = &adapter->eth_dev->data->mac_addrs[i];
+			addr = &adapter->dev_data->mac_addrs[i];
 			if (rte_is_zero_ether_addr(addr))
 				continue;
 			len += sizeof(struct virtchnl_ether_addr);
@@ -1016,7 +1015,7 @@  iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		}
 
 		for (i = begin; i < next_begin; i++) {
-			addr = &adapter->eth_dev->data->mac_addrs[i];
+			addr = &adapter->dev_data->mac_addrs[i];
 			if (rte_is_zero_ether_addr(addr))
 				continue;
 			list->list[j].type = VIRTCHNL_ETHER_ADDR_EXTRA;
@@ -1350,6 +1349,53 @@  iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	return err;
 }
 
+int
+iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
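+	/* Ask the PF which RSS hash-enable (HENA) bits this VF may use. */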
+	args.ops = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
+	args.in_args = NULL;
+	args.in_args_size = 0;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
+		return err;
+	}
+
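+	/* The PF replies with a struct virtchnl_rss_hena; report its hena bitmap. */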
+	*caps = ((struct virtchnl_rss_hena *)args.out_buffer)->hena;
+	return 0;
+}
+
+int
+iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_rss_hena vrh;
+	struct iavf_cmd_info args;
+	int err;
+
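+	/* Program the VF's RSS hash-enable bitmap via VIRTCHNL_OP_SET_RSS_HENA. */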
+	vrh.hena = hena;
+	args.ops = VIRTCHNL_OP_SET_RSS_HENA;
+	args.in_args = (u8 *)&vrh;
+	args.in_args_size = sizeof(vrh);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_SET_RSS_HENA");
+
+	return err;
+}
+
 int
 iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
@@ -1405,9 +1451,10 @@  iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 }
 
 int
-iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
+iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
 {
-	struct rte_eth_dev *dev = adapter->eth_dev;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct virtchnl_vf_res_request vfres;
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index b0b2ecb..ab33cdd 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -863,6 +863,59 @@  ice_dcf_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/* Add UDP tunneling port */
+static int
+ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+				struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_adapter *parent_adapter = &adapter->parent;
+	struct ice_hw *parent_hw = &parent_adapter->hw;
+	int ret = 0;
+
+	if (!udp_tunnel)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_create_tunnel(parent_hw, TNL_VXLAN,
+					udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/* Delete UDP tunneling port */
+static int
+ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+				struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_adapter *parent_adapter = &adapter->parent;
+	struct ice_hw *parent_hw = &parent_adapter->hw;
+	int ret = 0;
+
+	if (!udp_tunnel)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_destroy_tunnel(parent_hw, udp_tunnel->udp_port, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
 static int
 ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
 		    __rte_unused int wait_to_complete)
@@ -870,6 +923,19 @@  ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+ice_dcf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
+{
+	/* MTU setting is forbidden while the port is started */
+	if (dev->data->dev_started != 0) {
+		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+			    dev->data->port_id);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
 static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
 	.dev_start               = ice_dcf_dev_start,
 	.dev_stop                = ice_dcf_dev_stop,
@@ -887,11 +953,14 @@  static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
 	.link_update             = ice_dcf_link_update,
 	.stats_get               = ice_dcf_stats_get,
 	.stats_reset             = ice_dcf_stats_reset,
+	.udp_tunnel_port_add     = ice_dcf_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del     = ice_dcf_dev_udp_tunnel_port_del,
 	.promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
 	.promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
 	.allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
 	.allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
 	.filter_ctrl             = ice_dcf_dev_filter_ctrl,
+	.mtu_set                 = ice_dcf_dev_mtu_set,
 };
 
 static int
diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h
index b54528b..7911f4b 100644
--- a/drivers/net/ice/ice_dcf_ethdev.h
+++ b/drivers/net/ice/ice_dcf_ethdev.h
@@ -13,6 +13,12 @@ 
 
 #define ICE_DCF_MAX_RINGS  1
 
+#define ICE_DCF_FRAME_SIZE_MAX       9728
+#define ICE_DCF_VLAN_TAG_SIZE               4
+#define ICE_DCF_ETH_OVERHEAD \
+	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_DCF_VLAN_TAG_SIZE * 2)
+#define ICE_DCF_ETH_MAX_LEN (RTE_ETHER_MTU + ICE_DCF_ETH_OVERHEAD)
+
 struct ice_dcf_queue {
 	uint64_t dummy;
 };