[v2,14/14] net/igc: implement flow API

Message ID: 1584672375-376187-15-git-send-email-alvinx.zhang@intel.com
State: Changes Requested, archived
Delegated to: Ferruh Yigit
Series: igc PMD

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Alvin Zhang March 20, 2020, 2:46 a.m. UTC
  From: Alvin Zhang <alvinx.zhang@intel.com>

The following types of flows are supported (an illustrative sketch follows the list):
ether-type filter,
2-tuple filter,
SYN filter,
RSS
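
For illustration, a minimal sketch of creating one of these rules through
the generic rte_flow API; the port id, queue index and EtherType are
arbitrary example values, not part of this patch:

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_flow.h>

/* Illustrative only: steer LLDP frames (EtherType 0x88cc) to Rx queue 1. */
static int
create_ethertype_rule(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x88cc) };
	/* the ether-type mask bits must be all 1 */
	struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xffff) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_create(port_id, &attr, pattern, actions, &err) == NULL)
		return -rte_errno;
	return 0;
}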

Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
---
 drivers/net/igc/Makefile     |   1 +
 drivers/net/igc/igc_ethdev.c |   3 +
 drivers/net/igc/igc_ethdev.h |  27 ++
 drivers/net/igc/igc_filter.c |   7 +
 drivers/net/igc/igc_flow.c   | 894 +++++++++++++++++++++++++++++++++++++++++++
 drivers/net/igc/igc_flow.h   |  25 ++
 drivers/net/igc/igc_txrx.c   | 126 ++++++
 drivers/net/igc/igc_txrx.h   |   5 +
 drivers/net/igc/meson.build  |   3 +-
 9 files changed, 1090 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/igc/igc_flow.c
 create mode 100644 drivers/net/igc/igc_flow.h
  

Comments

Ferruh Yigit April 3, 2020, 12:26 p.m. UTC | #1
On 3/20/2020 2:46 AM, alvinx.zhang@intel.com wrote:
> From: Alvin Zhang <alvinx.zhang@intel.com>
> 
> The following types of flows are supported:
> ether-type filter,
> 2-tuple filter,
> SYN filter,
> RSS
> 
> Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>

<...>

> @@ -852,6 +854,11 @@
>  	case RTE_ETH_FILTER_HASH:
>  		ret = igc_hash_filter_ctrl(dev, filter_op, arg);
>  		break;
> +	case RTE_ETH_FILTER_GENERIC:
> +		if (filter_op != RTE_ETH_FILTER_GET)
> +			return -EINVAL;
> +		*(const void **)arg = &igc_flow_ops;
> +		break;

This patch implements the flow API, so the "Flow API" feature can be set in
this patch.
Btw, what filtering is enabled with this flow API is not clear, at least to me.
What do you think about adding some documentation for it? It would be even
better to provide some samples too on how to use them, and to document any
limitations etc. as well.
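
As a sketch of what such a sample could look like, the snippet below builds
an RSS rule through the rte_flow API; the queue list and hash types are
arbitrary example values rather than anything mandated by the patch:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Illustrative only: spread IPv4 TCP/UDP traffic over Rx queues 0-3. */
static struct rte_flow *
create_rss_rule(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	struct rte_flow_attr attr = { .ingress = 1 };
	/* an empty pattern selects the RSS filter in this driver */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,		/* outermost header only */
		.types = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
			 ETH_RSS_NONFRAG_IPV4_UDP,
		.key = NULL,		/* keep the default 40-byte hash key */
		.key_len = 0,
		.queue = queues,
		.queue_num = RTE_DIM(queues),
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}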
  

Patch

diff --git a/drivers/net/igc/Makefile b/drivers/net/igc/Makefile
index 97a8e76..ddc157a 100644
--- a/drivers/net/igc/Makefile
+++ b/drivers/net/igc/Makefile
@@ -68,5 +68,6 @@  SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_logs.c
 SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_txrx.c
 SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index dd32618..1bfc69f 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -12,6 +12,7 @@ 
 #include "igc_logs.h"
 #include "igc_txrx.h"
 #include "igc_filter.h"
+#include "igc_flow.h"
 
 #define IGC_INTEL_VENDOR_ID		0x8086
 
@@ -1155,6 +1156,7 @@  static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 	if (!adapter->stopped)
 		eth_igc_stop(dev);
 
+	igc_flow_flush(dev, NULL);
 	igc_clear_all_filter(dev);
 
 	igc_intr_other_disable(dev);
@@ -1325,6 +1327,7 @@  static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 		igc->rxq_stats_map[i] = -1;
 	}
 
+	igc_flow_init(dev);
 	igc_clear_all_filter(dev);
 	return 0;
 
diff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h
index 91a3198..0892651 100644
--- a/drivers/net/igc/igc_ethdev.h
+++ b/drivers/net/igc/igc_ethdev.h
@@ -192,6 +192,25 @@  struct igc_syn_filter {
 		enable:1;	/* 1-enable; 0-disable */
 };
 
+/* Structure to store the RTE flow RSS configuration. */
+struct igc_rss_filter {
+	struct rte_flow_action_rss conf; /**< RSS parameters. */
+	uint8_t key[IGC_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
+	uint16_t queue[IGC_RSS_RDT_SIZD]; /* Queue indices to use. */
+	uint8_t enable;	/* 1-enabled, 0-disabled */
+};
+
+/* Structure to store flow */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+	enum rte_filter_type filter_type;
+	RTE_STD_C11
+	char filter[0];		/* filter data */
+};
+
+/* Flow list header */
+TAILQ_HEAD(igc_flow_list, rte_flow);
+
 /*
  * Structure to store private data for each driver instance (for each port).
  */
@@ -209,6 +228,8 @@  struct igc_adapter {
 	struct igc_ethertype_filter ethertype_filters[IGC_MAX_ETQF_FILTERS];
 	struct igc_2tuple_filter tuple2_filters[IGC_MAX_2TUPLE_FILTERS];
 	struct igc_syn_filter syn_filter;
+	struct igc_rss_filter rss_filter;
+	struct igc_flow_list flow_list;
 };
 
 #define IGC_DEV_PRIVATE(_dev)	((_dev)->data->dev_private)
@@ -228,6 +249,12 @@  struct igc_adapter {
 #define IGC_DEV_PRIVATE_VFTA(_dev) \
 	(&((struct igc_adapter *)(_dev)->data->dev_private)->shadow_vfta)
 
+#define IGC_DEV_PRIVATE_RSS_FILTER(_dev) \
+	(&((struct igc_adapter *)(_dev)->data->dev_private)->rss_filter)
+
+#define IGC_DEV_PRIVATE_FLOW_LIST(_dev) \
+	(&((struct igc_adapter *)(_dev)->data->dev_private)->flow_list)
+
 static inline void
 igc_read_reg_check_set_bits(struct igc_hw *hw, uint32_t reg, uint32_t bits)
 {
diff --git a/drivers/net/igc/igc_filter.c b/drivers/net/igc/igc_filter.c
index 02f5720..d3e21cf 100644
--- a/drivers/net/igc/igc_filter.c
+++ b/drivers/net/igc/igc_filter.c
@@ -6,6 +6,7 @@ 
 #include "igc_logs.h"
 #include "igc_txrx.h"
 #include "igc_filter.h"
+#include "igc_flow.h"
 
 /*
  * igc_ethertype_filter_lookup - lookup ether-type filter
@@ -828,6 +829,7 @@ 
 	igc_clear_all_ethertype_filter(dev);
 	igc_clear_all_ntuple_filter(dev);
 	igc_clear_syn_filter(dev);
+	igc_clear_rss_filter(dev);
 }
 
 int
@@ -852,6 +854,11 @@ 
 	case RTE_ETH_FILTER_HASH:
 		ret = igc_hash_filter_ctrl(dev, filter_op, arg);
 		break;
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &igc_flow_ops;
+		break;
 	default:
 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
 							filter_type);
diff --git a/drivers/net/igc/igc_flow.c b/drivers/net/igc/igc_flow.c
new file mode 100644
index 0000000..491d457
--- /dev/null
+++ b/drivers/net/igc/igc_flow.c
@@ -0,0 +1,894 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2020 Intel Corporation
+ */
+
+#include <rte_malloc.h>
+#include "igc_logs.h"
+#include "igc_txrx.h"
+#include "igc_filter.h"
+#include "igc_flow.h"
+
+/*********************************************************************
+ * All Supported Rule Types
+ *
+ * ether-type filter
+ * pattern: ETH(type)/END
+ * action: QUEUE/END
+ * attribute:
+ *
+ * n-tuple filter
+ * pattern: [ETH/]([IPv4(protocol)|IPv6(protocol)/][UDP(dst_port)|
+ *          TCP([dst_port],[flags])|SCTP(dst_port)/])END
+ * action: QUEUE/END
+ * attribute: [priority(0-7)]
+ *
+ * SYN filter
+ * pattern: [ETH/][IPv4|IPv6/]TCP(flags=SYN)/END
+ * action: QUEUE/END
+ * attribute: [priority(0,1)]
+ *
+ * RSS filter
+ * pattern:
+ * action: RSS/END
+ * attribute:
+ ********************************************************************/
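+
+/*
+ * Illustrative examples only (testpmd flow syntax; port, queue and
+ * field values are arbitrary):
+ *
+ * ether-type filter:
+ *   flow create 0 ingress pattern eth type is 0x88cc / end
+ *        actions queue index 1 / end
+ *
+ * n-tuple filter:
+ *   flow create 0 ingress priority 3 pattern ipv4 proto is 17 /
+ *        udp dst is 4789 / end actions queue index 2 / end
+ *
+ * SYN filter:
+ *   flow create 0 ingress pattern tcp flags spec 0x02 flags mask 0x02 /
+ *        end actions queue index 3 / end
+ *
+ * RSS:
+ *   flow create 0 ingress pattern end
+ *        actions rss types ipv4-tcp end queues 0 1 2 3 end / end
+ */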
+
+/* Structure of all filters */
+struct igc_all_filter {
+	struct rte_eth_ethertype_filter ethertype;
+	struct rte_eth_ntuple_filter ntuple;
+	struct rte_eth_syn_filter syn;
+	struct igc_rss_filter rss;
+	uint32_t	mask;	/* see IGC_FILTER_MASK_* definition */
+};
+
+#define IGC_FILTER_MASK_ETHER	(1U << RTE_ETH_FILTER_ETHERTYPE)
+#define IGC_FILTER_MASK_NTUPLE	(1U << RTE_ETH_FILTER_NTUPLE)
+#define IGC_FILTER_MASK_TCP_SYN	(1U << RTE_ETH_FILTER_SYN)
+#define IGC_FILTER_MASK_RSS	(1U << RTE_ETH_FILTER_HASH)
+#define IGC_FILTER_MASK_ALL	(IGC_FILTER_MASK_ETHER |	\
+				IGC_FILTER_MASK_NTUPLE |	\
+				IGC_FILTER_MASK_TCP_SYN |	\
+				IGC_FILTER_MASK_RSS)
+
+#define IGC_SET_FILTER_MASK(_filter, _mask_bits)	\
+		((_filter)->mask &= (_mask_bits))
+
+#define IGC_IS_ALL_BITS_SET(_val)	((_val) == (typeof(_val))~0)
+#define IGC_NOT_ALL_BITS_SET(_val)	((_val) != (typeof(_val))~0)
+
+/* Parse rule attribute */
+static int
+igc_parse_attribute(const struct rte_flow_attr *attr,
+	struct igc_all_filter *filter, struct rte_flow_error *error)
+{
+	if (!attr)
+		return 0;
+
+	if (attr->group)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+				"Not supported");
+
+	if (attr->egress)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+				"Not supported");
+
+	if (attr->transfer)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+				"Not supported");
+
+	if (!attr->ingress)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+				"A rule must apply to ingress traffic");
+
+	if (attr->priority == 0)
+		return 0;
+
+	/* only n-tuple and SYN filters have a priority level */
+	IGC_SET_FILTER_MASK(filter,
+		IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
+
+	if (IGC_IS_ALL_BITS_SET(attr->priority)) {
+		/* only the SYN filter matches this value */
+		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_TCP_SYN);
+		filter->syn.hig_pri = 1;
+		return 0;
+	}
+
+	if (attr->priority > IGC_2TUPLE_MAX_PRI)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+				"Priority value is invalid.");
+
+	if (attr->priority > 1) {
+		/* only the n-tuple filter matches this value */
+		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+
+		/* get priority */
+		filter->ntuple.priority = (uint16_t)attr->priority;
+		return 0;
+	}
+
+	/* get priority */
+	filter->ntuple.priority = (uint16_t)attr->priority;
+	filter->syn.hig_pri = (uint8_t)attr->priority;
+
+	return 0;
+}
+
+/* function type of parse pattern */
+typedef int (*igc_pattern_parse)(const struct rte_flow_item *,
+		struct igc_all_filter *, struct rte_flow_error *);
+
+static int igc_parse_pattern_void(__rte_unused const struct rte_flow_item *item,
+		__rte_unused struct igc_all_filter *filter,
+		__rte_unused struct rte_flow_error *error);
+static int igc_parse_pattern_ether(const struct rte_flow_item *item,
+		struct igc_all_filter *filter, struct rte_flow_error *error);
+static int igc_parse_pattern_ip(const struct rte_flow_item *item,
+		struct igc_all_filter *filter, struct rte_flow_error *error);
+static int igc_parse_pattern_ipv6(const struct rte_flow_item *item,
+		struct igc_all_filter *filter, struct rte_flow_error *error);
+static int igc_parse_pattern_udp(const struct rte_flow_item *item,
+		struct igc_all_filter *filter, struct rte_flow_error *error);
+static int igc_parse_pattern_tcp(const struct rte_flow_item *item,
+		struct igc_all_filter *filter, struct rte_flow_error *error);
+
+static igc_pattern_parse pattern_parse_list[] = {
+		[RTE_FLOW_ITEM_TYPE_VOID] = igc_parse_pattern_void,
+		[RTE_FLOW_ITEM_TYPE_ETH] = igc_parse_pattern_ether,
+		[RTE_FLOW_ITEM_TYPE_IPV4] = igc_parse_pattern_ip,
+		[RTE_FLOW_ITEM_TYPE_IPV6] = igc_parse_pattern_ipv6,
+		[RTE_FLOW_ITEM_TYPE_UDP] = igc_parse_pattern_udp,
+		[RTE_FLOW_ITEM_TYPE_TCP] = igc_parse_pattern_tcp,
+};
+
+/* Parse rule patterns */
+static int
+igc_parse_patterns(const struct rte_flow_item patterns[],
+	struct igc_all_filter *filter, struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = patterns;
+
+	if (item == NULL) {
+		/* only the RSS filter matches this pattern */
+		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_RSS);
+		return 0;
+	}
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		int ret;
+
+		if (item->type >= RTE_DIM(pattern_parse_list))
+			return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Not supported");
+
+		if (item->last)
+			return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
+					"Range not supported");
+
+		/* check pattern format is valid */
+		if (!!item->spec ^ !!item->mask)
+			return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Format error");
+
+		/* get the pattern type callback */
+		igc_pattern_parse parse_func =
+				pattern_parse_list[item->type];
+		if (!parse_func)
+			return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Not supported");
+
+		/* call the pattern type function */
+		ret = parse_func(item, filter, error);
+		if (ret)
+			return ret;
+
+		/* if no filter matches the pattern */
+		if (filter->mask == 0)
+			return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Not supported");
+	}
+
+	return 0;
+}
+
+static int igc_parse_action_queue(struct rte_eth_dev *dev,
+		const struct rte_flow_action *act,
+		struct igc_all_filter *filter, struct rte_flow_error *error);
+static int igc_parse_action_rss(struct rte_eth_dev *dev,
+		const struct rte_flow_action *act,
+		struct igc_all_filter *filter, struct rte_flow_error *error);
+
+/* Parse flow actions */
+static int
+igc_parse_actions(struct rte_eth_dev *dev,
+		const struct rte_flow_action actions[],
+		struct igc_all_filter *filter,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_action *act = actions;
+	int ret;
+
+	if (act == NULL)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_NUM, act,
+				"Action is needed");
+
+	for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
+		switch (act->type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			ret = igc_parse_action_queue(dev, act, filter, error);
+			if (ret)
+				return ret;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			ret = igc_parse_action_rss(dev, act, filter, error);
+			if (ret)
+				return ret;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, act,
+					"Not supported");
+		}
+
+		/* if no filter matches the action */
+		if (filter->mask == 0)
+			return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, act,
+					"Not supported");
+	}
+
+	return 0;
+}
+
+/* Parse a flow rule */
+static int
+igc_parse_flow(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item patterns[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error,
+		struct igc_all_filter *filter)
+{
+	int ret;
+
+	/* clear all filters */
+	memset(filter, 0, sizeof(*filter));
+
+	/* set default filter mask */
+	filter->mask = IGC_FILTER_MASK_ALL;
+
+	ret = igc_parse_attribute(attr, filter, error);
+	if (ret)
+		return ret;
+
+	ret = igc_parse_patterns(patterns, filter, error);
+	if (ret)
+		return ret;
+
+	ret = igc_parse_actions(dev, actions, filter, error);
+	if (ret)
+		return ret;
+
+	/* if no filter, or more than one filter, matched this flow */
+	if (filter->mask == 0 || (filter->mask & (filter->mask - 1)))
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"Flow can't be recognized");
+	return 0;
+}
+
+/* Parse pattern type of void */
+static int
+igc_parse_pattern_void(__rte_unused const struct rte_flow_item *item,
+		__rte_unused struct igc_all_filter *filter,
+		__rte_unused struct rte_flow_error *error)
+{
+	return 0;
+}
+
+/* Parse pattern type of ethernet header */
+static int
+igc_parse_pattern_ether(const struct rte_flow_item *item,
+		struct igc_all_filter *filter,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	struct rte_eth_ethertype_filter *ether;
+
+	if (mask == NULL) {
+		/* only n-tuple and SYN filters match the pattern */
+		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE |
+				IGC_FILTER_MASK_TCP_SYN);
+		return 0;
+	}
+
+	/* only the ether-type filter matches the pattern */
+	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_ETHER);
+
+	/* destination and source MAC addresses are not supported */
+	if (!rte_is_zero_ether_addr(&mask->src) ||
+		!rte_is_zero_ether_addr(&mask->dst))
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+				"Only support ether-type");
+
+	/* ether-type mask bits must be all 1 */
+	if (IGC_NOT_ALL_BITS_SET(mask->type))
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+				"Ethernet type mask bits must be all 1");
+
+	ether = &filter->ethertype;
+
+	/* get ether-type */
+	ether->ether_type = rte_be_to_cpu_16(spec->type);
+
+	/* ether-type should not be IPv4 or IPv6 */
+	if (ether->ether_type == RTE_ETHER_TYPE_IPV4 ||
+		ether->ether_type == RTE_ETHER_TYPE_IPV6)
+		return rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"IPv4/IPv6 not supported by ethertype filter");
+	return 0;
+}
+
+/* Parse pattern type of IP */
+static int
+igc_parse_pattern_ip(const struct rte_flow_item *item,
+		struct igc_all_filter *filter,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ipv4 *spec = item->spec;
+	const struct rte_flow_item_ipv4 *mask = item->mask;
+
+	if (mask == NULL) {
+		/* only n-tuple and SYN filters match this pattern */
+		IGC_SET_FILTER_MASK(filter,
+			IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
+		return 0;
+	}
+
+	/* only the n-tuple filter matches this pattern */
+	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+
+	/* only protocol is used */
+	if (mask->hdr.version_ihl ||
+		mask->hdr.type_of_service ||
+		mask->hdr.total_length ||
+		mask->hdr.packet_id ||
+		mask->hdr.fragment_offset ||
+		mask->hdr.time_to_live ||
+		mask->hdr.hdr_checksum ||
+		mask->hdr.dst_addr ||
+		mask->hdr.src_addr)
+		return rte_flow_error_set(error,
+			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+			"IPv4 only supports protocol");
+
+	if (mask->hdr.next_proto_id == 0)
+		return 0;
+
+	if (IGC_NOT_ALL_BITS_SET(mask->hdr.next_proto_id))
+		return rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+				"IPv4 protocol mask bits must be all 0 or 1");
+
+	/* get protocol type and protocol mask */
+	filter->ntuple.proto_mask  = mask->hdr.next_proto_id;
+	filter->ntuple.proto  = spec->hdr.next_proto_id;
+	filter->ntuple.flags |= RTE_NTUPLE_FLAGS_PROTO;
+
+	return 0;
+}
+
+/*
+ * Check whether an IPv6 address is all zeros.
+ * Return true if it is, false otherwise.
+ */
+static inline bool
+igc_is_zero_ipv6_addr(const void *ipv6_addr)
+{
+	const uint64_t *ddw = ipv6_addr;
+	return ddw[0] == 0 && ddw[1] == 0;
+}
+
+/* Parse pattern type of IPv6 */
+static int
+igc_parse_pattern_ipv6(const struct rte_flow_item *item,
+		struct igc_all_filter *filter,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ipv6 *spec = item->spec;
+	const struct rte_flow_item_ipv6 *mask = item->mask;
+
+	if (mask == NULL) {
+		/* only n-tuple and SYN filters match this pattern */
+		IGC_SET_FILTER_MASK(filter,
+			IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
+		return 0;
+	}
+
+	/* only the n-tuple filter matches this pattern */
+	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+
+	/* only protocol is used */
+	if (mask->hdr.vtc_flow ||
+		mask->hdr.payload_len ||
+		mask->hdr.hop_limits ||
+		!igc_is_zero_ipv6_addr(mask->hdr.src_addr) ||
+		!igc_is_zero_ipv6_addr(mask->hdr.dst_addr))
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"IPv6 only supports protocol");
+
+	if (mask->hdr.proto == 0)
+		return 0;
+
+	if (IGC_NOT_ALL_BITS_SET(mask->hdr.proto))
+		return rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+				"IPv6 protocol mask bits must be all 0 or 1");
+
+	/* get protocol type and protocol mask */
+	filter->ntuple.proto_mask  = mask->hdr.proto;
+	filter->ntuple.proto  = spec->hdr.proto;
+	filter->ntuple.flags |= RTE_NTUPLE_FLAGS_PROTO;
+
+	return 0;
+}
+
+/* Parse pattern type of UDP */
+static int
+igc_parse_pattern_udp(const struct rte_flow_item *item,
+		struct igc_all_filter *filter,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_item_udp *spec = item->spec;
+	const struct rte_flow_item_udp *mask = item->mask;
+
+	/* only the n-tuple filter matches this pattern */
+	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+
+	if (mask == NULL)
+		return 0;
+
+	/* only destination port is used */
+	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum || mask->hdr.src_port)
+		return rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+			"UDP only supports destination port");
+
+	if (mask->hdr.dst_port == 0)
+		return 0;
+
+	if (IGC_NOT_ALL_BITS_SET(mask->hdr.dst_port))
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+				"UDP port mask bits must be all 0 or 1");
+
+	/* get destination port info. */
+	filter->ntuple.dst_port_mask = mask->hdr.dst_port;
+	filter->ntuple.dst_port = spec->hdr.dst_port;
+	filter->ntuple.flags |= RTE_NTUPLE_FLAGS_DST_PORT;
+
+	return 0;
+}
+
+/* Parse pattern type of TCP */
+static int
+igc_parse_pattern_tcp(const struct rte_flow_item *item,
+		struct igc_all_filter *filter,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_item_tcp *spec = item->spec;
+	const struct rte_flow_item_tcp *mask = item->mask;
+
+	if (mask == NULL) {
+		/* only the n-tuple filter matches this pattern */
+		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+		return 0;
+	}
+
+	/* only n-tuple and SYN filters match this pattern */
+	IGC_SET_FILTER_MASK(filter,
+			IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
+
+	/* only destination port and TCP flags are used */
+	if (mask->hdr.sent_seq ||
+		mask->hdr.recv_ack ||
+		mask->hdr.data_off ||
+		mask->hdr.rx_win ||
+		mask->hdr.cksum ||
+		mask->hdr.tcp_urp ||
+		mask->hdr.src_port)
+		return rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+			"TCP only supports destination port and flags");
+
+	/* if destination port is used */
+	if (mask->hdr.dst_port) {
+		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+		if (IGC_NOT_ALL_BITS_SET(mask->hdr.dst_port))
+			return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+				"TCP port mask bits must be all 1");
+
+		/* get destination port info. */
+		filter->ntuple.dst_port = spec->hdr.dst_port;
+		filter->ntuple.dst_port_mask = mask->hdr.dst_port;
+		filter->ntuple.flags |= RTE_NTUPLE_FLAGS_DST_PORT;
+	}
+
+	/* if TCP flags are used */
+	if (mask->hdr.tcp_flags) {
+		if (IGC_IS_ALL_BITS_SET(mask->hdr.tcp_flags)) {
+			/* only the n-tuple filter matches this pattern */
+			IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+
+			/* get TCP flags */
+			filter->ntuple.tcp_flags = spec->hdr.tcp_flags;
+			filter->ntuple.flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+		} else if (mask->hdr.tcp_flags == RTE_TCP_SYN_FLAG) {
+			/* only the TCP SYN filter matches this pattern */
+			IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_TCP_SYN);
+		} else {
+			/* no filter matches this pattern */
+			return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
+					"TCP flags can't match");
+		}
+	} else {
+		/* only the n-tuple filter matches this pattern */
+		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
+	}
+
+	return 0;
+}
+
+static int
+igc_parse_action_queue(struct rte_eth_dev *dev,
+		const struct rte_flow_action *act,
+		struct igc_all_filter *filter,
+		struct rte_flow_error *error)
+{
+	uint16_t queue_idx;
+
+	if (act->conf == NULL)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+				"NULL pointer");
+
+	/* only ether-type, n-tuple and SYN filters match the action */
+	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_ETHER |
+			IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
+
+	/* get queue index */
+	queue_idx = ((const struct rte_flow_action_queue *)act->conf)->index;
+
+	/* check the queue index is valid */
+	if (queue_idx >= dev->data->nb_rx_queues)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+				"Queue id is invalid");
+
+	/* get queue info. */
+	filter->ethertype.queue = queue_idx;
+	filter->ntuple.queue = queue_idx;
+	filter->syn.queue = queue_idx;
+	return 0;
+}
+
+/* Parse action of RSS */
+static int
+igc_parse_action_rss(struct rte_eth_dev *dev,
+		const struct rte_flow_action *act,
+		struct igc_all_filter *filter,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_action_rss *rss = act->conf;
+	uint32_t i;
+
+	if (act->conf == NULL)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+				"NULL pointer");
+
+	/* only the RSS filter matches the action */
+	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_RSS);
+
+	/* the RSS queue count can't be zero or exceed the RETA size */
+	if (!rss || !rss->queue_num || rss->queue_num > IGC_RSS_RDT_SIZD)
+		return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+				"No valid queues");
+
+	/* queue index can't exceed max queue index */
+	for (i = 0; i < rss->queue_num; i++) {
+		if (rss->queue[i] >= dev->data->nb_rx_queues)
+			return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+					"Queue id is invalid");
+	}
+
+	/* only the default RSS hash function is supported */
+	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+				"Only default RSS hash function is supported");
+
+	if (rss->level)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+				"Only 0 RSS encapsulation level is supported");
+
+	/* check key length is valid */
+	if (rss->key_len && rss->key_len != sizeof(filter->rss.key))
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+				"RSS hash key must be exactly 40 bytes");
+
+	/* get RSS info. */
+	igc_rss_conf_set(&filter->rss, rss);
+	return 0;
+}
+
+/**
+ * Allocate a rte_flow from the heap
+ * Return a pointer to the flow, or NULL on failure
+ **/
+static inline struct rte_flow *
+igc_alloc_flow(const void *filter, enum rte_filter_type type, uint inbytes)
+{
+	/* allocate memory, 8-byte boundary aligned */
+	struct rte_flow *flow = rte_malloc("igc flow filter",
+			sizeof(struct rte_flow) + inbytes, 8);
+	if (flow == NULL) {
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
+		return NULL;
+	}
+
+	flow->filter_type = type;
+
+	/* copy filter data */
+	memcpy(flow->filter, filter, inbytes);
+	return flow;
+}
+
+/* Append a rte_flow to the list */
+static inline void
+igc_append_flow(struct igc_flow_list *list, struct rte_flow *flow)
+{
+	TAILQ_INSERT_TAIL(list, flow, node);
+}
+
+/**
+ * Remove the flow and free the flow buffer
+ * The caller should make sure the flow really exists in the list
+ **/
+static inline void
+igc_remove_flow(struct igc_flow_list *list, struct rte_flow *flow)
+{
+	TAILQ_REMOVE(list, flow, node);
+	rte_free(flow);
+}
+
+/* Check whether the flow is really in the list or not */
+static inline bool
+igc_is_flow_in_list(struct igc_flow_list *list, struct rte_flow *flow)
+{
+	struct rte_flow *it;
+
+	TAILQ_FOREACH(it, list, node) {
+		if (it == flow)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We let it use the first filter it hits, so the sequence matters.
+ **/
+static struct rte_flow *
+igc_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item patterns[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
+{
+	struct rte_flow *flow = NULL;
+	struct igc_all_filter filter;
+	int ret;
+
+	ret = igc_parse_flow(dev, attr, patterns, actions, error, &filter);
+	if (ret)
+		return NULL;
+	ret = -ENOMEM;
+
+	switch (filter.mask) {
+	case IGC_FILTER_MASK_ETHER:
+		flow = igc_alloc_flow(&filter.ethertype,
+				RTE_ETH_FILTER_ETHERTYPE,
+				sizeof(filter.ethertype));
+		if (flow)
+			ret = igc_add_ethertype_filter(dev, &filter.ethertype);
+		break;
+	case IGC_FILTER_MASK_NTUPLE:
+		flow = igc_alloc_flow(&filter.ntuple, RTE_ETH_FILTER_NTUPLE,
+				sizeof(filter.ntuple));
+		if (flow)
+			ret = igc_add_del_ntuple_filter(dev,
+					&filter.ntuple, true);
+		break;
+	case IGC_FILTER_MASK_TCP_SYN:
+		flow = igc_alloc_flow(&filter.syn, RTE_ETH_FILTER_SYN,
+				sizeof(filter.syn));
+		if (flow)
+			ret = igc_set_syn_filter(dev, &filter.syn);
+		break;
+	case IGC_FILTER_MASK_RSS:
+		flow = igc_alloc_flow(&filter.rss, RTE_ETH_FILTER_HASH,
+				sizeof(filter.rss));
+		if (flow) {
+			struct igc_rss_filter *rss =
+					(struct igc_rss_filter *)flow->filter;
+			rss->conf.key = rss->key;
+			rss->conf.queue = rss->queue;
+			ret = igc_add_rss_filter(dev, &filter.rss);
+		}
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"Flow can't be recognized");
+		return NULL;
+	}
+
+	if (ret) {
+		/* check and free the memory */
+		if (flow)
+			rte_free(flow);
+
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to create flow.");
+		return NULL;
+	}
+
+	/* append the flow to the tail of the list */
+	igc_append_flow(IGC_DEV_PRIVATE_FLOW_LIST(dev), flow);
+	return flow;
+}
+
+/**
+ * Check if the flow rule is supported by the device.
+ * It only checks the format; it does not guarantee the rule can be
+ * programmed into the HW, because there may not be enough room for it.
+ **/
+static int
+igc_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item patterns[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
+{
+	struct igc_all_filter filter;
+
+	return igc_parse_flow(dev, attr, patterns, actions, error, &filter);
+}
+
+/**
+ * Disable a valid flow; the flow must not be NULL and must be
+ * chained in the device flow list.
+ **/
+static int
+igc_disable_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+	int ret = 0;
+
+	switch (flow->filter_type) {
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ret = igc_del_ethertype_filter(dev,
+			(struct rte_eth_ethertype_filter *)&flow->filter);
+		break;
+
+	case RTE_ETH_FILTER_NTUPLE:
+		ret = igc_add_del_ntuple_filter(dev,
+				(struct rte_eth_ntuple_filter *)&flow->filter,
+				false);
+		break;
+
+	case RTE_ETH_FILTER_SYN:
+		ret = igc_del_syn_filter(dev);
+		break;
+
+	case RTE_ETH_FILTER_HASH:
+		ret = igc_del_rss_filter(dev);
+		break;
+
+	default:
+		PMD_DRV_LOG(ERR, "Filter type (%d) not supported",
+				flow->filter_type);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Destroy a flow rule */
+static int
+igc_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct igc_flow_list *list = IGC_DEV_PRIVATE_FLOW_LIST(dev);
+	int ret;
+
+	if (!flow) {
+		PMD_DRV_LOG(ERR, "NULL flow!");
+		return -EINVAL;
+	}
+
+	/* check the flow was created by the IGC PMD */
+	if (!igc_is_flow_in_list(list, flow)) {
+		PMD_DRV_LOG(ERR, "Flow (%p) not found!", flow);
+		return -ENOENT;
+	}
+
+	ret = igc_disable_flow(dev, flow);
+	if (ret)
+		rte_flow_error_set(error, -ret,
+				RTE_FLOW_ERROR_TYPE_HANDLE,
+				NULL, "Failed to destroy flow");
+
+	igc_remove_flow(list, flow);
+	return ret;
+}
+
+/* Initialize the device flow list header */
+void
+igc_flow_init(struct rte_eth_dev *dev)
+{
+	TAILQ_INIT(IGC_DEV_PRIVATE_FLOW_LIST(dev));
+}
+
+/* Destroy all flows in the list and free their memory */
+int
+igc_flow_flush(struct rte_eth_dev *dev,
+		__rte_unused struct rte_flow_error *error)
+{
+	struct igc_flow_list *list = IGC_DEV_PRIVATE_FLOW_LIST(dev);
+	struct rte_flow *flow;
+
+	while ((flow = TAILQ_FIRST(list)) != NULL) {
+		igc_disable_flow(dev, flow);
+		igc_remove_flow(list, flow);
+	}
+
+	return 0;
+}
+
+const struct rte_flow_ops igc_flow_ops = {
+	.validate = igc_flow_validate,
+	.create = igc_flow_create,
+	.destroy = igc_flow_destroy,
+	.flush = igc_flow_flush,
+};
diff --git a/drivers/net/igc/igc_flow.h b/drivers/net/igc/igc_flow.h
new file mode 100644
index 0000000..310b4bd
--- /dev/null
+++ b/drivers/net/igc/igc_flow.h
@@ -0,0 +1,25 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2020 Intel Corporation
+ */
+
+#ifndef _IGC_FLOW_H_
+#define _IGC_FLOW_H_
+
+#include <rte_flow_driver.h>
+#include "igc_ethdev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern const struct rte_flow_ops igc_flow_ops;
+
+void igc_flow_init(struct rte_eth_dev *dev);
+int igc_flow_flush(struct rte_eth_dev *dev,
+		__rte_unused struct rte_flow_error *error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _IGC_FLOW_H_ */
diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index 5eb8fef..6a25e68 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -991,6 +991,132 @@  int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset)
 	igc_hw_rss_hash_set(hw, &rss_conf);
 }
 
+int
+igc_del_rss_filter(struct rte_eth_dev *dev)
+{
+	struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+
+	if (rss_filter->enable) {
+		/* recover default RSS configuration */
+		igc_rss_configure(dev);
+
+		/* disable RSS logic and clear filter data */
+		igc_rss_disable(dev);
+		memset(rss_filter, 0, sizeof(*rss_filter));
+		return 0;
+	}
+	PMD_DRV_LOG(ERR, "filter does not exist!");
+	return -ENOENT;
+}
+
+/* Initialize the filter structure from a struct rte_flow_action_rss */
+void
+igc_rss_conf_set(struct igc_rss_filter *out,
+		const struct rte_flow_action_rss *rss)
+{
+	out->conf.func = rss->func;
+	out->conf.level = rss->level;
+	out->conf.types = rss->types;
+
+	if (rss->key_len == sizeof(out->key)) {
+		memcpy(out->key, rss->key, rss->key_len);
+		out->conf.key = out->key;
+		out->conf.key_len = rss->key_len;
+	} else {
+		out->conf.key = NULL;
+		out->conf.key_len = 0;
+	}
+
+	if (rss->queue_num <= IGC_RSS_RDT_SIZD) {
+		memcpy(out->queue, rss->queue,
+			sizeof(*out->queue) * rss->queue_num);
+		out->conf.queue = out->queue;
+		out->conf.queue_num = rss->queue_num;
+	} else {
+		out->conf.queue = NULL;
+		out->conf.queue_num = 0;
+	}
+}
+
+int
+igc_add_rss_filter(struct rte_eth_dev *dev, struct igc_rss_filter *rss)
+{
+	struct rte_eth_rss_conf rss_conf = {
+		.rss_key = rss->conf.key_len ?
+			(void *)(uintptr_t)rss->conf.key : NULL,
+		.rss_key_len = rss->conf.key_len,
+		.rss_hf = rss->conf.types,
+	};
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+	uint32_t i, j;
+
+	/* check RSS type is valid */
+	if ((rss_conf.rss_hf & IGC_RSS_OFFLOAD_ALL) == 0) {
+		PMD_DRV_LOG(ERR, "RSS type error!");
+		return -EINVAL;
+	}
+
+	/* check queue count is not zero */
+	if (!rss->conf.queue_num) {
+		PMD_DRV_LOG(ERR, "queue number should not be 0!");
+		return -EINVAL;
+	}
+
+	/* check queue id is valid */
+	for (i = 0; i < rss->conf.queue_num; i++)
+		if (rss->conf.queue[i] >= dev->data->nb_rx_queues) {
+			PMD_DRV_LOG(ERR, "queue id %u is invalid!",
+					rss->conf.queue[i]);
+			return -EINVAL;
+		}
+
+	/* only one filter is supported */
+	if (rss_filter->enable) {
+		PMD_DRV_LOG(ERR, "RSS filter exists!");
+		return -EEXIST;
+	}
+	rss_filter->enable = 1;
+
+	igc_rss_conf_set(rss_filter, &rss->conf);
+
+	/* Fill in redirection table. */
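+	/*
+	 * Each 32-bit RETA register packs four 8-bit queue indices;
+	 * entries accumulate in 'reta' and the register is written
+	 * out once every fourth entry.
+	 */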
+	for (i = 0, j = 0; i < IGC_RSS_RDT_SIZD; i++, j++) {
+		union igc_rss_reta_reg reta;
+		uint16_t q_idx, reta_idx;
+
+		if (j == rss->conf.queue_num)
+			j = 0;
+		q_idx = rss->conf.queue[j];
+		reta_idx = i % sizeof(reta);
+		reta.bytes[reta_idx] = q_idx;
+		if (reta_idx == sizeof(reta) - 1)
+			IGC_WRITE_REG_LE_VALUE(hw,
+				IGC_RETA(i / sizeof(reta)), reta.dword);
+	}
+
+	if (rss_conf.rss_key == NULL)
+		rss_conf.rss_key = default_rss_key;
+	igc_hw_rss_hash_set(hw, &rss_conf);
+	return 0;
+}
+
+void
+igc_clear_rss_filter(struct rte_eth_dev *dev)
+{
+	struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
+
+	if (!rss_filter->enable)
+		return;
+
+	/* recover default RSS configuration */
+	igc_rss_configure(dev);
+
+	/* disable RSS logic and clear filter data */
+	igc_rss_disable(dev);
+	memset(rss_filter, 0, sizeof(*rss_filter));
+}
+
 static int
 igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h
index 50be783..14be64c 100644
--- a/drivers/net/igc/igc_txrx.h
+++ b/drivers/net/igc/igc_txrx.h
@@ -44,6 +44,11 @@  int eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 void igc_set_rss_flowtype(struct igc_hw *hw, uint64_t flowtype);
 void
 igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf);
+int igc_del_rss_filter(struct rte_eth_dev *dev);
+void igc_rss_conf_set(struct igc_rss_filter *out,
+		const struct rte_flow_action_rss *rss);
+int igc_add_rss_filter(struct rte_eth_dev *dev, struct igc_rss_filter *rss);
+void igc_clear_rss_filter(struct rte_eth_dev *dev);
 void eth_igc_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct rte_eth_rxq_info *qinfo);
 void eth_igc_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
diff --git a/drivers/net/igc/meson.build b/drivers/net/igc/meson.build
index d509c0e..df58e2f 100644
--- a/drivers/net/igc/meson.build
+++ b/drivers/net/igc/meson.build
@@ -8,7 +8,8 @@  sources = files(
 	'igc_logs.c',
 	'igc_ethdev.c',
 	'igc_txrx.c',
-	'igc_filter.c'
+	'igc_filter.c',
+	'igc_flow.c'
 )
 
 includes += include_directories('base')