[dpdk-dev,14/24] net/i40e: parse NVGRE filter

Message ID 1480679625-4157-15-git-send-email-beilei.xing@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Checks

Context               | Check   | Description
checkpatch/checkpatch | warning | coding style issues

Commit Message

Xing, Beilei Dec. 2, 2016, 11:53 a.m. UTC
  Check if the rule is an NVGRE rule, and get the NVGRE info.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 269 +++++++++++++++++++++++++++++++++++++++++
 lib/librte_ether/rte_flow.h    |  23 ++++
 2 files changed, 292 insertions(+)
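
For context, the pattern shape this parser accepts is: outer ETH (optional) / IPv4 or IPv6 / NVGRE / inner ETH / VLAN (optional) / END. A minimal sketch of an application building such a rule follows; the MAC address, TNI and queue index are illustrative placeholders, and the ingress attribute plus QUEUE action are assumptions about what i40e_parse_tunnel_act_attr accepts.

#include <rte_flow.h>

/* Sketch only: an NVGRE rule of the shape this parser accepts
 * (IPv4 / NVGRE / inner ETH / END).  Values are placeholders.
 */
static struct rte_flow *
create_nvgre_rule(uint8_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth i_eth_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	struct rte_flow_item_eth i_eth_mask = {
		/* Inner DST MAC fully unmasked; SRC and type left zero,
		 * as the parser requires.
		 */
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};
	struct rte_flow_item_nvgre nvgre_spec = {
		.tni = { 0x00, 0x01, 0x02 },	/* tenant network ID */
	};
	struct rte_flow_item_nvgre nvgre_mask = {
		.tni = { 0xFF, 0xFF, 0xFF },	/* match the whole TNI */
	};
	struct rte_flow_item pattern[] = {
		/* The IP item carries no spec/mask: stack description only. */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
		  .spec = &nvgre_spec, .mask = &nvgre_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &i_eth_spec, .mask = &i_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* With no outer ETH item, no VLAN item and a fully-set TNI mask,
	 * the parser maps this to RTE_TUNNEL_FILTER_IMAC_TENID.
	 */
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

With an outer ETH item carrying a DST MAC spec and an all-0xFF DST mask, the same pattern would map to RTE_TUNNEL_FILTER_OMAC_TENID_IMAC instead.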
  

Patch

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 3bdef8e..1ffafa0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -10720,6 +10720,270 @@  i40e_parse_vxlan_tunnel_filter(const struct rte_flow_attr *attr,
 	return i40e_parse_tunnel_act_attr(attr, actions, filter, error);
 }
 
+/* Check whether it is an NVGRE tunnel rule */
+static int
+i40e_parse_nvgre_tunnel_filter(const struct rte_flow_attr *attr,
+			       const struct rte_flow_item *pattern,
+			       const struct rte_flow_action *actions,
+			       struct rte_eth_tunnel_filter_conf *filter,
+			       struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_eth *o_eth_spec = NULL;
+	const struct rte_flow_item_eth *o_eth_mask = NULL;
+	const struct rte_flow_item_nvgre *nvgre_spec = NULL;
+	const struct rte_flow_item_nvgre *nvgre_mask = NULL;
+	const struct rte_flow_item_eth *i_eth_spec, *i_eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec = NULL;
+	const struct rte_flow_item_vlan *vlan_mask = NULL;
+	struct ether_addr macaddr_unmasked = {
+		.addr_bytes = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
+	};
+	struct ether_addr macaddr_masked = {
+		.addr_bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+	};
+	bool is_tni_masked = false;
+	uint32_t i, j;
+
+	/* parse pattern */
+	i = 0;
+
+	/* The first not void item should be ETH, IPv4, IPv6 or NVGRE. */
+	PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		o_eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		o_eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+		if ((!o_eth_spec && o_eth_mask) ||
+		    (o_eth_spec && !o_eth_mask)) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		if (o_eth_spec)
+			rte_memcpy(&filter->outer_mac, &o_eth_spec->dst,
+				   ETHER_ADDR_LEN);
+
+		if (o_eth_mask) {
+			/**
+			 * DST MAC address shouldn't be masked.
+			 * SRC MAC address should be masked.
+			 * Ethertype should be masked.
+			 */
+			if (!is_same_ether_addr(&o_eth_mask->dst,
+						&macaddr_unmasked) ||
+			    !is_same_ether_addr(&o_eth_mask->src,
+						&macaddr_masked) ||
+			    o_eth_mask->type) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		}
+
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		/**
+		 * If the item is IP, the content should be NULL.
+		 * Only used to describe the protocol stack.
+		 */
+		if (item->spec || item->mask) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		/* Check if the next not void item is NVGRE. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	if (item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	nvgre_spec = (const struct rte_flow_item_nvgre *)item->spec;
+	nvgre_mask = (const struct rte_flow_item_nvgre *)item->mask;
+
+	/**
+	 * Check if NVGRE item is used to describe the protocol stack.
+	 * If yes, both nvgre_spec and nvgre_mask should be NULL.
+	 * If not, neither nvgre_spec nor nvgre_mask should be NULL.
+	 */
+	if ((!nvgre_spec && nvgre_mask) ||
+	    (nvgre_spec && !nvgre_mask)) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	/* Check whether the TNI is masked; mask bytes must be all 0xFF or all 0. */
+	if (nvgre_mask) {
+		for (j = 0; j < RTE_DIM(nvgre_mask->tni); j++) {
+			if (nvgre_mask->tni[j] == 0xFF) {
+				if (j > 0 &&
+				    (nvgre_mask->tni[j] !=
+				     nvgre_mask->tni[j - 1])) {
+					error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+					return -EINVAL;
+				}
+				is_tni_masked = 0;
+			} else if (nvgre_mask->tni[j] == 0) {
+				if (j > 0 &&
+				    (nvgre_mask->tni[j] !=
+				     nvgre_mask->tni[j - 1])) {
+					error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+					return -EINVAL;
+				}
+				is_tni_masked = 1;
+			} else {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		}
+	}
+
+	/* Check if the next not void item is ETH. */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		i_eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		i_eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+		if (!i_eth_spec || !i_eth_mask) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		rte_memcpy(&filter->inner_mac, &i_eth_spec->dst,
+			   ETHER_ADDR_LEN);
+
+		/**
+		 * DST address of inner MAC shouldn't be masked.
+		 * SRC address of inner MAC should be masked.
+		 */
+		if (!is_same_ether_addr(&i_eth_mask->dst, &macaddr_unmasked) ||
+		    !is_same_ether_addr(&i_eth_mask->src, &macaddr_masked) ||
+		    i_eth_mask->type) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		/* Check if the next not void item is VLAN or END. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+		    item->type != RTE_FLOW_ITEM_TYPE_END) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+		if (!(vlan_spec && vlan_mask)) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		/* Check if the next not void item is END. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	if (vlan_spec && vlan_mask &&
+	    (vlan_mask->tci == rte_cpu_to_be_16(0x0FFF))) {
+		filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci) & 0xFFF;
+		if (nvgre_spec && nvgre_mask && !is_tni_masked) {
+			rte_memcpy(&filter->tenant_id, nvgre_spec->tni,
+				   RTE_DIM(nvgre_spec->tni));
+			if (!o_eth_spec && !o_eth_mask)
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+			else {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		} else if (!nvgre_spec && !nvgre_mask) {
+			if (!o_eth_spec && !o_eth_mask)
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_IVLAN;
+			else {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		} else {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	} else if ((!vlan_spec && !vlan_mask) ||
+		   (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+		if (nvgre_spec && nvgre_mask && !is_tni_masked) {
+			rte_memcpy(&filter->tenant_id, nvgre_spec->tni,
+				   RTE_DIM(nvgre_spec->tni));
+			if (!o_eth_spec && !o_eth_mask)
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_TENID;
+			else
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+		} else if (!nvgre_spec && !nvgre_mask) {
+			if (!o_eth_spec && !o_eth_mask)
+				filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+			else {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		} else {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	} else {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	filter->tunnel_type = RTE_TUNNEL_TYPE_NVGRE;
+
+	return i40e_parse_tunnel_act_attr(attr, actions, filter, error);
+}
+
 static int
 i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
 			 const struct rte_flow_item *pattern,
@@ -10734,6 +10998,11 @@  i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
 	if (!ret)
 		return 0;
 
+	ret = i40e_parse_nvgre_tunnel_filter(attr, pattern,
+					     actions, rule, error);
+	if (!ret)
+		return 0;
+
 	return ret;
 }
 
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index 211f307..6bdbba1 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -270,6 +270,13 @@  enum rte_flow_item_type {
 	 * See struct rte_flow_item_vxlan.
 	 */
 	RTE_FLOW_ITEM_TYPE_VXLAN,
+
+	/**
+	 * Matches an NVGRE header.
+	 *
+	 * See struct rte_flow_item_nvgre.
+	 */
+	RTE_FLOW_ITEM_TYPE_NVGRE,
 };
 
 /**
@@ -461,6 +468,22 @@  struct rte_flow_item_vxlan {
 };
 
 /**
+ * RTE_FLOW_ITEM_TYPE_NVGRE.
+ *
+ * Matches an NVGRE header.
+ */
+struct rte_flow_item_nvgre {
+	uint32_t flags0:1; /**< Checksum present bit (C), must be 0. */
+	uint32_t rsvd1:1; /**< Reserved bit, not defined. */
+	uint32_t flags1:2; /**< Key present (K) and sequence (S) bits, 1 and 0. */
+	uint32_t rsvd0:9; /**< Reserved bits. */
+	uint32_t ver:3; /**< Version number, must be 0. */
+	uint32_t protocol:16; /**< Protocol type, 0x6558 for NVGRE. */
+	uint8_t tni[3]; /**< Tenant network ID or virtual subnet ID. */
+	uint8_t flow_id; /**< Flow ID or reserved. */
+};
+
+/**
  * Matching pattern item definition.
  *
  * A pattern is formed by stacking items starting from the lowest protocol
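
For illustration of the new item's layout, a spec could also carry the NVGRE header constants (C = 0, K = 1, S = 0, protocol type 0x6558) alongside the TNI. This is a sketch against the structure above; the i40e parser in this patch only consumes the tni field, and the flags1 value assumes the bit layout implied by the structure's comments.

	struct rte_flow_item_nvgre nvgre_spec = {
		.flags0 = 0,        /* checksum present bit (C), must be 0 */
		.flags1 = 0x2,      /* key (K) = 1, sequence (S) = 0 */
		.ver = 0,           /* version, must be 0 */
		.protocol = 0x6558, /* Transparent Ethernet Bridging */
		.tni = { 0x00, 0x01, 0x02 },
	};
	struct rte_flow_item_nvgre nvgre_mask = {
		/* Per the parser, TNI mask bytes must be all 0xFF or all 0. */
		.tni = { 0xFF, 0xFF, 0xFF },
	};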