[dpdk-dev,2/3] net/i40e: add MPLS parsing function

Message ID 1488534236-29904-3-git-send-email-beilei.xing@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Checks

Context               Check     Description
ci/checkpatch         warning   coding style issues
ci/Intel-compilation  fail      apply patch file failure

Commit Message

Xing, Beilei March 3, 2017, 9:43 a.m. UTC
  This patch adds an MPLS parsing function to support
MPLSoUDP and MPLSoGRE cloud filters.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.h |   5 +
 drivers/net/i40e/i40e_flow.c   | 217 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 222 insertions(+)
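
For reference (not part of the patch), a minimal sketch of how an
application could request one of the new MPLSoUDP cloud filters through
rte_flow once this series is applied. It assumes the rte_flow_item_mpls
layout used by this series (a single big-endian label_tc_s_ttl word with
the 20-bit label in the low bits, which is what
i40e_flow_parse_mpls_pattern() below extracts); the label value, queue
index and port id are arbitrary placeholders, and the queue action is only
illustrative: the accepted actions remain whatever
i40e_flow_parse_tunnel_action() already supports.

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Sketch only: request an MPLSoUDP cloud filter (ETH/IPV4/UDP/MPLS,
 * i.e. pattern_mpls_1 below) steering matching packets to a queue.
 */
static struct rte_flow *
mplsoudp_rule_create(uint8_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match only the 20-bit MPLS label; the parser requires the full
	 * label mask (I40E_MPLS_LABEL_MASK) to be set.
	 */
	struct rte_flow_item_mpls mpls_spec = {
		.label_tc_s_ttl = rte_cpu_to_be_32(0x1234), /* arbitrary label */
	};
	struct rte_flow_item_mpls mpls_mask = {
		.label_tc_s_ttl = rte_cpu_to_be_32(0xFFFFF),
	};
	/* ETH/IPV4/UDP items carry no spec/mask: they only select the
	 * protocol stack.
	 */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_MPLS,
		  .spec = &mpls_spec, .mask = &mpls_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}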
  

Patch

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 3e9b129..dd9d709 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -499,6 +499,9 @@  struct i40e_ethertype_rule {
 /* Tunnel filter number HW supports */
 #define I40E_MAX_TUNNEL_FILTER_NUM 400
 
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 0x11
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 0x12
+
 enum i40e_tunnel_iptype {
 	I40E_TUNNEL_IPTYPE_IPV4,
 	I40E_TUNNEL_IPTYPE_IPV6,
@@ -541,6 +544,8 @@  enum i40e_tunnel_type {
 	I40E_TUNNEL_TYPE_NVGRE,
 	I40E_TUNNEL_TYPE_IP_IN_GRE,
 	I40E_L2_TUNNEL_TYPE_E_TAG,
+	I40E_TUNNEL_TYPE_MPLSoUDP,
+	I40E_TUNNEL_TYPE_MPLSoGRE,
 	I40E_TUNNEL_TYPE_MAX,
 };
 
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 3e8c63a..9ca3189 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -57,6 +57,7 @@ 
 #define I40E_IPV6_FRAG_HEADER	44
 #define I40E_TENANT_ARRAY_NUM	3
 #define I40E_TCI_MASK		0xFFFF
+#define I40E_MPLS_LABEL_MASK	0xFFFFF
 
 static int i40e_flow_validate(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
@@ -114,6 +115,12 @@  static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
 					const struct rte_flow_action actions[],
 					struct rte_flow_error *error,
 					union i40e_filter_t *filter);
+static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
+				       const struct rte_flow_attr *attr,
+				       const struct rte_flow_item pattern[],
+				       const struct rte_flow_action actions[],
+				       struct rte_flow_error *error,
+				       union i40e_filter_t *filter);
 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
 				      struct i40e_ethertype_filter *filter);
 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -278,6 +285,39 @@  static enum rte_flow_item_type pattern_vxlan_4[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+/* Pattern matched MPLS */
+static enum rte_flow_item_type pattern_mpls_1[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_MPLS,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_2[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_MPLS,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_3[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_MPLS,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_GRE,
+	RTE_FLOW_ITEM_TYPE_MPLS,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	/* Ethertype */
 	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
@@ -303,6 +343,11 @@  static struct i40e_valid_pattern i40e_supported_patterns[] = {
 	{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
 	{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
 	{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+	/* MPLSoUDP & MPLSoGRE */
+	{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
+	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
+	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
+	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
 };
 
 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
@@ -1515,6 +1560,166 @@  i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* 1. Last in item should be NULL as ranges are not supported.
+ * 2. Supported filter types: MPLS label.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which need not be matched should be
+ *    filled with 0.
+ */
+static int
+i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
+			     const struct rte_flow_item *pattern,
+			     struct rte_flow_error *error,
+			     struct i40e_tunnel_filter_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_mpls *mpls_spec;
+	const struct rte_flow_item_mpls *mpls_mask;
+	enum rte_flow_item_type item_type;
+	bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
+	uint32_t mpls_cpu;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ETH item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			filter->ip_type = RTE_TUNNEL_IPTYPE_IPV4;
+			/* IPv4 is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			filter->ip_type = RTE_TUNNEL_IPTYPE_IPV6;
+			/* IPv6 is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv6 item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			/* UDP is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP item");
+				return -rte_errno;
+			}
+			is_mplsoudp = 1;
+			break;
+		case RTE_FLOW_ITEM_TYPE_GRE:
+			/* GRE is used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid GRE item");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_MPLS:
+			mpls_spec =
+				(const struct rte_flow_item_mpls *)item->spec;
+			mpls_mask =
+				(const struct rte_flow_item_mpls *)item->mask;
+
+			if (!mpls_spec || !mpls_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid VXLAN item");
+				return -rte_errno;
+			}
+
+			if ((mpls_mask->label_tc_s_ttl &
+			     rte_cpu_to_be_32(I40E_MPLS_LABEL_MASK)) !=
+			    rte_cpu_to_be_32(I40E_MPLS_LABEL_MASK)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid VXLAN mask");
+				return -rte_errno;
+			}
+
+			mpls_cpu = rte_be_to_cpu_32(mpls_spec->label_tc_s_ttl);
+			filter->tenant_id = mpls_cpu & I40E_MPLS_LABEL_MASK;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (is_mplsoudp)
+		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
+	else
+		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
+
+	return 0;
+}
+
+static int
+i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
+			    const struct rte_flow_attr *attr,
+			    const struct rte_flow_item pattern[],
+			    const struct rte_flow_action actions[],
+			    struct rte_flow_error *error,
+			    union i40e_filter_t *filter)
+{
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
+	int ret;
+
+	ret = i40e_flow_parse_mpls_pattern(dev, pattern,
+					   error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+	if (ret)
+		return ret;
+
+	ret = i40e_flow_parse_attr(attr, error);
+	if (ret)
+		return ret;
+
+	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+	return ret;
+}
+
 static int
 i40e_flow_validate(struct rte_eth_dev *dev,
 		   const struct rte_flow_attr *attr,
@@ -1748,6 +1953,18 @@  i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
 		   filter->input.general_fields,
 		   sizeof(cld_filter.general_fields));
 
+	if (((filter->input.flags &
+	      (I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP <<
+	       I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)) ==
+	     (I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP <<
+	      I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)) ||
+	    ((filter->input.flags &
+	      (I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE <<
+	       I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)) ==
+	     (I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE <<
+	      I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)))
+		big_buffer = 1;
+
 	if (big_buffer)
 		ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
 							      &cld_filter, 1);