net/ice: fix tunnel rule not recognize

Message ID 20190715094052.128170-1-qiming.yang@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Qi Zhang
Headers
Series net/ice: fix tunnel rule not recognize |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail Compilation issues
ci/intel-Performance-Testing success Performance Testing PASS
ci/mellanox-Performance-Testing success Performance Testing PASS

Commit Message

Qiming Yang July 15, 2019, 9:40 a.m. UTC
  In the past, whether the input set was outer or inner was distinguished
by checking whether the item appeared once or twice. But this approach doesn't
work when the user doesn't configure the outer input set. This patch fixes the issue.

Fixes: d76116a4678f ("net/ice: add generic flow API")
Cc: stable@dpdk.org

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/ice_generic_flow.c | 80 ++++++++++++++++++--------------------
 1 file changed, 38 insertions(+), 42 deletions(-)
  

Comments

Qi Zhang July 16, 2019, 1:32 a.m. UTC | #1
> -----Original Message-----
> From: Yang, Qiming
> Sent: Monday, July 15, 2019 5:41 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; stable@dpdk.org
> Subject: [PATCH] net/ice: fix tunnel rule not recognize
> 
> In the past, whether the input set was outer or inner was distinguished by
> checking whether the item appeared once or twice. But this approach doesn't
> work when the user doesn't configure the outer input set. This patch fixes the issue.
> 
> Fixes: d76116a4678f ("net/ice: add generic flow API")
> Cc: stable@dpdk.org

This is a fix for the current release cycle only, so there is no need to Cc stable.
> 
> Signed-off-by: Qiming Yang <qiming.yang@intel.com>
> ---

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi
  

Patch

diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index e6a2c4b..05a1678 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -209,8 +209,7 @@  static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
 	uint64_t input_set = ICE_INSET_NONE;
-	bool outer_ip = true;
-	bool outer_l4 = true;
+	bool is_tunnel = false;
 
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
@@ -259,27 +258,26 @@  static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 				return 0;
 			}
 
-			if (outer_ip) {
+			if (is_tunnel) {
 				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
-					input_set |= ICE_INSET_IPV4_SRC;
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
 				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
-					input_set |= ICE_INSET_IPV4_DST;
-				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
-					input_set |= ICE_INSET_IPV4_TOS;
+					input_set |= ICE_INSET_TUN_IPV4_DST;
 				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
-					input_set |= ICE_INSET_IPV4_TTL;
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
 				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
-					input_set |= ICE_INSET_IPV4_PROTO;
-				outer_ip = false;
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
 			} else {
 				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
-					input_set |= ICE_INSET_TUN_IPV4_SRC;
+					input_set |= ICE_INSET_IPV4_SRC;
 				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
-					input_set |= ICE_INSET_TUN_IPV4_DST;
+					input_set |= ICE_INSET_IPV4_DST;
 				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
-					input_set |= ICE_INSET_TUN_IPV4_TTL;
+					input_set |= ICE_INSET_IPV4_TTL;
 				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
-					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+					input_set |= ICE_INSET_IPV4_PROTO;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -302,33 +300,32 @@  static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 				return 0;
 			}
 
-			if (outer_ip) {
+			if (is_tunnel) {
 				if (!memcmp(ipv6_mask->hdr.src_addr,
 					    ipv6_addr_mask,
 					    RTE_DIM(ipv6_mask->hdr.src_addr)))
-					input_set |= ICE_INSET_IPV6_SRC;
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
 				if (!memcmp(ipv6_mask->hdr.dst_addr,
 					    ipv6_addr_mask,
 					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
-					input_set |= ICE_INSET_IPV6_DST;
+					input_set |= ICE_INSET_TUN_IPV6_DST;
 				if (ipv6_mask->hdr.proto == UINT8_MAX)
-					input_set |= ICE_INSET_IPV6_PROTO;
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
 				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
-					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
-				outer_ip = false;
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
 			} else {
 				if (!memcmp(ipv6_mask->hdr.src_addr,
 					    ipv6_addr_mask,
 					    RTE_DIM(ipv6_mask->hdr.src_addr)))
-					input_set |= ICE_INSET_TUN_IPV6_SRC;
+					input_set |= ICE_INSET_IPV6_SRC;
 				if (!memcmp(ipv6_mask->hdr.dst_addr,
 					    ipv6_addr_mask,
 					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
-					input_set |= ICE_INSET_TUN_IPV6_DST;
+					input_set |= ICE_INSET_IPV6_DST;
 				if (ipv6_mask->hdr.proto == UINT8_MAX)
-					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+					input_set |= ICE_INSET_IPV6_PROTO;
 				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
-					input_set |= ICE_INSET_TUN_IPV6_TTL;
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
 			}
 
 			break;
@@ -353,17 +350,16 @@  static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 				return 0;
 			}
 
-			if (outer_l4) {
+			if (is_tunnel) {
 				if (udp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_SRC_PORT;
+					input_set |= ICE_INSET_TUN_SRC_PORT;
 				if (udp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_DST_PORT;
-				outer_l4 = false;
+					input_set |= ICE_INSET_TUN_DST_PORT;
 			} else {
 				if (udp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_SRC_PORT;
+					input_set |= ICE_INSET_SRC_PORT;
 				if (udp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_DST_PORT;
+					input_set |= ICE_INSET_DST_PORT;
 			}
 
 			break;
@@ -393,17 +389,16 @@  static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 				return 0;
 			}
 
-			if (outer_l4) {
+			if (is_tunnel) {
 				if (tcp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_SRC_PORT;
+					input_set |= ICE_INSET_TUN_SRC_PORT;
 				if (tcp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_DST_PORT;
-				outer_l4 = false;
+					input_set |= ICE_INSET_TUN_DST_PORT;
 			} else {
 				if (tcp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_SRC_PORT;
+					input_set |= ICE_INSET_SRC_PORT;
 				if (tcp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_DST_PORT;
+					input_set |= ICE_INSET_DST_PORT;
 			}
 
 			break;
@@ -427,17 +422,16 @@  static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 				return 0;
 			}
 
-			if (outer_l4) {
+			if (is_tunnel) {
 				if (sctp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_SRC_PORT;
+					input_set |= ICE_INSET_TUN_SRC_PORT;
 				if (sctp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_DST_PORT;
-				outer_l4 = false;
+					input_set |= ICE_INSET_TUN_DST_PORT;
 			} else {
 				if (sctp_mask->hdr.src_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_SRC_PORT;
+					input_set |= ICE_INSET_SRC_PORT;
 				if (sctp_mask->hdr.dst_port == UINT16_MAX)
-					input_set |= ICE_INSET_TUN_DST_PORT;
+					input_set |= ICE_INSET_DST_PORT;
 			}
 
 			break;
@@ -486,6 +480,7 @@  static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 					   "Invalid VXLAN item");
 				return 0;
 			}
+			is_tunnel = 1;
 
 			break;
 		case RTE_FLOW_ITEM_TYPE_NVGRE:
@@ -503,6 +498,7 @@  static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
 					   "Invalid NVGRE item");
 				return 0;
 			}
+			is_tunnel = 1;
 
 			break;
 		default: