From patchwork Mon Jul 15 09:40:52 2019
X-Patchwork-Submitter: Qiming Yang <qiming.yang@intel.com>
X-Patchwork-Id: 56436
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Qiming Yang <qiming.yang@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, Qiming Yang <qiming.yang@intel.com>, stable@dpdk.org
Date: Mon, 15 Jul 2019 17:40:52 +0800
Message-Id: <20190715094052.128170-1-qiming.yang@intel.com>
X-Mailer: git-send-email 2.9.5
Subject: [dpdk-dev] [PATCH] net/ice: fix tunnel rule not recognized

Previously, the driver decided whether an input set field belonged to the
outer or the inner header by checking whether the item appeared a first or
a second time in the pattern. This does not work when the user does not
configure the outer input set. This patch fixes the issue by tracking a
single is_tunnel flag, set once a tunnel item (VXLAN or NVGRE) is reached,
and classifying every field parsed after it as a tunnel (inner) input set
field.

Fixes: d76116a4678f ("net/ice: add generic flow API")
Cc: stable@dpdk.org

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/ice/ice_generic_flow.c | 80 ++++++++++++++++++--------------------
 1 file changed, 38 insertions(+), 42 deletions(-)

diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index e6a2c4b..05a1678 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -209,8 +209,7 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
         0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
         0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
     uint64_t input_set = ICE_INSET_NONE;
-    bool outer_ip = true;
-    bool outer_l4 = true;
+    bool is_tunnel = false;
 
     for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
         if (item->last) {
@@ -259,27 +258,26 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
                 return 0;
             }
 
-            if (outer_ip) {
+            if (is_tunnel) {
                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
-                    input_set |= ICE_INSET_IPV4_SRC;
+                    input_set |= ICE_INSET_TUN_IPV4_SRC;
                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
-                    input_set |= ICE_INSET_IPV4_DST;
-                if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
-                    input_set |= ICE_INSET_IPV4_TOS;
+                    input_set |= ICE_INSET_TUN_IPV4_DST;
                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
-                    input_set |= ICE_INSET_IPV4_TTL;
+                    input_set |= ICE_INSET_TUN_IPV4_TTL;
                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
-                    input_set |= ICE_INSET_IPV4_PROTO;
-                outer_ip = false;
+                    input_set |= ICE_INSET_TUN_IPV4_PROTO;
             } else {
                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
-                    input_set |= ICE_INSET_TUN_IPV4_SRC;
+                    input_set |= ICE_INSET_IPV4_SRC;
                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
-                    input_set |= ICE_INSET_TUN_IPV4_DST;
+                    input_set |= ICE_INSET_IPV4_DST;
                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
-                    input_set |= ICE_INSET_TUN_IPV4_TTL;
+                    input_set |= ICE_INSET_IPV4_TTL;
                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
-                    input_set |= ICE_INSET_TUN_IPV4_PROTO;
+                    input_set |= ICE_INSET_IPV4_PROTO;
+                if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+                    input_set |= ICE_INSET_IPV4_TOS;
             }
             break;
         case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -302,33 +300,32 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
                 return 0;
             }
 
-            if (outer_ip) {
+            if (is_tunnel) {
                 if (!memcmp(ipv6_mask->hdr.src_addr,
                     ipv6_addr_mask,
                     RTE_DIM(ipv6_mask->hdr.src_addr)))
-                    input_set |= ICE_INSET_IPV6_SRC;
+                    input_set |= ICE_INSET_TUN_IPV6_SRC;
                 if (!memcmp(ipv6_mask->hdr.dst_addr,
                     ipv6_addr_mask,
                     RTE_DIM(ipv6_mask->hdr.dst_addr)))
-                    input_set |= ICE_INSET_IPV6_DST;
+                    input_set |= ICE_INSET_TUN_IPV6_DST;
                 if (ipv6_mask->hdr.proto == UINT8_MAX)
-                    input_set |= ICE_INSET_IPV6_PROTO;
+                    input_set |= ICE_INSET_TUN_IPV6_PROTO;
                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
-                    input_set |= ICE_INSET_IPV6_HOP_LIMIT;
-                outer_ip = false;
+                    input_set |= ICE_INSET_TUN_IPV6_TTL;
             } else {
                 if (!memcmp(ipv6_mask->hdr.src_addr,
                     ipv6_addr_mask,
                     RTE_DIM(ipv6_mask->hdr.src_addr)))
-                    input_set |= ICE_INSET_TUN_IPV6_SRC;
+                    input_set |= ICE_INSET_IPV6_SRC;
                 if (!memcmp(ipv6_mask->hdr.dst_addr,
                     ipv6_addr_mask,
                     RTE_DIM(ipv6_mask->hdr.dst_addr)))
-                    input_set |= ICE_INSET_TUN_IPV6_DST;
+                    input_set |= ICE_INSET_IPV6_DST;
                 if (ipv6_mask->hdr.proto == UINT8_MAX)
-                    input_set |= ICE_INSET_TUN_IPV6_PROTO;
+                    input_set |= ICE_INSET_IPV6_PROTO;
                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
-                    input_set |= ICE_INSET_TUN_IPV6_TTL;
+                    input_set |= ICE_INSET_IPV6_HOP_LIMIT;
             }
             break;
 
@@ -353,17 +350,16 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
                 return 0;
             }
 
-            if (outer_l4) {
+            if (is_tunnel) {
                 if (udp_mask->hdr.src_port == UINT16_MAX)
-                    input_set |= ICE_INSET_SRC_PORT;
+                    input_set |= ICE_INSET_TUN_SRC_PORT;
                 if (udp_mask->hdr.dst_port == UINT16_MAX)
-                    input_set |= ICE_INSET_DST_PORT;
-                outer_l4 = false;
+                    input_set |= ICE_INSET_TUN_DST_PORT;
             } else {
                 if (udp_mask->hdr.src_port == UINT16_MAX)
-                    input_set |= ICE_INSET_TUN_SRC_PORT;
+                    input_set |= ICE_INSET_SRC_PORT;
                 if (udp_mask->hdr.dst_port == UINT16_MAX)
-                    input_set |= ICE_INSET_TUN_DST_PORT;
+                    input_set |= ICE_INSET_DST_PORT;
             }
             break;
 
@@ -393,17 +389,16 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
                 return 0;
             }
 
-            if (outer_l4) {
+            if (is_tunnel) {
                 if (tcp_mask->hdr.src_port == UINT16_MAX)
-                    input_set |= ICE_INSET_SRC_PORT;
+                    input_set |= ICE_INSET_TUN_SRC_PORT;
                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
-                    input_set |= ICE_INSET_DST_PORT;
-                outer_l4 = false;
+                    input_set |= ICE_INSET_TUN_DST_PORT;
             } else {
                 if (tcp_mask->hdr.src_port == UINT16_MAX)
-                    input_set |= ICE_INSET_TUN_SRC_PORT;
+                    input_set |= ICE_INSET_SRC_PORT;
                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
-                    input_set |= ICE_INSET_TUN_DST_PORT;
+                    input_set |= ICE_INSET_DST_PORT;
             }
             break;
 
@@ -427,17 +422,16 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
                 return 0;
             }
 
-            if (outer_l4) {
+            if (is_tunnel) {
                 if (sctp_mask->hdr.src_port == UINT16_MAX)
-                    input_set |= ICE_INSET_SRC_PORT;
+                    input_set |= ICE_INSET_TUN_SRC_PORT;
                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
-                    input_set |= ICE_INSET_DST_PORT;
-                outer_l4 = false;
+                    input_set |= ICE_INSET_TUN_DST_PORT;
             } else {
                 if (sctp_mask->hdr.src_port == UINT16_MAX)
-                    input_set |= ICE_INSET_TUN_SRC_PORT;
+                    input_set |= ICE_INSET_SRC_PORT;
                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
-                    input_set |= ICE_INSET_TUN_DST_PORT;
+                    input_set |= ICE_INSET_DST_PORT;
             }
             break;
 
@@ -486,6 +480,7 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
                        "Invalid VXLAN item");
                 return 0;
             }
+            is_tunnel = 1;
             break;
 
         case RTE_FLOW_ITEM_TYPE_NVGRE:
@@ -503,6 +498,7 @@ static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
                        "Invalid NVGRE item");
                 return 0;
             }
+            is_tunnel = 1;
             break;
 
         default:
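
For illustration (not part of the patch): below is a minimal sketch, in C against
the standard rte_flow API, of the kind of pattern the commit message is about --
a VXLAN rule where only the inner IPv4 header carries a spec/mask, i.e. no outer
input set is configured. The names inner_ipv4_spec/inner_ipv4_mask and the address
value are invented for the example, and how unmasked outer items are validated is
out of scope here. Under the old occurrence-counting logic the first IPv4/UDP item
parsed was mapped to the non-tunnel ICE_INSET_* bits, so with no outer input set
the inner match could end up classified as outer; with is_tunnel set at the VXLAN
item, everything after the tunnel header maps to ICE_INSET_TUN_* regardless of how
the outer headers are configured.

    /* Pattern: eth / ipv4 / udp / vxlan / eth / ipv4 dst is 192.168.0.1 / end */
    #include <stdint.h>

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Inner IPv4: match the destination address 192.168.0.1 only. */
    static const struct rte_flow_item_ipv4 inner_ipv4_spec = {
        .hdr = { .dst_addr = RTE_BE32(0xC0A80001) }, /* 192.168.0.1 */
    };
    static const struct rte_flow_item_ipv4 inner_ipv4_mask = {
        .hdr = { .dst_addr = RTE_BE32(UINT32_MAX) },
    };

    static const struct rte_flow_item tunnel_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },   /* outer L2, no match fields     */
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* outer L3, no outer input set  */
        { .type = RTE_FLOW_ITEM_TYPE_UDP },   /* outer L4, no outer input set  */
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN }, /* tunnel: patch sets is_tunnel  */
        { .type = RTE_FLOW_ITEM_TYPE_ETH },   /* inner L2                      */
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,    /* inner L3 carries the match    */
          .spec = &inner_ipv4_spec,
          .mask = &inner_ipv4_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };

In testpmd syntax this is roughly "flow create 0 ingress pattern eth / ipv4 / udp /
vxlan / eth / ipv4 dst is 192.168.0.1 / end actions queue index 3 / end". With the
patch applied, the inner destination address is reported as ICE_INSET_TUN_IPV4_DST
instead of ICE_INSET_IPV4_DST, so the tunnel rule can be recognized as the commit
message describes.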