From patchwork Mon May 16 02:03:19 2022
X-Patchwork-Submitter: Qiming Yang <qiming.yang@intel.com>
X-Patchwork-Id: 111158
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Qiming Yang <qiming.yang@intel.com>
To: dev@dpdk.org, qi.z.zhang@intel.com
Cc: Qiming Yang <qiming.yang@intel.com>, stable@dpdk.org
Subject: [PATCH] net/iavf: fix mask not allowed issue
Date: Mon, 16 May 2022 02:03:19 +0000
Message-Id: <20220516020319.2430990-1-qiming.yang@intel.com>
X-Mailer: git-send-email 2.25.1
List-Id: DPDK patches and discussions

Only the zero mask and the full mask are supported for the IPv4/UDP/TCP/SCTP
address and port fields in the current code. Reject any other mask and
return failure when parsing the pattern.
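For reference only (an illustrative sketch, not part of the submitted patch;
the function name, port id and addresses are made up): a rule with a partial
IPv4 source-address mask, which this change makes the PMD reject at pattern
parse, so rte_flow_validate() now reports an error. A mask of 0 or
UINT32_MAX is still accepted.

	#include <stdint.h>
	#include <rte_flow.h>
	#include <rte_ip.h>

	/* Hypothetical caller: validate an FDIR-style rule whose IPv4
	 * src mask is neither all-zero nor all-ones. */
	static int
	check_partial_mask_rule(uint16_t port_id)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item_ipv4 spec = {
			.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
		};
		struct rte_flow_item_ipv4 mask = {
			/* partial mask: rejected after this patch */
			.hdr.src_addr = RTE_BE32(0xffff0000),
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
			  .spec = &spec, .mask = &mask },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action_queue queue = { .index = 0 };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		struct rte_flow_error err;

		/* Returns a negative errno once the partial mask is refused. */
		return rte_flow_validate(port_id, &attr, pattern, actions, &err);
	}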
Fixes: d5eb3e600d9e ("net/iavf: support flow director basic rule")
Cc: stable@dpdk.org

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/iavf/iavf_fdir.c | 150 +++++++++++++++++++++--------------
 1 file changed, 89 insertions(+), 61 deletions(-)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index b63aaca91d..356bca8d41 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -802,6 +802,14 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 				return -rte_errno;
 			}
 
+			/* Mask for IPv4 src/dst addrs not supported */
+			if (ipv4_mask->hdr.src_addr &&
+				ipv4_mask->hdr.src_addr != UINT32_MAX)
+				return -rte_errno;
+			if (ipv4_mask->hdr.dst_addr &&
+				ipv4_mask->hdr.dst_addr != UINT32_MAX)
+				return -rte_errno;
+
 			if (ipv4_last &&
 			    (ipv4_last->hdr.version_ihl ||
 			     ipv4_last->hdr.type_of_service ||
@@ -998,37 +1006,47 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 
 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
 
-			if (udp_spec && udp_mask) {
-				if (udp_mask->hdr.dgram_len ||
-				    udp_mask->hdr.dgram_cksum) {
-					rte_flow_error_set(error, EINVAL,
-						RTE_FLOW_ERROR_TYPE_ITEM, item,
-						"Invalid UDP mask");
-					return -rte_errno;
-				}
+			if (!(udp_spec && udp_mask))
+				break;
 
-				if (udp_mask->hdr.src_port == UINT16_MAX) {
-					input_set |= IAVF_INSET_UDP_SRC_PORT;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
-				}
-				if (udp_mask->hdr.dst_port == UINT16_MAX) {
-					input_set |= IAVF_INSET_UDP_DST_PORT;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
-				}
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Invalid UDP mask");
+				return -rte_errno;
+			}
 
-				if (tun_inner) {
-					input_set &= ~IAVF_PROT_UDP_OUTER;
-					input_set |= IAVF_PROT_UDP_INNER;
-				}
+			/* Mask for UDP src/dst ports not supported */
+			if (udp_mask->hdr.src_port &&
+				udp_mask->hdr.src_port != UINT16_MAX)
+				return -rte_errno;
+			if (udp_mask->hdr.dst_port &&
+				udp_mask->hdr.dst_port != UINT16_MAX)
+				return -rte_errno;
 
-				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					rte_memcpy(hdr->buffer,
-						&udp_spec->hdr,
-						sizeof(udp_spec->hdr));
-				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					rte_memcpy(hdr->buffer,
-						&udp_spec->hdr,
-						sizeof(udp_spec->hdr));
+			if (udp_mask->hdr.src_port == UINT16_MAX) {
+				input_set |= IAVF_INSET_UDP_SRC_PORT;
+				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+			}
+			if (udp_mask->hdr.dst_port == UINT16_MAX) {
+				input_set |= IAVF_INSET_UDP_DST_PORT;
+				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+			}
+
+			if (tun_inner) {
+				input_set &= ~IAVF_PROT_UDP_OUTER;
+				input_set |= IAVF_PROT_UDP_INNER;
+			}
+
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				rte_memcpy(hdr->buffer,
+					&udp_spec->hdr,
+					sizeof(udp_spec->hdr));
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				rte_memcpy(hdr->buffer,
+					&udp_spec->hdr,
+					sizeof(udp_spec->hdr));
 			}
 
 			hdrs->count = ++layer;
@@ -1042,42 +1060,52 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
 
 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
 
-			if (tcp_spec && tcp_mask) {
-				if (tcp_mask->hdr.sent_seq ||
-				    tcp_mask->hdr.recv_ack ||
-				    tcp_mask->hdr.data_off ||
-				    tcp_mask->hdr.tcp_flags ||
-				    tcp_mask->hdr.rx_win ||
-				    tcp_mask->hdr.cksum ||
-				    tcp_mask->hdr.tcp_urp) {
-					rte_flow_error_set(error, EINVAL,
-						RTE_FLOW_ERROR_TYPE_ITEM, item,
-						"Invalid TCP mask");
-					return -rte_errno;
-				}
+			if (!(tcp_spec && tcp_mask))
+				break;
 
-				if (tcp_mask->hdr.src_port == UINT16_MAX) {
-					input_set |= IAVF_INSET_TCP_SRC_PORT;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
-				}
-				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
-					input_set |= IAVF_INSET_TCP_DST_PORT;
-					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
-				}
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM, item,
+					"Invalid TCP mask");
+				return -rte_errno;
+			}
 
-				if (tun_inner) {
-					input_set &= ~IAVF_PROT_TCP_OUTER;
-					input_set |= IAVF_PROT_TCP_INNER;
-				}
+			/* Mask for TCP src/dst ports not supported */
+			if (tcp_mask->hdr.src_port &&
+				tcp_mask->hdr.src_port != UINT16_MAX)
+				return -rte_errno;
+			if (tcp_mask->hdr.dst_port &&
+				tcp_mask->hdr.dst_port != UINT16_MAX)
+				return -rte_errno;
 
-				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
-					rte_memcpy(hdr->buffer,
-						&tcp_spec->hdr,
-						sizeof(tcp_spec->hdr));
-				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
-					rte_memcpy(hdr->buffer,
-						&tcp_spec->hdr,
-						sizeof(tcp_spec->hdr));
+			if (tcp_mask->hdr.src_port == UINT16_MAX) {
+				input_set |= IAVF_INSET_TCP_SRC_PORT;
+				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+			}
+			if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+				input_set |= IAVF_INSET_TCP_DST_PORT;
+				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+			}
+
+			if (tun_inner) {
+				input_set &= ~IAVF_PROT_TCP_OUTER;
+				input_set |= IAVF_PROT_TCP_INNER;
+			}
+
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				rte_memcpy(hdr->buffer,
+					&tcp_spec->hdr,
+					sizeof(tcp_spec->hdr));
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				rte_memcpy(hdr->buffer,
+					&tcp_spec->hdr,
+					sizeof(tcp_spec->hdr));
 			}
 
 			hdrs->count = ++layer;
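The rule the new checks enforce, distilled (an illustrative helper, not code
from the driver; the function name is hypothetical): a match field's mask is
acceptable only when it is all-zero (field ignored) or all-ones (exact
match), which is why the checks above reject any mask that is non-zero yet
not UINT16_MAX/UINT32_MAX.

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical helper, not present in iavf_fdir.c: accept only a
	 * zero (ignore) or full (exact-match) 16-bit port mask. */
	static inline bool
	fdir_port_mask_is_zero_or_full(uint16_t mask)
	{
		return mask == 0 || mask == UINT16_MAX;
	}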