From patchwork Tue Dec 26 09:39:07 2017
X-Patchwork-Submitter: "Zhao1, Wei"
X-Patchwork-Id: 32749
From: Wei Zhao <wei.zhao1@intel.com>
To: dev@dpdk.org
Cc: wenzhuo.lu@intel.com, Wei Zhao <wei.zhao1@intel.com>
Date: Tue, 26 Dec 2017 17:39:07 +0800
Message-Id: <20171226093907.83005-1-wei.zhao1@intel.com>
In-Reply-To: <20171226092828.82837-1-wei.zhao1@intel.com>
References: <20171226092828.82837-1-wei.zhao1@intel.com>
Subject: [dpdk-dev] [PATCH v4] net/ixgbe: add flow parser ntuple support

The ixgbe ntuple filter in rte_flow needs to support diverting packets
that match fewer than the full 5-tuple of parameters, so add this
support to the parser code.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>

---

v2:
-fix coding style issue.

v3:
-add parser vlan pattern code.

v4:
-fix patch check issue.
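As an illustration of what this change permits, below is a minimal
sketch (not part of the patch) of an rte_flow rule carrying fewer than
the full 5-tuple: only the IPv4 destination address is specified, the
empty ETH item is skipped, and the empty TCP item makes the parser
jump straight to the actions. The function name, the queue index and
the addresses are illustrative assumptions, not from this patch.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical helper: create an ntuple rule matching only the IPv4
 * destination address, everything else wildcarded.
 */
static struct rte_flow *
create_partial_ntuple_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };

	/* Match dst 192.168.1.1 only (illustrative address); src address,
	 * protocol and L4 ports stay unmasked, i.e. less than 5 tuples.
	 */
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.dst_addr = RTE_BE32(0xc0a80101),
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
	};

	struct rte_flow_item pattern[] = {
		/* no spec/mask: the parser skips the Ethernet item */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		/* no spec/mask: with this patch the parser goes to actions */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	struct rte_flow_action_queue queue = { .index = 1 }; /* illustrative */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}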
---
 drivers/net/ixgbe/ixgbe_flow.c | 136 +++++++++++++++++++++++++++++------------
 1 file changed, 96 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 887d933..a066917 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -207,6 +207,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_udp *udp_mask;
 	const struct rte_flow_item_sctp *sctp_spec;
 	const struct rte_flow_item_sctp *sctp_mask;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+	struct rte_flow_item_eth eth_null;
+	struct rte_flow_item_vlan vlan_null;
 
 	if (!pattern) {
 		rte_flow_error_set(error,
@@ -228,6 +234,9 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
 #ifdef RTE_LIBRTE_SECURITY
 	/**
 	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
@@ -277,6 +286,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 	/* Skip Ethernet */
 	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		eth_mask = (const struct rte_flow_item_eth *)item->mask;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error,
@@ -287,15 +298,20 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		}
 		/* if the first item is MAC, the content should be NULL */
-		if (item->spec || item->mask) {
+		if ((item->spec || item->mask) &&
+			(memcmp(eth_spec, &eth_null,
+				sizeof(struct rte_flow_item_eth)) ||
+			memcmp(eth_mask, &eth_null,
+				sizeof(struct rte_flow_item_eth)))) {
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by ntuple filter");
 			return -rte_errno;
 		}
-		/* check if the next not void item is IPv4 */
+		/* check if the next not void item is IPv4 or Vlan */
 		item = next_no_void_pattern(pattern, item);
-		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
 			rte_flow_error_set(error,
 				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Not supported by ntuple filter");
@@ -303,48 +319,83 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		}
 	}
 
-	/* get the IPv4 info */
-	if (!item->spec || !item->mask) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Invalid ntuple mask");
-		return -rte_errno;
-	}
-	/*Not supported last point for range*/
-	if (item->last) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			item, "Not supported last point for range");
-		return -rte_errno;
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
 
-	}
+		}
+		/* the content should be NULL */
+		if ((item->spec || item->mask) &&
+			(memcmp(vlan_spec, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)) ||
+			memcmp(vlan_mask, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)))) {
 
-	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
-	/**
-	 * Only support src & dst addresses, protocol,
-	 * others should be masked.
-	 */
-	if (ipv4_mask->hdr.version_ihl ||
-	    ipv4_mask->hdr.type_of_service ||
-	    ipv4_mask->hdr.total_length ||
-	    ipv4_mask->hdr.packet_id ||
-	    ipv4_mask->hdr.fragment_offset ||
-	    ipv4_mask->hdr.time_to_live ||
-	    ipv4_mask->hdr.hdr_checksum) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		/* check if the next not void item is IPv4 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
 			rte_flow_error_set(error,
-			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
-			item, "Not supported by ntuple filter");
-		return -rte_errno;
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
 	}
 
-	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
-	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
-	filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+	if (item->mask) {
+		/* get the IPv4 info */
+		if (!item->spec || !item->mask) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ntuple mask");
+			return -rte_errno;
+		}
+		/*Not supported last point for range*/
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
 
-	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
-	filter->dst_ip = ipv4_spec->hdr.dst_addr;
-	filter->src_ip = ipv4_spec->hdr.src_addr;
-	filter->proto = ipv4_spec->hdr.next_proto_id;
+		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+		/**
+		 * Only support src & dst addresses, protocol,
+		 * others should be masked.
+		 */
+		if (ipv4_mask->hdr.version_ihl ||
+		    ipv4_mask->hdr.type_of_service ||
+		    ipv4_mask->hdr.total_length ||
+		    ipv4_mask->hdr.packet_id ||
+		    ipv4_mask->hdr.fragment_offset ||
+		    ipv4_mask->hdr.time_to_live ||
+		    ipv4_mask->hdr.hdr_checksum) {
+			rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+
+		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+		filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+		ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+		filter->dst_ip = ipv4_spec->hdr.dst_addr;
+		filter->src_ip = ipv4_spec->hdr.src_addr;
+		filter->proto = ipv4_spec->hdr.next_proto_id;
+	}
 
 	/* check if the next not void item is TCP or UDP */
 	item = next_no_void_pattern(pattern, item);
@@ -359,8 +410,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* get the TCP/UDP info */
 	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+		(!item->spec && !item->mask)) {
+		goto action;
+	}
+
+	/* get the TCP/UDP/SCTP info */
+	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
 		(!item->spec || !item->mask)) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
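For the VLAN handling added in v3, a hypothetical pattern (illustrative,
not from this patch) would look as follows: an empty VLAN item between
ETH and IPV4 is now accepted, provided its spec and mask are NULL or
all-zero, and the empty UDP item again sends the parser straight to the
actions. The ipv4_spec/ipv4_mask structures are the same illustrative
ones as in the sketch above.

	const struct rte_flow_item vlan_ntuple_pattern[] = {
		/* empty: skipped by the parser */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		/* empty: content must stay NULL or zeroed */
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		/* empty: L4 ports wildcarded */
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};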