From patchwork Wed Jul 5 02:21:35 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Qi Zhang X-Patchwork-Id: 26485 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [IPv6:::1]) by dpdk.org (Postfix) with ESMTP id C2FC1532C; Wed, 5 Jul 2017 11:28:49 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by dpdk.org (Postfix) with ESMTP id B19E22C13 for ; Wed, 5 Jul 2017 11:28:44 +0200 (CEST) Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga102.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 05 Jul 2017 02:28:44 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos; i="5.40,311,1496127600"; d="scan'208"; a="1190690797" Received: from dpdk777.sh.intel.com ([10.67.111.90]) by fmsmga002.fm.intel.com with ESMTP; 05 Jul 2017 02:28:43 -0700 From: Qi Zhang To: wenzhuo.lu@intel.com, helin.zhang@intel.com Cc: dev@dpdk.org, Qi Zhang Date: Tue, 4 Jul 2017 22:21:35 -0400 Message-Id: <1499221296-7251-3-git-send-email-qi.z.zhang@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1499221296-7251-1-git-send-email-qi.z.zhang@intel.com> References: <1495828350-10259-1-git-send-email-qi.z.zhang@intel.com> <1499221296-7251-1-git-send-email-qi.z.zhang@intel.com> Subject: [dpdk-dev] [PATCH v4 2/3] net/ixgbe: enable signature match for consistent API X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Enable signature match for rte_flow API. RTE_FLOW_ITEM_TYPE_FUZZY specifies a signature match. Signed-off-by: Qi Zhang --- v4: - rename roughly to fuzzy v2: - will check fuzzy match's last and mask value. 
drivers/net/ixgbe/ixgbe_flow.c | 91 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 79 insertions(+), 12 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index 067252a..593f267 100644 --- a/drivers/net/ixgbe/ixgbe_flow.c +++ b/drivers/net/ixgbe/ixgbe_flow.c @@ -1268,6 +1268,67 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, return 0; } +/* search next no void pattern and skip fuzzy */ +static inline +const struct rte_flow_item *next_no_fuzzy_pattern( + const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + next_no_void_pattern(pattern, cur); + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY) + return next; + next = next_no_void_pattern(pattern, next); + } +} + +static inline uint8_t signature_match(const struct rte_flow_item pattern[]) +{ + const struct rte_flow_item_fuzzy *spec, *last, *mask; + const struct rte_flow_item *item; + uint32_t sh, lh, mh; + int i = 0; + + while (1) { + item = pattern + i; + if (item->type == RTE_FLOW_ITEM_TYPE_END) + break; + + if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) { + spec = + (const struct rte_flow_item_fuzzy *)item->spec; + last = + (const struct rte_flow_item_fuzzy *)item->last; + mask = + (const struct rte_flow_item_fuzzy *)item->mask; + + if (!spec || !mask) + return 0; + + sh = spec->thresh; + + if (!last) + lh = sh; + else + lh = last->thresh; + + mh = mask->thresh; + sh = sh & mh; + lh = lh & mh; + + if (!sh || sh > lh) + return 0; + + return 1; + } + + i++; + } + + return 0; +} + /** * Parse the rule to see if it is a IP or MAC VLAN flow director rule. * And get the flow director filter info BTW. @@ -1277,6 +1338,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, * The next not void item could be UDP or TCP or SCTP (optional) * The next not void item could be RAW (for flexbyte, optional) * The next not void item must be END. 
+ * A Fuzzy Match pattern can appear at any place before END (optional) * MAC VLAN PATTERN: * The first not void item must be ETH. * The second not void item must be MAC VLAN. @@ -1371,7 +1433,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * The first not void item should be * MAC or IPv4 or TCP or UDP or SCTP. */ - item = next_no_void_pattern(pattern, NULL); + item = next_no_fuzzy_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_TCP && @@ -1384,7 +1446,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } - rule->mode = RTE_FDIR_MODE_PERFECT; + if (signature_match(pattern)) + rule->mode = RTE_FDIR_MODE_SIGNATURE; + else + rule->mode = RTE_FDIR_MODE_PERFECT; /*Not supported last point for range*/ if (item->last) { @@ -1421,14 +1486,13 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->mask) { - /* If ethernet has meaning, it means MAC VLAN mode. */ - rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; rule->b_mask = TRUE; eth_mask = (const struct rte_flow_item_eth *)item->mask; /* Ether type should be masked. */ - if (eth_mask->type) { + if (eth_mask->type || + rule->mode == RTE_FDIR_MODE_SIGNATURE) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1436,6 +1500,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } + /* If ethernet has meaning, it means MAC VLAN mode. */ + rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; + /** * src MAC address must be masked, * and don't support dst MAC address mask. @@ -1464,7 +1531,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Check if the next not void item is vlan or ipv4. * IPv6 is not supported. 
*/ - item = next_no_void_pattern(pattern, item); + item = next_no_fuzzy_pattern(pattern, item); if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1511,7 +1578,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, /* More than one tags are not supported. */ /* Next not void item must be END */ - item = next_no_void_pattern(pattern, item); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -1581,7 +1648,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Check if the next not void item is * TCP or UDP or SCTP or END. */ - item = next_no_void_pattern(pattern, item); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_TCP && item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_SCTP && @@ -1648,7 +1715,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, tcp_spec->hdr.dst_port; } - item = next_no_void_pattern(pattern, item); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_RAW && item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1708,7 +1775,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, udp_spec->hdr.dst_port; } - item = next_no_void_pattern(pattern, item); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_RAW && item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1770,7 +1837,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, sctp_spec->hdr.dst_port; } - item = next_no_void_pattern(pattern, item); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_RAW && item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, 
sizeof(struct ixgbe_fdir_rule)); @@ -1854,7 +1921,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->type != RTE_FLOW_ITEM_TYPE_END) { /* check if the next not void item is END */ - item = next_no_void_pattern(pattern, item); + item = next_no_fuzzy_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL,