From patchwork Fri May 26 19:52:29 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Qi Zhang X-Patchwork-Id: 24687 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [IPv6:::1]) by dpdk.org (Postfix) with ESMTP id 5DE673790; Sat, 27 May 2017 04:59:17 +0200 (CEST) Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by dpdk.org (Postfix) with ESMTP id E86102C16 for ; Sat, 27 May 2017 04:59:07 +0200 (CEST) Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 26 May 2017 19:59:07 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.38,400,1491289200"; d="scan'208";a="107611230" Received: from zhangqi.sh.intel.com ([10.239.129.189]) by fmsmga006.fm.intel.com with ESMTP; 26 May 2017 19:59:06 -0700 From: Qi Zhang To: helin.zhang@intel.com, wenzhuo.lu@intel.com Cc: dev@dpdk.org, Qi Zhang Date: Fri, 26 May 2017 15:52:29 -0400 Message-Id: <1495828350-10259-3-git-send-email-qi.z.zhang@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1495828350-10259-1-git-send-email-qi.z.zhang@intel.com> References: <1495828350-10259-1-git-send-email-qi.z.zhang@intel.com> Subject: [dpdk-dev] [PATCH 2/3] net/ixgbe: enable signature match for consistent API X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Enable signature match for rte_flow API. RTE_FLOW_ITEM_TYPE_ROUGHLY specifies a signature match. 
Signed-off-by: Qi Zhang Acked-by: Wenzhuo Lu --- drivers/net/ixgbe/ixgbe_flow.c | 71 +++++++++++++++++++++++++++++++++++------- 1 file changed, 59 insertions(+), 12 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index c6fb2f8..0638c32 100644 --- a/drivers/net/ixgbe/ixgbe_flow.c +++ b/drivers/net/ixgbe/ixgbe_flow.c @@ -1268,6 +1268,47 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, return 0; } +/* search next no void pattern and skip roughly */ +static inline +const struct rte_flow_item *next_no_roughly_pattern( + const struct rte_flow_item pattern[], + const struct rte_flow_item *cur) +{ + const struct rte_flow_item *next = + next_no_void_pattern(pattern, cur); + while (1) { + if (next->type != RTE_FLOW_ITEM_TYPE_ROUGHLY) + return next; + next = next_no_void_pattern(pattern, next); + } +} + +static inline uint8_t signature_match(const struct rte_flow_item pattern[]) +{ + const struct rte_flow_item_roughly *spec; + const struct rte_flow_item *item; + int i = 0; + + while (1) { + item = pattern + i; + if (item->type == RTE_FLOW_ITEM_TYPE_END) + break; + + if (item->type == RTE_FLOW_ITEM_TYPE_ROUGHLY) { + spec = + (const struct rte_flow_item_roughly *)item->spec; + if (spec->threshold) + return 1; + else + return 0; + } + + i++; + } + + return 0; +} + /** * Parse the rule to see if it is a IP or MAC VLAN flow director rule. * And get the flow director filter info BTW. @@ -1277,6 +1318,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, * The next not void item could be UDP or TCP or SCTP (optional) * The next not void item could be RAW (for flexbyte, optional) * The next not void item must be END. + * A Roughly Match pattern can appear at any place before END (optional) * MAC VLAN PATTERN: * The first not void item must be ETH. * The second not void item must be MAC VLAN. 
@@ -1371,7 +1413,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * The first not void item should be * MAC or IPv4 or TCP or UDP or SCTP. */ - item = next_no_void_pattern(pattern, NULL); + item = next_no_roughly_pattern(pattern, NULL); if (item->type != RTE_FLOW_ITEM_TYPE_ETH && item->type != RTE_FLOW_ITEM_TYPE_IPV4 && item->type != RTE_FLOW_ITEM_TYPE_TCP && @@ -1384,7 +1426,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } - rule->mode = RTE_FDIR_MODE_PERFECT; + if (signature_match(pattern)) + rule->mode = RTE_FDIR_MODE_SIGNATURE; + else + rule->mode = RTE_FDIR_MODE_PERFECT; /*Not supported last point for range*/ if (item->last) { @@ -1421,14 +1466,13 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->mask) { - /* If ethernet has meaning, it means MAC VLAN mode. */ - rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; rule->b_mask = TRUE; eth_mask = (const struct rte_flow_item_eth *)item->mask; /* Ether type should be masked. */ - if (eth_mask->type) { + if (eth_mask->type || + rule->mode == RTE_FDIR_MODE_SIGNATURE) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1436,6 +1480,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, return -rte_errno; } + /* If ethernet has meaning, it means MAC VLAN mode. */ + rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; + /** * src MAC address must be masked, * and don't support dst MAC address mask. @@ -1464,7 +1511,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Check if the next not void item is vlan or ipv4. * IPv6 is not supported. 
*/ - item = next_no_void_pattern(pattern, item); + item = next_no_roughly_pattern(pattern, item); if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1513,7 +1560,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, /** * Check if the next not void item is not vlan. */ - item = next_no_void_pattern(pattern, item); + item = next_no_roughly_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -1583,7 +1630,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, * Check if the next not void item is * TCP or UDP or SCTP or END. */ - item = next_no_void_pattern(pattern, item); + item = next_no_roughly_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_TCP && item->type != RTE_FLOW_ITEM_TYPE_UDP && item->type != RTE_FLOW_ITEM_TYPE_SCTP && @@ -1650,7 +1697,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, tcp_spec->hdr.dst_port; } - item = next_no_void_pattern(pattern, item); + item = next_no_roughly_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_RAW && item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1710,7 +1757,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, udp_spec->hdr.dst_port; } - item = next_no_void_pattern(pattern, item); + item = next_no_roughly_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_RAW && item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1772,7 +1819,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, sctp_spec->hdr.dst_port; } - item = next_no_void_pattern(pattern, item); + item = next_no_roughly_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_RAW && item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct 
ixgbe_fdir_rule)); @@ -1860,7 +1907,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr, if (item->type != RTE_FLOW_ITEM_TYPE_END) { /* check if the next not void item is END */ - item = next_no_void_pattern(pattern, item); + item = next_no_roughly_pattern(pattern, item); if (item->type != RTE_FLOW_ITEM_TYPE_END) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL,