From patchwork Wed Jun 21 19:07:11 2017
X-Patchwork-Id: 25609
From: Qi Zhang
To: wenzhuo.lu@intel.com, helin.zhang@intel.com
Cc: dev@dpdk.org, Qi Zhang
Date: Wed, 21 Jun 2017 15:07:11 -0400
Message-Id: <1498072031-4039-4-git-send-email-qi.z.zhang@intel.com>
In-Reply-To: <1498072031-4039-1-git-send-email-qi.z.zhang@intel.com>
References: <1495582134-13665-1-git-send-email-qi.z.zhang@intel.com>
 <1498072031-4039-1-git-send-email-qi.z.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v3 3/3] net/ixgbe: enable IPv6 for consistent API

Enable IPv6 support with the rte_flow API. Only signature match is
supported.

Signed-off-by: Qi Zhang
---
v2:
- fix flow type assignment.

 drivers/net/ixgbe/ixgbe_flow.c | 118 +++++++++++++++++++++++++++++++++++++----
 1 file changed, 107 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index c6653d7..b631d59 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1333,12 +1333,13 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[])
  * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
  * And get the flow director filter info BTW.
  * UDP/TCP/SCTP PATTERN:
- * The first not void item can be ETH or IPV4.
- * The second not void item must be IPV4 if the first one is ETH.
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
  * The next not void item could be UDP or TCP or SCTP (optional)
  * The next not void item could be RAW (for flexbyte, optional)
  * The next not void item must be END.
- * A Roughly Match pattern can appear at any place before END (optional)
+ * A Roughly Match pattern can appear at any place before END.
+ * Roughly Match is optional for IPV4 but is required for IPV6
  * MAC VLAN PATTERN:
  * The first not void item must be ETH.
  * The second not void item must be MAC VLAN.
@@ -1386,6 +1387,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_eth *eth_mask;
 	const struct rte_flow_item_ipv4 *ipv4_spec;
 	const struct rte_flow_item_ipv4 *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec;
+	const struct rte_flow_item_ipv6 *ipv6_mask;
 	const struct rte_flow_item_tcp *tcp_spec;
 	const struct rte_flow_item_tcp *tcp_mask;
 	const struct rte_flow_item_udp *udp_spec;
@@ -1397,7 +1400,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 	const struct rte_flow_item_raw *raw_mask;
 	const struct rte_flow_item_raw *raw_spec;
 
-	uint32_t j;
+	uint8_t j;
 
 	if (!pattern) {
 		rte_flow_error_set(error, EINVAL,
@@ -1436,6 +1439,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 	item = next_no_roughly_pattern(pattern, NULL);
 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
 	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
 	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
 	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
@@ -1588,7 +1592,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 		}
 	}
 
-	/* Get the IP info. */
+	/* Get the IPV4 info. */
 	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
 		/**
 		 * Set the flow type even if there's no content
@@ -1662,14 +1666,106 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 		}
 	}
 
+	/* Get the IPV6 info. */
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		/**
+		 * Set the flow type even if there's no content
+		 * as we must have a flow type.
+		 */
+		rule->ixgbe_fdir.formatted.flow_type =
+			IXGBE_ATR_FLOW_TYPE_IPV6;
+
+		/**
+		 * 1. must signature match
+		 * 2. not support last
+		 * 3. mask must not null
+		 */
+		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
+		    item->last ||
+		    !item->mask) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+
+		rule->b_mask = TRUE;
+		ipv6_mask =
+			(const struct rte_flow_item_ipv6 *)item->mask;
+		if (ipv6_mask->hdr.vtc_flow ||
+		    ipv6_mask->hdr.payload_len ||
+		    ipv6_mask->hdr.proto ||
+		    ipv6_mask->hdr.hop_limits) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+
+		/* check src addr mask */
+		for (j = 0; j < 16; j++) {
+			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+				rule->mask.src_ipv6_mask |= 1 << j;
+			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
+		}
+
+		/* check dst addr mask */
+		for (j = 0; j < 16; j++) {
+			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+				rule->mask.dst_ipv6_mask |= 1 << j;
+			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Not supported by fdir filter");
+				return -rte_errno;
+			}
+		}
+
+		if (item->spec) {
+			rule->b_spec = TRUE;
+			ipv6_spec =
+				(const struct rte_flow_item_ipv6 *)item->spec;
+			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
+				   ipv6_spec->hdr.src_addr, 16);
+			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
+				   ipv6_spec->hdr.dst_addr, 16);
+		}
+
+		/**
+		 * Check if the next not void item is
+		 * TCP or UDP or SCTP or END.
+		 */
+		item = next_no_roughly_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+		    item->type != RTE_FLOW_ITEM_TYPE_END &&
+		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by fdir filter");
+			return -rte_errno;
+		}
+	}
+
 	/* Get the TCP info. */
 	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
 		/**
 		 * Set the flow type even if there's no content
 		 * as we must have a flow type.
 		 */
-		rule->ixgbe_fdir.formatted.flow_type =
-			IXGBE_ATR_FLOW_TYPE_TCPV4;
+		rule->ixgbe_fdir.formatted.flow_type |=
+			IXGBE_ATR_L4TYPE_TCP;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
@@ -1733,8 +1829,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 		 * Set the flow type even if there's no content
 		 * as we must have a flow type.
 		 */
-		rule->ixgbe_fdir.formatted.flow_type =
-			IXGBE_ATR_FLOW_TYPE_UDPV4;
+		rule->ixgbe_fdir.formatted.flow_type |=
+			IXGBE_ATR_L4TYPE_UDP;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
@@ -1793,8 +1889,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
 		 * Set the flow type even if there's no content
 		 * as we must have a flow type.
 		 */
-		rule->ixgbe_fdir.formatted.flow_type =
-			IXGBE_ATR_FLOW_TYPE_SCTPV4;
+		rule->ixgbe_fdir.formatted.flow_type |=
+			IXGBE_ATR_L4TYPE_SCTP;
 		/*Not supported last point for range*/
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
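
For reference, below is a minimal usage sketch (not part of the patch) of how an
application might create an IPv6 signature-match flow director rule through the
generic flow API once this series is applied. The helper name
ipv6_sig_fdir_rule(), port id 0, queue index 1, the addresses and the TCP port
are made-up values; the port is assumed to have been configured with
fdir_conf.mode = RTE_FDIR_MODE_SIGNATURE, and the signature ("Roughly") match
item that the parser requires for IPv6 is shown under the name it carries in the
flow API as merged, RTE_FLOW_ITEM_TYPE_FUZZY, which may differ from the name
used at this revision of the series.

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Illustrative only: IPv6 + TCP fdir rule in signature mode, hits go to a
 * queue.  The parser above only accepts per-byte IPv6 masks of 0x00 or 0xff,
 * so a full /128 match is used on both addresses. */
static struct rte_flow *
ipv6_sig_fdir_rule(uint8_t port_id, struct rte_flow_error *err)
{
	static const uint8_t src[16] = {
		0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 };
	static const uint8_t dst[16] = {
		0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x02 };
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_fuzzy fuzzy = { .thresh = 1 };
	struct rte_flow_item_ipv6 ipv6_spec, ipv6_mask;
	struct rte_flow_item_tcp tcp_spec, tcp_mask;
	struct rte_flow_action_queue queue = { .index = 1 };

	memset(&ipv6_spec, 0, sizeof(ipv6_spec));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	memset(&tcp_spec, 0, sizeof(tcp_spec));
	memset(&tcp_mask, 0, sizeof(tcp_mask));

	/* Match one exact src/dst address pair. */
	memcpy(ipv6_spec.hdr.src_addr, src, 16);
	memcpy(ipv6_spec.hdr.dst_addr, dst, 16);
	memset(ipv6_mask.hdr.src_addr, 0xff, 16);
	memset(ipv6_mask.hdr.dst_addr, 0xff, 16);

	/* Match TCP destination port 80. */
	tcp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
	tcp_mask.hdr.dst_port = UINT16_MAX;

	struct rte_flow_item pattern[] = {
		/* Signature (fuzzy/roughly) match item: required for IPv6. */
		{ .type = RTE_FLOW_ITEM_TYPE_FUZZY, .spec = &fuzzy },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6,
		  .spec = &ipv6_spec, .mask = &ipv6_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

With this patch the parser composes the hardware flow type for such a rule by
setting IXGBE_ATR_FLOW_TYPE_IPV6 at the IPv6 item and OR-ing in the L4 type
(IXGBE_ATR_L4TYPE_TCP here) when the TCP item is parsed.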