From patchwork Wed Jun 7 06:53:58 2017
X-Patchwork-Submitter: "Xing, Beilei"
X-Patchwork-Id: 25079
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Beilei Xing <beilei.xing@intel.com>
To: jingjing.wu@intel.com
Cc: dev@dpdk.org
Date: Wed, 7 Jun 2017 14:53:58 +0800
Message-Id: <1496818439-38686-2-git-send-email-beilei.xing@intel.com>
In-Reply-To: <1496818439-38686-1-git-send-email-beilei.xing@intel.com>
References: <1496300191-137516-1-git-send-email-beilei.xing@intel.com>
 <1496818439-38686-1-git-send-email-beilei.xing@intel.com>
Subject: [dpdk-dev] [PATCH v3 1/2] net/i40e: optimize vxlan parsing function

The current vxlan parsing function is not easy to read where it parses
the filter type. This patch refactors the function to make it more
readable.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_flow.c | 196 ++++++++++++++-----------------------------
 1 file changed, 63 insertions(+), 133 deletions(-)

diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 37b55e7..b4ba555 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1268,27 +1268,27 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static uint16_t i40e_supported_tunnel_filter_types[] = {
+	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
+	ETH_TUNNEL_FILTER_IVLAN,
+	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
+	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
+	ETH_TUNNEL_FILTER_IMAC,
+	ETH_TUNNEL_FILTER_IMAC,
+};
+
 static int
-i40e_check_tenant_id_mask(const uint8_t *mask)
+i40e_check_tunnel_filter_type(uint8_t filter_type)
 {
-	uint32_t j;
-	int is_masked = 0;
-
-	for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
-		if (*(mask + j) == UINT8_MAX) {
-			if (j > 0 && (*(mask + j) != *(mask + j - 1)))
-				return -EINVAL;
-			is_masked = 0;
-		} else if (*(mask + j) == 0) {
-			if (j > 0 && (*(mask + j) != *(mask + j - 1)))
-				return -EINVAL;
-			is_masked = 1;
-		} else {
-			return -EINVAL;
-		}
+	uint8_t i;
+
+	for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
+		if (filter_type == i40e_supported_tunnel_filter_types[i])
+			return 0;
 	}
 
-	return is_masked;
+	return -1;
 }
 
 /* 1. Last in item should be NULL as range is not supported.
@@ -1308,18 +1308,17 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 	const struct rte_flow_item *item = pattern;
 	const struct rte_flow_item_eth *eth_spec;
 	const struct rte_flow_item_eth *eth_mask;
-	const struct rte_flow_item_eth *o_eth_spec = NULL;
-	const struct rte_flow_item_eth *o_eth_mask = NULL;
-	const struct rte_flow_item_vxlan *vxlan_spec = NULL;
-	const struct rte_flow_item_vxlan *vxlan_mask = NULL;
-	const struct rte_flow_item_eth *i_eth_spec = NULL;
-	const struct rte_flow_item_eth *i_eth_mask = NULL;
-	const struct rte_flow_item_vlan *vlan_spec = NULL;
-	const struct rte_flow_item_vlan *vlan_mask = NULL;
+	const struct rte_flow_item_vxlan *vxlan_spec;
+	const struct rte_flow_item_vxlan *vxlan_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+	uint8_t filter_type = 0;
 	bool is_vni_masked = 0;
+	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
 	enum rte_flow_item_type item_type;
 	bool vxlan_flag = 0;
 	uint32_t tenant_id_be = 0;
+	int ret;
 
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
@@ -1334,6 +1333,11 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			eth_spec = (const struct rte_flow_item_eth *)item->spec;
 			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+			/* Check if ETH item is used for place holder.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
 			if ((!eth_spec && eth_mask) ||
 			    (eth_spec && !eth_mask)) {
 				rte_flow_error_set(error, EINVAL,
@@ -1357,50 +1361,40 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 					return -rte_errno;
 				}
 
-				if (!vxlan_flag)
+				if (!vxlan_flag) {
 					rte_memcpy(&filter->outer_mac,
 						   &eth_spec->dst,
 						   ETHER_ADDR_LEN);
-				else
+					filter_type |= ETH_TUNNEL_FILTER_OMAC;
+				} else {
 					rte_memcpy(&filter->inner_mac,
 						   &eth_spec->dst,
 						   ETHER_ADDR_LEN);
+					filter_type |= ETH_TUNNEL_FILTER_IMAC;
+				}
 			}
-
-			if (!vxlan_flag) {
-				o_eth_spec = eth_spec;
-				o_eth_mask = eth_mask;
-			} else {
-				i_eth_spec = eth_spec;
-				i_eth_mask = eth_mask;
-			}
-
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec =
+				(const struct rte_flow_item_vlan *)item->spec;
+			vlan_mask =
+				(const struct rte_flow_item_vlan *)item->mask;
-			if (vxlan_flag) {
-				vlan_spec =
-				(const struct rte_flow_item_vlan *)item->spec;
-				vlan_mask =
-				(const struct rte_flow_item_vlan *)item->mask;
-				if (!(vlan_spec && vlan_mask)) {
-					rte_flow_error_set(error, EINVAL,
-						   RTE_FLOW_ERROR_TYPE_ITEM,
-						   item,
-						   "Invalid vlan item");
-					return -rte_errno;
-				}
-			} else {
-				if (vlan_spec || vlan_mask)
-					rte_flow_error_set(error, EINVAL,
+			if (!(vlan_spec && vlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
 						   "Invalid vlan item");
 				return -rte_errno;
 			}
+
+			if (vlan_spec && vlan_mask) {
+				if (vlan_mask->tci ==
+				    rte_cpu_to_be_16(I40E_TCI_MASK))
+					filter->inner_vlan =
+					      rte_be_to_cpu_16(vlan_spec->tci) &
+					      I40E_TCI_MASK;
+				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
@@ -1447,7 +1441,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 				(const struct rte_flow_item_vxlan *)item->mask;
 			/* Check if VXLAN item is used to describe protocol.
 			 * If yes, both spec and mask should be NULL.
-			 * If no, either spec or mask shouldn't be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
 			 */
 			if ((!vxlan_spec && vxlan_mask) ||
 			    (vxlan_spec && !vxlan_mask)) {
@@ -1459,17 +1453,25 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 			}
 
 			/* Check if VNI is masked. */
-			if (vxlan_mask) {
+			if (vxlan_spec && vxlan_mask) {
 				is_vni_masked =
-					i40e_check_tenant_id_mask(vxlan_mask->vni);
-				if (is_vni_masked < 0) {
+					!!memcmp(vxlan_mask->vni, vni_mask,
+						 RTE_DIM(vni_mask));
+				if (is_vni_masked) {
 					rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
 						   "Invalid VNI mask");
 					return -rte_errno;
 				}
+
+				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+					   vxlan_spec->vni, 3);
+				filter->tenant_id =
+					rte_be_to_cpu_32(tenant_id_be);
+				filter_type |= ETH_TUNNEL_FILTER_TENID;
 			}
+
 			vxlan_flag = 1;
 			break;
 		default:
@@ -1477,87 +1479,15 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 		}
 	}
 
-	/* Check specification and mask to get the filter type */
-	if (vlan_spec && vlan_mask &&
-	    (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
-		/* If there's inner vlan */
-		filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
-			& I40E_TCI_MASK;
-		if (vxlan_spec && vxlan_mask && !is_vni_masked) {
-			/* If there's vxlan */
-			rte_memcpy(((uint8_t *)&tenant_id_be + 1),
-				   vxlan_spec->vni, 3);
-			filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
-			if (!o_eth_spec && !o_eth_mask &&
-				i_eth_spec && i_eth_mask)
-				filter->filter_type =
-					RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
-			else {
-				rte_flow_error_set(error, EINVAL,
-						   RTE_FLOW_ERROR_TYPE_ITEM,
-						   NULL,
-						   "Invalid filter type");
-				return -rte_errno;
-			}
-		} else if (!vxlan_spec && !vxlan_mask) {
-			/* If there's no vxlan */
-			if (!o_eth_spec && !o_eth_mask &&
-				i_eth_spec && i_eth_mask)
-				filter->filter_type =
-					RTE_TUNNEL_FILTER_IMAC_IVLAN;
-			else {
-				rte_flow_error_set(error, EINVAL,
-						   RTE_FLOW_ERROR_TYPE_ITEM,
-						   NULL,
-						   "Invalid filter type");
-				return -rte_errno;
-			}
-		} else {
-			rte_flow_error_set(error, EINVAL,
-					   RTE_FLOW_ERROR_TYPE_ITEM,
-					   NULL,
-					   "Invalid filter type");
-			return -rte_errno;
-		}
-	} else if ((!vlan_spec && !vlan_mask) ||
-		   (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
-		/* If there's no inner vlan */
-		if (vxlan_spec && vxlan_mask && !is_vni_masked) {
-			/* If there's vxlan */
-			rte_memcpy(((uint8_t *)&tenant_id_be + 1),
-				   vxlan_spec->vni, 3);
-			filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
-			if (!o_eth_spec && !o_eth_mask &&
-				i_eth_spec && i_eth_mask)
-				filter->filter_type =
-					RTE_TUNNEL_FILTER_IMAC_TENID;
-			else if (o_eth_spec && o_eth_mask &&
-				i_eth_spec && i_eth_mask)
-				filter->filter_type =
-					RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
-		} else if (!vxlan_spec && !vxlan_mask) {
-			/* If there's no vxlan */
-			if (!o_eth_spec && !o_eth_mask &&
-				i_eth_spec && i_eth_mask) {
-				filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
-			} else {
-				rte_flow_error_set(error, EINVAL,
-					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-					   "Invalid filter type");
-				return -rte_errno;
-			}
-		} else {
-			rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-				   "Invalid filter type");
-			return -rte_errno;
-		}
-	} else {
+	ret = i40e_check_tunnel_filter_type(filter_type);
+	if (ret < 0) {
 		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-				   "Not supported by tunnel filter.");
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL,
+				   "Invalid filter type");
 		return -rte_errno;
 	}
+	filter->filter_type = filter_type;
 
 	filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
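
[Editor's note] The essence of the refactor above is replacing per-combination
if/else chains with a flag word that is accumulated while walking the pattern
and then validated against a whitelist table. Below is a minimal, self-contained
sketch of that technique; the FILTER_* values, names, and main() driver are
illustrative stand-ins, not the driver's actual ETH_TUNNEL_FILTER_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag bits; the real driver uses ETH_TUNNEL_FILTER_* macros. */
#define FILTER_OMAC  0x01 /* outer MAC matched */
#define FILTER_IMAC  0x02 /* inner MAC matched */
#define FILTER_IVLAN 0x04 /* inner VLAN matched */
#define FILTER_TENID 0x08 /* tenant ID (VNI) matched */

/* Whitelist of flag combinations the hardware supports. */
static const uint16_t supported_types[] = {
	FILTER_IMAC | FILTER_TENID | FILTER_IVLAN,
	FILTER_IMAC | FILTER_IVLAN,
	FILTER_IMAC | FILTER_TENID,
	FILTER_OMAC | FILTER_TENID | FILTER_IMAC,
	FILTER_IMAC,
};

/* Validate the accumulated combination with a single table lookup. */
static int check_filter_type(uint16_t filter_type)
{
	size_t i;

	for (i = 0; i < sizeof(supported_types) / sizeof(supported_types[0]); i++) {
		if (filter_type == supported_types[i])
			return 0;
	}
	return -1;
}

int main(void)
{
	uint16_t filter_type = 0;

	/* Pretend the flow pattern contained an inner MAC and a VNI. */
	filter_type |= FILTER_IMAC;
	filter_type |= FILTER_TENID;

	printf("combination %s\n",
	       check_filter_type(filter_type) == 0 ? "supported" : "rejected");
	return 0;
}

The payoff is that supporting a new hardware combination becomes a one-line
table addition instead of another branch in a nested conditional.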
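[Editor's note] The other simplification is the VNI handling: the removed
i40e_check_tenant_id_mask() loop becomes one memcmp() against an all-ones
template, after which the 3-byte VNI is copied into bytes 1-3 of a big-endian
32-bit word. A rough standalone sketch, using libc memcpy() and a manual byte
swap in place of rte_memcpy() and rte_be_to_cpu_32():

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	/* The VXLAN VNI is a 24-bit field carried in 3 bytes. */
	const uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	const uint8_t spec_vni[] = {0x12, 0x34, 0x56};  /* example VNI */
	const uint8_t mask_vni[] = {0xFF, 0xFF, 0xFF};  /* user-supplied mask */

	/* Only a fully-set VNI mask is accepted: one memcmp() replaces the
	 * old byte-by-byte loop.
	 */
	if (memcmp(mask_vni, vni_mask, sizeof(vni_mask)) != 0) {
		puts("Invalid VNI mask");
		return 1;
	}

	/* Copy the 3 VNI bytes into the low 3 bytes of a big-endian 32-bit
	 * value, then convert to host order (the patch does the same with
	 * rte_memcpy() and rte_be_to_cpu_32()).
	 */
	uint32_t tenant_id_be = 0;
	memcpy((uint8_t *)&tenant_id_be + 1, spec_vni, 3);

	const uint8_t *b = (const uint8_t *)&tenant_id_be;
	uint32_t tenant_id = ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
			     ((uint32_t)b[2] << 8) | (uint32_t)b[3];

	printf("tenant_id = 0x%06x\n", tenant_id);  /* prints 0x123456 */
	return 0;
}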