From patchwork Fri Dec 2 11:53:34 2016
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Xing, Beilei" <beilei.xing@intel.com>
X-Patchwork-Id: 17375
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Beilei Xing <beilei.xing@intel.com>
To: jingjing.wu@intel.com, helin.zhang@intel.com
Cc: dev@dpdk.org, wenzhuo.lu@intel.com
Date: Fri, 2 Dec 2016 06:53:34 -0500
Message-Id: <1480679625-4157-14-git-send-email-beilei.xing@intel.com>
X-Mailer: git-send-email 2.5.5
In-Reply-To: <1480679625-4157-1-git-send-email-beilei.xing@intel.com>
References: <1480679625-4157-1-git-send-email-beilei.xing@intel.com>
Subject: [dpdk-dev] [PATCH 13/24] net/i40e: parse VXLAN filter
List-Id: DPDK patches and discussions <dev.dpdk.org>

Check if a rule is a VXLAN rule, and if so, get the VXLAN filter
info from it.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
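
Note for reviewers (illustrative only, not part of the patch): a minimal
sketch of the kind of rule this parser accepts, i.e. a pattern of
ETH / IPV4 / UDP / VXLAN / ETH / END with a QUEUE action. It assumes the
rte_flow item and action definitions from the generic flow API series;
the variable names are hypothetical.

#include <rte_flow.h>

static const struct rte_flow_attr attr = { .ingress = 1 };

/* Inner Ethernet: match on DST MAC only, as the parser requires
 * (DST mask all-ones, SRC mask and type zero).
 */
static const struct rte_flow_item_eth i_eth_spec = {
	.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};
static const struct rte_flow_item_eth i_eth_mask = {
	.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
};

/* VXLAN: match the 24-bit VNI 0x001234; every mask byte is 0xFF,
 * which the parser treats as "VNI not masked".
 */
static const struct rte_flow_item_vxlan vxlan_spec = {
	.vni = { 0x00, 0x12, 0x34 },
};
static const struct rte_flow_item_vxlan vxlan_mask = {
	.vni = { 0xFF, 0xFF, 0xFF },
};

/* Outer ETH/IPV4/UDP carry no spec/mask: protocol stack only. */
static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
	  .spec = &vxlan_spec, .mask = &vxlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &i_eth_spec, .mask = &i_eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

/* Redirect matched packets to queue 1. */
static const struct rte_flow_action_queue queue = { .index = 1 };
static const struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};

With no outer MAC and no inner VLAN given, this rule should be
classified as RTE_TUNNEL_FILTER_IMAC_TENID by the code below.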
 drivers/net/i40e/i40e_ethdev.c | 349 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 349 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 18247c0..3bdef8e 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -497,6 +497,11 @@ static int i40e_parse_macvlan_filter(const struct rte_flow_attr *attr,
 				     const struct rte_flow_action *actions,
 				     struct rte_eth_mac_filter *filter,
 				     struct rte_flow_error *error);
+static int i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
+				    const struct rte_flow_item *pattern,
+				    const struct rte_flow_action *actions,
+				    struct rte_eth_tunnel_filter_conf *filter,
+				    struct rte_flow_error *error);
 static int i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
 			      const struct rte_flow_item *pattern,
@@ -10394,6 +10399,344 @@ i40e_parse_macvlan_filter(const struct rte_flow_attr *attr,
 	return i40e_parse_attr(attr, error);
 }
 
+/* Parse to get the action and attr info of a tunnel filter */
+static int
+i40e_parse_tunnel_act_attr(const struct rte_flow_attr *attr,
+			   const struct rte_flow_action *actions,
+			   struct rte_eth_tunnel_filter_conf *filter,
+			   struct rte_flow_error *error)
+{
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *act_q;
+	uint32_t i;
+
+	/* parse action */
+	i = 0;
+
+	/* Check if the first not void action is QUEUE. */
+	ACTION_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+		return -EINVAL;
+	}
+
+	act_q = (const struct rte_flow_action_queue *)act->conf;
+	filter->queue_id = act_q->index;
+
+	/* Check if the next not void action is END. */
+	i++;
+	ACTION_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+			 RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+		return -EINVAL;
+	}
+
+	return i40e_parse_attr(attr, error);
+}
+
+/**
+ * Parse the rule to see if it is a VXLAN rule.
+ * Fill in the tunnel filter info along the way.
+ */
+static int
+i40e_parse_vxlan_tunnel_filter(const struct rte_flow_attr *attr,
+			       const struct rte_flow_item *pattern,
+			       const struct rte_flow_action *actions,
+			       struct rte_eth_tunnel_filter_conf *filter,
+			       struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_item_eth *o_eth_spec = NULL;
+	const struct rte_flow_item_eth *o_eth_mask = NULL;
+	const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+	const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+	const struct rte_flow_item_eth *i_eth_spec, *i_eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec = NULL;
+	const struct rte_flow_item_vlan *vlan_mask = NULL;
+	struct ether_addr macaddr_unmasked = {
+		.addr_bytes = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
+	};
+	struct ether_addr macaddr_masked = {
+		.addr_bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+	};
+	bool is_vni_masked = 0;
+	uint32_t i, j;
+
+	/* parse pattern */
+	i = 0;
+
+	/* The first not void item should be ETH, IPV4, IPV6, UDP or VXLAN. */
+	PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		o_eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		o_eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+		if ((!o_eth_spec && o_eth_mask) ||
+		    (o_eth_spec && !o_eth_mask)) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		if (o_eth_spec)
+			rte_memcpy(&filter->outer_mac, &o_eth_spec->dst,
+				   ETHER_ADDR_LEN);
+
+		if (o_eth_mask) {
+			/**
+			 * DST MAC address shouldn't be masked.
+			 * SRC MAC address should be masked.
+			 * Ethertype should be masked.
+			 */
+			if (!is_same_ether_addr(&o_eth_mask->dst,
+						&macaddr_unmasked) ||
+			    !is_same_ether_addr(&o_eth_mask->src,
+						&macaddr_masked) ||
+			    o_eth_mask->type) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		}
+
+		/* Check if the next not void item is IPV4 or IPV6. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+		/**
+		 * If the item is IP, its content should be NULL.
+		 * It is only used to describe the protocol stack.
+		 */
+		if (item->spec || item->mask) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		/* Check if the next not void item is UDP. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+		/**
+		 * If the item is UDP, its content should be NULL.
+		 * It is only used to describe the protocol stack.
+		 */
+		if (item->spec || item->mask) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		/* Check if the next not void item is VXLAN. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	}
+
+	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	vxlan_spec = (const struct rte_flow_item_vxlan *)item->spec;
+	vxlan_mask = (const struct rte_flow_item_vxlan *)item->mask;
+
+	/**
+	 * Check if the VXLAN item is used to describe the protocol stack.
+	 * If yes, both vxlan_spec and vxlan_mask should be NULL.
+	 * If not, neither of them should be NULL.
+	 */
+	if ((!vxlan_spec && vxlan_mask) ||
+	    (vxlan_spec && !vxlan_mask)) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	/* Check if the VNI is masked. All VNI bytes must carry the same
+	 * mask, either all 0xFF (not masked) or all 0 (masked).
+	 */
+	if (vxlan_mask) {
+		for (j = 0; j < RTE_DIM(vxlan_mask->vni); j++) {
+			if (j > 0 &&
+			    vxlan_mask->vni[j] != vxlan_mask->vni[j - 1]) {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+			if (vxlan_mask->vni[j] == 0xFF) {
+				is_vni_masked = 0;
+			} else if (vxlan_mask->vni[j] == 0) {
+				is_vni_masked = 1;
+			} else {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		}
+	}
+
+	/* Check if the next not void item is ETH. */
+	i++;
+	PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+			  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		i_eth_spec = (const struct rte_flow_item_eth *)item->spec;
+		i_eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+		if (!i_eth_spec || !i_eth_mask) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		/**
+		 * DST address of the inner MAC shouldn't be masked.
+		 * SRC address of the inner MAC should be masked.
+		 */
+		if (!is_same_ether_addr(&i_eth_mask->dst, &macaddr_unmasked) ||
+		    !is_same_ether_addr(&i_eth_mask->src, &macaddr_masked) ||
+		    i_eth_mask->type) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		rte_memcpy(&filter->inner_mac, &i_eth_spec->dst,
+			   ETHER_ADDR_LEN);
+
+		/* Check if the next not void item is VLAN or END. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+		    item->type != RTE_FLOW_ITEM_TYPE_END) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+		if (!(vlan_spec && vlan_mask)) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+
+		/* Check if the next not void item is END. */
+		i++;
+		PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+				  RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	}
+
+	if (vlan_spec && vlan_mask &&
+	    (vlan_mask->tci == rte_cpu_to_be_16(0x0FFF))) {
+		filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci) & 0xFFF;
+		if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+			rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+				   RTE_DIM(vxlan_spec->vni));
+			if (!o_eth_spec && !o_eth_mask)
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+			else {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		} else if (!vxlan_spec && !vxlan_mask) {
+			if (!o_eth_spec && !o_eth_mask)
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_IVLAN;
+			else {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		} else {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	} else if ((!vlan_spec && !vlan_mask) ||
+		   (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+		if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+			rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+				   RTE_DIM(vxlan_spec->vni));
+			if (!o_eth_spec && !o_eth_mask)
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_TENID;
+			else
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+		} else if (!vxlan_spec && !vxlan_mask) {
+			if (!o_eth_spec && !o_eth_mask)
+				filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+			else {
+				error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+				return -EINVAL;
+			}
+		} else {
+			error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+			return -EINVAL;
+		}
+	} else {
+		error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+		return -EINVAL;
+	}
+
+	filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+	return i40e_parse_tunnel_act_attr(attr, actions, filter, error);
+}
+
+static int
+i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
+			 const struct rte_flow_item *pattern,
+			 const struct rte_flow_action *actions,
+			 struct rte_eth_tunnel_filter_conf *rule,
+			 struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = i40e_parse_vxlan_tunnel_filter(attr, pattern,
+					     actions, rule, error);
+	if (!ret)
+		return 0;
+
+	return ret;
+}
+
 static int
 i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 		   const struct rte_flow_attr *attr,
@@ -10403,6 +10746,7 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 {
 	struct rte_eth_ethertype_filter ethertype_filter;
 	struct rte_eth_mac_filter macvlan_filter;
+	struct rte_eth_tunnel_filter_conf tunnel_filter;
 	int ret;
 
 	ret = cons_parse_ethertype_filter(attr, pattern, actions,
@@ -10415,5 +10759,10 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
 	if (!ret)
 		return 0;
 
+	ret = i40e_parse_tunnel_filter(attr, pattern, actions,
+				       &tunnel_filter, error);
+	if (!ret)
+		return 0;
+
 	return ret;
 }
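
For completeness, a hedged sketch of exercising the new path end to end,
reusing the attr/pattern/actions arrays from the note above. It assumes
the rte_flow_validate() entry point from the generic flow API series and
that port_id refers to an i40e port, so the call dispatches to
i40e_flow_validate(); check_vxlan_rule() is a hypothetical helper.

#include <stdio.h>
#include <rte_flow.h>

static int
check_vxlan_rule(uint8_t port_id)
{
	struct rte_flow_error flow_err;
	int ret;

	/* On an i40e port this reaches i40e_flow_validate(), which in
	 * turn calls i40e_parse_tunnel_filter() after the ethertype and
	 * macvlan parsers reject the pattern.
	 */
	ret = rte_flow_validate(port_id, &attr, pattern, actions,
				&flow_err);
	if (ret)
		printf("VXLAN rule rejected: error type %d\n",
		       flow_err.type);
	return ret;
}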