From patchwork Fri Mar 3 09:31:36 2017
X-Patchwork-Submitter: "Xing, Beilei"
X-Patchwork-Id: 21248
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Beilei Xing
To: jingjing.wu@intel.com
Cc: helin.zhang@intel.com, dev@dpdk.org, Bernard Iremonger, Yong Liu
Date: Fri, 3 Mar 2017 17:31:36 +0800
Message-Id: <1488533497-27682-4-git-send-email-beilei.xing@intel.com>
In-Reply-To: <1488533497-27682-1-git-send-email-beilei.xing@intel.com>
References: <1488533497-27682-1-git-send-email-beilei.xing@intel.com>
Subject: [dpdk-dev] [PATCH 3/4] net/i40e: support tunnel filter to VF

This patch adds support for directing traffic that matches a tunnel
filter to a VF instead of the PF. A new consistent tunnel filter
structure, i40e_tunnel_filter_conf, carries an is_to_vf flag and a
vf_id, and the rte_flow tunnel action parser now accepts PF and VF
actions in addition to QUEUE.

Signed-off-by: Bernard Iremonger
Signed-off-by: Yong Liu
Signed-off-by: Beilei Xing
---
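Note for reviewers: a minimal rte_flow sketch of the action chain this
patch enables (not part of the patch; port id, VNI, VF id and queue
index are made-up values, and the pattern is abbreviated -- the items
the driver actually accepts are defined by i40e_flow_parse_vxlan_pattern()):

/* Hypothetical usage sketch: steer VXLAN traffic with VNI 1 to
 * queue 3 of VF 0. With this patch the first non-void action must
 * be PF or VF, optionally followed by QUEUE, then END.
 */
#include <rte_flow.h>

static struct rte_flow *
vxlan_to_vf_rule(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 1 } };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_vf vf = { .id = 0 };           /* target VF */
	struct rte_flow_action_queue queue = { .index = 3 };  /* VF queue */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}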
 drivers/net/i40e/i40e_ethdev.c | 145 +++++++++++++++++++++++++++++++++++++++++
 drivers/net/i40e/i40e_ethdev.h |  32 +++++++++
 drivers/net/i40e/i40e_flow.c   |  50 +++++++++-----
 3 files changed, 212 insertions(+), 15 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 8de68b5..43afd5b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -6922,6 +6922,151 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 	return ret;
 }
 
+int
+i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+			struct i40e_tunnel_filter_conf *tunnel_filter,
+			uint8_t add)
+{
+	uint16_t ip_type;
+	uint32_t ipv4_addr;
+	uint8_t i, tun_type = 0;
+	/* internal variable to convert ipv6 byte order */
+	uint32_t convert_ipv6[4];
+	int val, ret = 0;
+	struct i40e_pf_vf *vf = NULL;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_vsi *vsi;
+	struct i40e_aqc_add_remove_cloud_filters_element_big_data *cld_filter;
+	struct i40e_aqc_add_remove_cloud_filters_element_big_data *pfilter;
+	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+	struct i40e_tunnel_filter *tunnel, *node;
+	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
+	bool big_buffer = 0;
+
+	cld_filter = rte_zmalloc("tunnel_filter",
+		sizeof(struct i40e_aqc_add_remove_cloud_filters_element_big_data),
+		0);
+
+	if (cld_filter == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -EINVAL;
+	}
+	pfilter = cld_filter;
+
+	ether_addr_copy(&tunnel_filter->outer_mac,
+			(struct ether_addr *)&pfilter->element.outer_mac);
+	ether_addr_copy(&tunnel_filter->inner_mac,
+			(struct ether_addr *)&pfilter->element.inner_mac);
+
+	pfilter->element.inner_vlan =
+		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
+		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+		rte_memcpy(&pfilter->element.ipaddr.v4.data,
+			   &rte_cpu_to_le_32(ipv4_addr),
+			   sizeof(pfilter->element.ipaddr.v4.data));
+	} else {
+		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+		for (i = 0; i < 4; i++) {
+			convert_ipv6[i] =
+				rte_cpu_to_le_32(rte_be_to_cpu_32(
					tunnel_filter->ip_addr.ipv6_addr[i]));
+		}
+		rte_memcpy(&pfilter->element.ipaddr.v6.data,
+			   &convert_ipv6,
+			   sizeof(pfilter->element.ipaddr.v6.data));
+	}
+
+	/* check tunneled type */
+	switch (tunnel_filter->tunnel_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+		break;
+	case RTE_TUNNEL_TYPE_NVGRE:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+		break;
+	case RTE_TUNNEL_TYPE_IP_IN_GRE:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+		break;
+	default:
+		/* Other tunnel types is not supported. */
+		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
+		rte_free(cld_filter);
+		return -EINVAL;
+	}
+
+	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+				       &pfilter->element.flags);
+	if (val < 0) {
+		rte_free(cld_filter);
+		return -EINVAL;
+	}
+
+	pfilter->element.flags |= rte_cpu_to_le_16(
+		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+	pfilter->element.queue_number =
+		rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+	if (!tunnel_filter->is_to_vf)
+		vsi = pf->main_vsi;
+	else {
+		if (tunnel_filter->vf_id >= pf->vf_num) {
+			PMD_DRV_LOG(ERR, "Invalid argument.");
+			return -EINVAL;
+		}
+		vf = &pf->vfs[tunnel_filter->vf_id];
+		vsi = vf->vsi;
+	}
+
+	/* Check if there is the filter in SW list */
+	memset(&check_filter, 0, sizeof(check_filter));
+	i40e_tunnel_filter_convert(cld_filter, &check_filter);
+	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+	if (add && node) {
+		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+		return -EINVAL;
+	}
+
+	if (!add && !node) {
+		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+		return -EINVAL;
+	}
+
+	if (add) {
+		if (big_buffer)
+			ret = i40e_aq_add_cloud_filters_big_buffer(hw,
+				vsi->seid, cld_filter, 1);
+		else
+			ret = i40e_aq_add_cloud_filters(hw,
+				vsi->seid, &cld_filter->element, 1);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+			return ret;
+		}
+		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+	} else {
+		if (big_buffer)
+			ret = i40e_aq_remove_cloud_filters_big_buffer(
+				hw, vsi->seid, cld_filter, 1);
+		else
+			ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+				&cld_filter->element, 1);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+			return ret;
+		}
+		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+	}
+
+	rte_free(cld_filter);
+	return ret;
+}
+
 static int
 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
 {
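A note on the byte-order handling above: the filter's addresses arrive
in network (big-endian) order while the admin queue expects
little-endian, so the IPv6 loop applies a to-CPU conversion followed by
a to-LE conversion per 32-bit word. A standalone sketch of the same
idiom (the helper name is ours, not the driver's):

#include <stdint.h>
#include <rte_byteorder.h>

/* Illustrative helper (not in the patch): convert one big-endian
 * 32-bit word to the little-endian layout the firmware expects.
 * On little-endian CPUs rte_cpu_to_le_32() is a no-op, so the pair
 * reduces to a single byte swap of the big-endian input.
 */
static inline uint32_t
tunnel_be32_to_le32(uint32_t be_word)
{
	return rte_cpu_to_le_32(rte_be_to_cpu_32(be_word));
}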
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index adf26f4..3c8a420 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -513,6 +513,7 @@ struct i40e_tunnel_filter_input {
 	uint16_t flags;          /* Filter type flag */
 	uint32_t tenant_id;      /* Tenant id to match */
 	uint16_t general_fields[32];  /* Big buffer */
+	uint16_t vf_id;          /* VF id for tunnel filtering. */
 };
 
 struct i40e_tunnel_filter {
@@ -529,6 +530,33 @@ struct i40e_tunnel_rule {
 	struct rte_hash *hash_table;
 };
 
+/**
+ * Tunneling Packet filter configuration.
+ */
+struct i40e_tunnel_filter_conf {
+	struct ether_addr outer_mac;    /**< Outer MAC address to match. */
+	struct ether_addr inner_mac;    /**< Inner MAC address to match. */
+	uint16_t inner_vlan;            /**< Inner VLAN to match. */
+	uint32_t outer_vlan;            /**< Outer VLAN to match */
+	enum rte_tunnel_iptype ip_type; /**< IP address type. */
+	/**
+	 * Outer destination IP address to match if ETH_TUNNEL_FILTER_OIP
+	 * is set in filter_type, or inner destination IP address to match
+	 * if ETH_TUNNEL_FILTER_IIP is set in filter_type.
+	 */
+	union {
+		uint32_t ipv4_addr;     /**< IPv4 address in big endian. */
+		uint32_t ipv6_addr[4];  /**< IPv6 address in big endian. */
+	} ip_addr;
+	/** Flags from ETH_TUNNEL_FILTER_XX - see above. */
+	uint16_t filter_type;
+	enum rte_eth_tunnel_type tunnel_type; /**< Tunnel Type. */
+	uint32_t tenant_id;     /**< Tenant ID to match. VNI, GRE key... */
+	uint16_t queue_id;      /**< Queue assigned to if match. */
+	uint8_t is_to_vf;       /**< 0 - to PF, 1 - to VF */
+	uint16_t vf_id;         /**< VF id for tunnel filter insertion. */
+};
+
 #define I40E_MIRROR_MAX_ENTRIES_PER_RULE   64
 #define I40E_MAX_MIRROR_RULES 64
 /*
@@ -717,6 +745,7 @@ union i40e_filter_t {
 	struct rte_eth_ethertype_filter ethertype_filter;
 	struct rte_eth_fdir_filter fdir_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
+	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
 };
 
 typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
@@ -899,6 +928,9 @@ int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			       struct rte_eth_tunnel_filter_conf *tunnel_filter,
 			       uint8_t add);
+int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+			       struct i40e_tunnel_filter_conf *tunnel_filter,
+			       uint8_t add);
 int i40e_fdir_flush(struct rte_eth_dev *dev);
 enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
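To illustrate the new structure, this is roughly what a VF-directed
filter could look like once filled in (all values hypothetical; the
is_to_vf and vf_id fields are produced from an rte_flow VF action by
i40e_flow_parse_tunnel_action() below):

/* Hypothetical example: a consistent tunnel filter matching inner
 * MAC plus GRE key 100, steering the traffic to queue 0 of VF 2.
 */
struct i40e_tunnel_filter_conf conf = {
	.tunnel_type = RTE_TUNNEL_TYPE_NVGRE,
	.ip_type = RTE_TUNNEL_IPTYPE_IPV4,
	.inner_mac = { .addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
	.filter_type = ETH_TUNNEL_FILTER_IMAC_TENID,
	.tenant_id = 100,	/* GRE key */
	.queue_id = 0,
	.is_to_vf = 1,		/* 0 - to PF, 1 - to VF */
	.vf_id = 2,
};

/* i40e_dev_consistent_tunnel_filter_set(pf, &conf, 1) would add the
 * filter; passing 0 as the last argument would remove it.
 */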
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 62f5842..03c7026 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -93,7 +93,7 @@ static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 				  const struct rte_flow_action *actions,
 				  struct rte_flow_error *error,
-				  struct rte_eth_tunnel_filter_conf *filter);
+				  struct i40e_tunnel_filter_conf *filter);
 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
 				struct rte_flow_error *error);
 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
@@ -1127,34 +1127,54 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 }
 
 /* Parse to get the action info of a tunnle filter
- * Tunnel action only supports QUEUE.
+ * Tunnel action only supports PF, VF and QUEUE.
  */
 static int
 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_action *actions,
 			      struct rte_flow_error *error,
-			      struct rte_eth_tunnel_filter_conf *filter)
+			      struct i40e_tunnel_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_action *act;
 	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_vf *act_vf;
 	uint32_t index = 0;
 
-	/* Check if the first non-void action is QUEUE. */
+	/* Check if the first non-void action is PF or VF. */
 	NEXT_ITEM_OF_ACTION(act, actions, index);
-	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+	if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
+	    act->type != RTE_FLOW_ACTION_TYPE_VF) {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
 				   act, "Not supported action.");
 		return -rte_errno;
 	}
 
-	act_q = (const struct rte_flow_action_queue *)act->conf;
-	filter->queue_id = act_q->index;
-	if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ACTION,
-				   act, "Invalid queue ID for tunnel filter");
-		return -rte_errno;
+	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+		act_vf = (const struct rte_flow_action_vf *)act->conf;
+		filter->vf_id = act_vf->id;
+		filter->is_to_vf = 1;
+		if (filter->vf_id >= pf->vf_num) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Invalid VF ID for tunnel filter");
+			return -rte_errno;
+		}
+	}
+
+	/* Check if the next non-void item is QUEUE */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		act_q = (const struct rte_flow_action_queue *)act->conf;
+		filter->queue_id = act_q->index;
+		if (!filter->is_to_vf)
+			if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					act,
+					"Invalid queue ID for tunnel filter");
+				return -rte_errno;
+			}
 	}
 
 	/* Check if the next non-void item is END */
@@ -1204,7 +1224,7 @@ static int
 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 			      const struct rte_flow_item *pattern,
 			      struct rte_flow_error *error,
-			      struct rte_eth_tunnel_filter_conf *filter)
+			      struct i40e_tunnel_filter_conf *filter)
 {
 	const struct rte_flow_item *item = pattern;
 	const struct rte_flow_item_eth *eth_spec;
@@ -1473,8 +1493,8 @@ i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
 			     struct rte_flow_error *error,
 			     union i40e_filter_t *filter)
 {
-	struct rte_eth_tunnel_filter_conf *tunnel_filter =
-		&filter->tunnel_filter;
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
 	int ret;
 
 	ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
@@ -1605,8 +1625,8 @@ i40e_flow_create(struct rte_eth_dev *dev,
 					i40e_fdir_filter_list);
 		break;
 	case RTE_ETH_FILTER_TUNNEL:
-		ret = i40e_dev_tunnel_filter_set(pf,
-			&cons_filter.tunnel_filter, 1);
+		ret = i40e_dev_consistent_tunnel_filter_set(pf,
+			&cons_filter.consistent_tunnel_filter, 1);
 		if (ret)
 			goto free_flow;
 		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
 					i40e_tunnel_filter_list);
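Finally, note the action-ordering contract the parser now enforces: the
first non-void action must be PF or VF, an optional QUEUE may follow
(validated against nb_rx_queues only in the PF case, since the PF
cannot see the VF's queue count), and the list must end with END. The
PF-directed equivalent of the VF example in the notes above would be
(queue index hypothetical):

/* Sketch: PF-directed tunnel action list accepted by this parser. */
struct rte_flow_action_queue q = { .index = 1 };
struct rte_flow_action pf_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_PF },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};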