From patchwork Fri Dec 2 11:53:23 2016 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 17363 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [IPv6:::1]) by dpdk.org (Postfix) with ESMTP id 5D75A68BD; Fri, 2 Dec 2016 05:13:44 +0100 (CET) Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by dpdk.org (Postfix) with ESMTP id D5812567A for ; Fri, 2 Dec 2016 05:12:59 +0100 (CET) Received: from orsmga003.jf.intel.com ([10.7.209.27]) by fmsmga103.fm.intel.com with ESMTP; 01 Dec 2016 20:12:59 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.33,284,1477983600"; d="scan'208";a="907853441" Received: from dpdk9.sh.intel.com ([10.239.129.141]) by orsmga003.jf.intel.com with ESMTP; 01 Dec 2016 20:12:57 -0800 From: Beilei Xing To: jingjing.wu@intel.com, helin.zhang@intel.com Cc: dev@dpdk.org, wenzhuo.lu@intel.com Date: Fri, 2 Dec 2016 06:53:23 -0500 Message-Id: <1480679625-4157-3-git-send-email-beilei.xing@intel.com> X-Mailer: git-send-email 2.5.5 In-Reply-To: <1480679625-4157-1-git-send-email-beilei.xing@intel.com> References: <1480679625-4157-1-git-send-email-beilei.xing@intel.com> Subject: [dpdk-dev] [PATCH 02/24] net/i40e: store tunnel filter X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support for storing tunnel filters in SW. 
Signed-off-by: Beilei Xing --- drivers/net/i40e/i40e_ethdev.c | 161 ++++++++++++++++++++++++++++++++++++++++- drivers/net/i40e/i40e_ethdev.h | 35 +++++++++ 2 files changed, 193 insertions(+), 3 deletions(-) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 30822a0..b20a851 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -473,6 +473,17 @@ static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf, static int i40e_sw_ethertype_filter_del(struct i40e_pf *pf, struct i40e_ethertype_filter *filter); +static int i40e_tunnel_filter_convert( + struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter, + struct i40e_tunnel_filter *tunnel_filter); +static struct i40e_tunnel_filter * +i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_info *tunnel_info, + const struct i40e_tunnel_filter_input *input); +static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf, + struct i40e_tunnel_filter *tunnel_filter); +static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf, + struct i40e_tunnel_filter *tunnel_filter); + static const struct rte_pci_id pci_id_i40e_map[] = { { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) }, @@ -950,6 +961,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) uint32_t len; uint8_t aq_fail = 0; struct i40e_ethertype_info *ethertype_info = &pf->ethertype; + struct i40e_tunnel_info *tunnel_info = &pf->tunnel; PMD_INIT_FUNC_TRACE(); @@ -961,6 +973,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) .hash_func = rte_hash_crc, }; + char tunnel_hash_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters tunnel_hash_params = { + .name = tunnel_hash_name, + .entries = I40E_MAX_TUNNEL_FILTER_NUM, + .key_len = sizeof(struct i40e_tunnel_filter_input), + .hash_func = rte_hash_crc, + }; + dev->dev_ops = &i40e_eth_dev_ops; dev->rx_pkt_burst = i40e_recv_pkts; dev->tx_pkt_burst = i40e_xmit_pkts; @@ -1221,8 +1241,33 @@ 
eth_i40e_dev_init(struct rte_eth_dev *dev) goto err_ethertype_hash_map_alloc; } + /* Initialize tunnel filter rule list and hash */ + TAILQ_INIT(&tunnel_info->tunnel_list); + snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE, + "tunnel_%s", dev->data->name); + tunnel_info->hash_table = rte_hash_create(&tunnel_hash_params); + if (!tunnel_info->hash_table) { + PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!"); + ret = -EINVAL; + goto err_tunnel_hash_table_create; + } + tunnel_info->hash_map = rte_zmalloc("i40e_tunnel_hash_map", + sizeof(struct i40e_tunnel_filter *) * + I40E_MAX_TUNNEL_FILTER_NUM, + 0); + if (!tunnel_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for tunnel hash map!"); + ret = -ENOMEM; + goto err_tunnel_hash_map_alloc; + } + return 0; +err_tunnel_hash_map_alloc: + rte_hash_free(tunnel_info->hash_table); +err_tunnel_hash_table_create: + rte_free(ethertype_info->hash_map); err_ethertype_hash_map_alloc: rte_hash_free(ethertype_info->hash_table); err_ethertype_hash_table_create: @@ -1254,9 +1299,11 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) struct i40e_hw *hw; struct i40e_filter_control_settings settings; struct i40e_ethertype_filter *p_ethertype; + struct i40e_tunnel_filter *p_tunnel; int ret; uint8_t aq_fail = 0; struct i40e_ethertype_info *ethertype_info; + struct i40e_tunnel_info *tunnel_info; PMD_INIT_FUNC_TRACE(); @@ -1267,6 +1314,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); pci_dev = dev->pci_dev; ethertype_info = &pf->ethertype; + tunnel_info = &pf->tunnel; if (hw->adapter_stopped == 0) i40e_dev_close(dev); @@ -1283,6 +1331,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) rte_free(p_ethertype); } + /* Remove all tunnel director rules and hash */ + if (tunnel_info->hash_map) + rte_free(tunnel_info->hash_map); + if (tunnel_info->hash_table) + rte_hash_free(tunnel_info->hash_table); + + while ((p_tunnel = TAILQ_FIRST(&tunnel_info->tunnel_list))) { + 
TAILQ_REMOVE(&tunnel_info->tunnel_list, p_tunnel, rules); + rte_free(p_tunnel); + } + dev->dev_ops = NULL; dev->rx_pkt_burst = NULL; dev->tx_pkt_burst = NULL; @@ -6491,6 +6550,79 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag) } static int +i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data + *cld_filter, + struct i40e_tunnel_filter *tunnel_filter) +{ + ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac, + (struct ether_addr *)&tunnel_filter->input.outer_mac); + ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac, + (struct ether_addr *)&tunnel_filter->input.inner_mac); + tunnel_filter->input.inner_vlan = cld_filter->inner_vlan; + tunnel_filter->input.flags = cld_filter->flags; + tunnel_filter->input.tenant_id = cld_filter->tenant_id; + rte_memcpy(&tunnel_filter->input.ipaddr, &cld_filter->ipaddr, + sizeof(tunnel_filter->input.ipaddr)); + tunnel_filter->queue = cld_filter->queue_number; + + return 0; +} + +static struct i40e_tunnel_filter * +i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_info *tunnel_info, + const struct i40e_tunnel_filter_input *input) +{ + int ret = 0; + + ret = rte_hash_lookup(tunnel_info->hash_table, (const void *)input); + if (ret < 0) + return NULL; + + return tunnel_info->hash_map[ret]; +} + +static int +i40e_sw_tunnel_filter_insert(struct i40e_pf *pf, + struct i40e_tunnel_filter *tunnel_filter) +{ + struct i40e_tunnel_info *tunnel_info = &pf->tunnel; + int ret = 0; + + ret = rte_hash_add_key(tunnel_info->hash_table, + &tunnel_filter->input); + if (ret < 0) + PMD_DRV_LOG(ERR, + "Failed to insert tunnel filter to hash table %d!", + ret); + tunnel_info->hash_map[ret] = tunnel_filter; + + TAILQ_INSERT_TAIL(&tunnel_info->tunnel_list, tunnel_filter, rules); + + return 0; +} + +static int +i40e_sw_tunnel_filter_del(struct i40e_pf *pf, + struct i40e_tunnel_filter *tunnel_filter) +{ + struct i40e_tunnel_info *tunnel_info = &pf->tunnel; + int ret = 0; + + ret = 
rte_hash_del_key(tunnel_info->hash_table, + &tunnel_filter->input); + if (ret < 0) + PMD_DRV_LOG(ERR, + "Failed to delete tunnel filter to hash table %d!", + ret); + tunnel_info->hash_map[ret] = NULL; + + TAILQ_REMOVE(&tunnel_info->tunnel_list, tunnel_filter, rules); + rte_free(tunnel_filter); + + return 0; +} + +static int i40e_dev_tunnel_filter_set(struct i40e_pf *pf, struct rte_eth_tunnel_filter_conf *tunnel_filter, uint8_t add) @@ -6505,6 +6637,8 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, struct i40e_vsi *vsi = pf->main_vsi; struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter; struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter; + struct i40e_tunnel_info *tunnel_info = &pf->tunnel; + struct i40e_tunnel_filter *tunnel, *node; cld_filter = rte_zmalloc("tunnel_filter", sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data), @@ -6567,11 +6701,32 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id); - if (add) + tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); + i40e_tunnel_filter_convert(cld_filter, tunnel); + node = i40e_sw_tunnel_filter_lookup(tunnel_info, &tunnel->input); + if (add && node) { + PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); + rte_free(tunnel); + return -EINVAL; + } else if (!add && !node) { + PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!"); + rte_free(tunnel); + return -EINVAL; + } + + if (add) { ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1); - else + if (ret < 0) + return ret; + ret = i40e_sw_tunnel_filter_insert(pf, tunnel); + } else { ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, - cld_filter, 1); + cld_filter, 1); + if (ret < 0) + return ret; + ret = i40e_sw_tunnel_filter_del(pf, node); + rte_free(tunnel); + } rte_free(cld_filter); return ret; diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index 
8604198..5f9cddd 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -420,6 +420,40 @@ struct i40e_ethertype_info { struct rte_hash *hash_table; }; +#define I40E_MAX_TUNNEL_FILTER_NUM 400 + +/* Tunnel filter struct */ +struct i40e_tunnel_filter_input { + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; + union { + struct { + u8 reserved[12]; + u8 data[4]; + } v4; + struct { + u8 data[16]; + } v6; + } ipaddr; + __le16 flags; + __le32 tenant_id; +}; + +struct i40e_tunnel_filter { + TAILQ_ENTRY(i40e_tunnel_filter) rules; + struct i40e_tunnel_filter_input input; + uint16_t queue; +}; + +TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter); + +struct i40e_tunnel_info { + struct i40e_tunnel_filter_list tunnel_list; + struct i40e_tunnel_filter **hash_map; + struct rte_hash *hash_table; +}; + #define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64 #define I40E_MAX_MIRROR_RULES 64 /* @@ -491,6 +525,7 @@ struct i40e_pf { struct i40e_fdir_info fdir; /* flow director info */ struct i40e_ethertype_info ethertype; /* Ethertype filter info */ + struct i40e_tunnel_info tunnel; /* Tunnel filter info */ struct i40e_fc_conf fc_conf; /* Flow control conf */ struct i40e_mirror_rule_list mirror_list; uint16_t nb_mirror_rule; /* The number of mirror rules */