From patchwork Sat Oct 11 05:55:32 2014
X-Patchwork-Submitter: Jijiang Liu
X-Patchwork-Id: 775
From: Jijiang Liu <jijiang.liu@intel.com>
To: dev@dpdk.org
Date: Sat, 11 Oct 2014 13:55:32 +0800
Message-Id: <1413006935-22535-6-git-send-email-jijiang.liu@intel.com>
In-Reply-To: <1413006935-22535-1-git-send-email-jijiang.liu@intel.com>
References: <1413006935-22535-1-git-send-email-jijiang.liu@intel.com>
Subject: [dpdk-dev] [PATCH v5 5/8] i40e: implement the VxLAN packet filter API in librte_pmd_i40e

Implement the VxLAN tunnel filter in librte_pmd_i40e. This includes:
 - adding the i40e_dev_filter_ctrl() function;
 - adding the i40e_dev_tunnel_filter_set() function.
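For reference (not part of the patch itself), the sketch below shows roughly how an application is expected to reach this driver code through the generic filter API, assuming the rte_eth_dev_filter_ctrl() wrapper and the struct rte_eth_tunnel_filter_conf / tunnel filter enums defined by the companion ethdev patches in this series. Field names mirror how i40e_dev_tunnel_filter_set() dereferences them; add_vxlan_filter_example(), the VNI and the queue number are illustrative placeholders.

#include <string.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
/* ...plus whichever header declares the tunnel filter API in this series. */

/* Hypothetical helper: steer one inner MAC + VNI of VxLAN traffic to an RX queue. */
static int
add_vxlan_filter_example(uint8_t port_id,
			 struct ether_addr *outer_mac,
			 struct ether_addr *inner_mac)
{
	struct rte_eth_tunnel_filter_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* i40e_dev_tunnel_filter_set() copies both MACs, so both pointers must be valid. */
	conf.outer_mac   = outer_mac;
	conf.inner_mac   = inner_mac;
	conf.ip_type     = RTE_TUNNEL_IPTYPE_IPV4;
	conf.filter_type = RTE_TUNNEL_FILTER_IMAC_TENID;  /* match inner MAC + VNI */
	conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
	conf.tenant_id   = 100;                           /* placeholder VNI */
	conf.queue_id    = 1;                             /* placeholder RX queue */

	/* Dispatches to i40e_dev_filter_ctrl() and then i40e_tunnel_filter_handle(). */
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
				       RTE_ETH_FILTER_OP_ADD, &conf);
}

A matching RTE_ETH_FILTER_OP_DELETE call with the same configuration removes the filter again.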
Signed-off-by: Jijiang Liu
Acked-by: Helin Zhang
Acked-by: Jingjing Wu
Acked-by: Jing Chen
---
 lib/librte_pmd_i40e/i40e_ethdev.c | 221 ++++++++++++++++++++++++++++++++++++-
 1 files changed, 220 insertions(+), 1 deletions(-)

diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index ed5938c..f8160eb 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -48,6 +48,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "i40e_logs.h"
 #include "i40e/i40e_register_x710_int.h"
@@ -211,8 +212,15 @@ static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
 static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
 				   struct rte_eth_udp_tunnel *udp_tunnel,
 				   uint8_t count);
+static int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+			struct rte_eth_tunnel_filter_conf *tunnel_filter,
+			uint8_t add);
 static int i40e_pf_config_vxlan(struct i40e_pf *pf);
-
+static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
+				enum rte_filter_type filter_type,
+				enum rte_filter_op filter_op,
+				void *arg);
+static void i40e_hw_init(struct i40e_hw *hw);
 
 /* Default hash key buffer for RSS */
 static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
@@ -266,6 +274,7 @@ static struct eth_dev_ops i40e_eth_dev_ops = {
 	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
 	.udp_tunnel_add               = i40e_dev_udp_tunnel_add,
 	.udp_tunnel_del               = i40e_dev_udp_tunnel_del,
+	.filter_ctrl                  = i40e_dev_filter_ctrl,
 };
 
 static struct eth_driver rte_i40e_pmd = {
@@ -395,6 +404,9 @@ eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
 	/* Make sure all is clean before doing PF reset */
 	i40e_clear_hw(hw);
 
+	/* Initialize the hardware */
+	i40e_hw_init(hw);
+
 	/* Reset here to make sure all is clean for each PF */
 	ret = i40e_pf_reset(hw);
 	if (ret) {
@@ -4122,6 +4134,110 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 }
 
 static int
+i40e_dev_get_filter_type(enum rte_tunnel_filter_type filter_type,
+			uint16_t *flag)
+{
+	switch (filter_type) {
+	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+		break;
+	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+		break;
+	case RTE_TUNNEL_FILTER_IMAC_TENID:
+		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
+		break;
+	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
+		break;
+	case RTE_TUNNEL_FILTER_IMAC:
+		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid tunnel filter type\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+			struct rte_eth_tunnel_filter_conf *tunnel_filter,
+			uint8_t add)
+{
+	uint16_t ip_type;
+	uint8_t tun_type = 0;
+	int ret = 0;
+	int val;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_vsi *vsi = pf->main_vsi;
+	struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
+	struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+
+	cld_filter = rte_zmalloc("tunnel_filter",
+		sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
+		0);
+
+	if (NULL == cld_filter) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.\n");
+		return -EINVAL;
+	}
+	pfilter = cld_filter;
+
+	(void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
+			sizeof(struct ether_addr));
+	(void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
+			sizeof(struct ether_addr));
+
+	pfilter->inner_vlan = tunnel_filter->inner_vlan;
+	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
+		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+		(void)rte_memcpy(&pfilter->ipaddr.v4.data,
+				&tunnel_filter->ip_addr,
+				sizeof(pfilter->ipaddr.v4.data));
+	} else {
+		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+		(void)rte_memcpy(&pfilter->ipaddr.v6.data,
+				&tunnel_filter->ip_addr,
+				sizeof(pfilter->ipaddr.v6.data));
+	}
+
+	/* check tunnel type */
+	switch (tunnel_filter->tunnel_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
+		break;
+	default:
+		/* Other tunnel types is not supported. */
+		PMD_DRV_LOG(ERR, "tunnel type is not supported.\n");
+		rte_free(cld_filter);
+		return -EINVAL;
+	}
+
+	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+		&pfilter->flags);
+	if (val < 0) {
+		rte_free(cld_filter);
+		return -EINVAL;
+	}
+
+	pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
+		(tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
+	pfilter->tenant_id = tunnel_filter->tenant_id;
+	pfilter->queue_number = tunnel_filter->queue_id;
+
+	if (add)
+		ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
+	else
+		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+						cld_filter, 1);
+
+	rte_free(cld_filter);
+	return ret;
+}
+
+static int
 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
 {
 	uint8_t i;
@@ -4312,6 +4428,72 @@ i40e_pf_config_vxlan(struct i40e_pf *pf)
 }
 
 static int
+i40e_tunnel_filter_param_check(struct i40e_pf *pf,
+			struct rte_eth_tunnel_filter_conf *filter)
+{
+	if (pf == NULL || filter == NULL) {
+		PMD_DRV_LOG(ERR, "Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "Invalid queue ID\n");
+		return -EINVAL;
+	}
+
+	if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
+		PMD_DRV_LOG(ERR, "Invalid inner VLAN ID\n");
+		return -EINVAL;
+	}
+
+	if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
+		(is_zero_ether_addr(filter->outer_mac))) {
+		PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address\n");
+		return -EINVAL;
+	}
+
+	if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
+		(is_zero_ether_addr(filter->inner_mac))) {
+		PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+			void *arg)
+{
+	struct rte_eth_tunnel_filter_conf *filter;
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = I40E_SUCCESS;
+
+	filter = (struct rte_eth_tunnel_filter_conf *)(arg);
+
+	if (i40e_tunnel_filter_param_check(pf, filter) < 0)
+		return I40E_ERR_PARAM;
+
+	switch (filter_op) {
+	case RTE_ETH_FILTER_OP_NONE:
+		if (!(pf->flags & I40E_FLAG_VXLAN))
+			ret = I40E_NOT_SUPPORTED;
+	case RTE_ETH_FILTER_OP_ADD:
+		ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
+		break;
+	case RTE_ETH_FILTER_OP_DELETE:
+		ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown operation %u\n", filter_op);
+		ret = I40E_ERR_PARAM;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 i40e_pf_config_mq_rx(struct i40e_pf *pf)
 {
 	if (!pf->dev_data->sriov.active) {
@@ -4327,3 +4509,40 @@ i40e_pf_config_mq_rx(struct i40e_pf *pf)
 
 	return 0;
 }
+
+static int
+i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = I40E_NOT_SUPPORTED;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_HASH:
+		break;
+	case RTE_ETH_FILTER_FDIR:
+		break;
+	case RTE_ETH_FILTER_TUNNEL:
+		ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+			    filter_type);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * As global registers wouldn't be reset unless a global hardware reset,
+ * hardware initialization is needed to put those registers into an
+ * expected initial state.
+ */
+static void
+i40e_hw_init(struct i40e_hw *hw)
+{
+	/* clear the PF Queue Filter control register */
+	I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0);
+}