From patchwork Tue Jun 4 05:42:45 2019
From: Leyi Rong
To: qi.z.zhang@intel.com
Cc: dev@dpdk.org, Leyi Rong, Dan Nowlin, Paul M Stillwell Jr
Date: Tue, 4 Jun 2019 13:42:45 +0800
Message-Id: <20190604054248.68510-47-leyi.rong@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190604054248.68510-1-leyi.rong@intel.com>
References: <20190604054248.68510-1-leyi.rong@intel.com>
Subject: [dpdk-dev] [PATCH 46/49] net/ice/base: add VXLAN/generic tunnel
 management

Add routines for handling tunnel management:
- ice_tunnel_port_in_use()
- ice_tunnel_get_type()
- ice_find_free_tunnel_entry()
- ice_create_tunnel()
- ice_destroy_tunnel()

Signed-off-by: Dan Nowlin
Signed-off-by: Paul M Stillwell Jr
Signed-off-by: Leyi Rong
---
 drivers/net/ice/base/ice_flex_pipe.c | 228 +++++++++++++++++++++++++++
 drivers/net/ice/base/ice_flex_pipe.h |   6 +
 2 files changed, 234 insertions(+)

diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index fda5bef43..1c19548c1 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -1711,6 +1711,234 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
 	return &bld->buf;
 }
 
+/**
+ * ice_tunnel_port_in_use
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
+{
+	u16 i;
+
+	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+			if (index)
+				*index = i;
+			return true;
+		}
+
+	return false;
+}
+
+/**
+ * ice_tunnel_get_type
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @type: returns tunnel type
+ *
+ * For a given port number, will return the type of tunnel.
+ */
+bool
+ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
+{
+	u16 i;
+
+	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+		if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+			*type = hw->tnl.tbl[i].type;
+			return true;
+		}
+
+	return false;
+}
+
+/**
+ * ice_find_free_tunnel_entry
+ * @hw: pointer to the HW structure
+ * @type: tunnel type
+ * @index: optionally returns index
+ *
+ * Returns whether there is a free tunnel entry, and optionally its index
+ */
+static bool
+ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+			   u16 *index)
+{
+	u16 i;
+
+	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+		if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
+		    hw->tnl.tbl[i].type == type) {
+			if (index)
+				*index = i;
+			return true;
+		}
+
+	return false;
+}
+
+/**
+ * ice_create_tunnel
+ * @hw: pointer to the HW structure
+ * @type: type of tunnel
+ * @port: port to use for VXLAN tunnel
+ *
+ * Creates a tunnel
+ */
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+{
+	struct ice_boost_tcam_section *sect_rx, *sect_tx;
+	enum ice_status status = ICE_ERR_MAX_LIMIT;
+	struct ice_buf_build *bld;
+	u16 index;
+
+	if (ice_tunnel_port_in_use(hw, port, NULL))
+		return ICE_ERR_ALREADY_EXISTS;
+
+	if (!ice_find_free_tunnel_entry(hw, type, &index))
+		return ICE_ERR_OUT_OF_RANGE;
+
+	bld = ice_pkg_buf_alloc(hw);
+	if (!bld)
+		return ICE_ERR_NO_MEMORY;
+
+	/* allocate 2 sections, one for RX parser, one for TX parser */
+	if (ice_pkg_buf_reserve_section(bld, 2))
+		goto ice_create_tunnel_err;
+
+	sect_rx = (struct ice_boost_tcam_section *)
+		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+					  sizeof(*sect_rx));
+	if (!sect_rx)
+		goto ice_create_tunnel_err;
+	sect_rx->count = CPU_TO_LE16(1);
+
+	sect_tx = (struct ice_boost_tcam_section *)
+		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+					  sizeof(*sect_tx));
+	if (!sect_tx)
+		goto ice_create_tunnel_err;
+	sect_tx->count = CPU_TO_LE16(1);
+
+	/* copy original boost entry to update package buffer */
+	ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+		   sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
+
+	/* over-write the never-match dest port key bits with the encoded port
+	 * bits
+	 */
+	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+		    (u8 *)&port, NULL, NULL, NULL,
+		    offsetof(struct ice_boost_key_value, hv_dst_port_key),
+		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
+
+	/* exact copy of entry to TX section entry */
+	ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
+		   ICE_NONDMA_TO_NONDMA);
+
+	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+	if (!status) {
+		hw->tnl.tbl[index].port = port;
+		hw->tnl.tbl[index].in_use = true;
+	}
+
+ice_create_tunnel_err:
+	ice_pkg_buf_free(hw, bld);
+
+	return status;
+}
+
+/**
+ * ice_destroy_tunnel
+ * @hw: pointer to the HW structure
+ * @port: port of tunnel to destroy (ignored if the all parameter is true)
+ * @all: flag that states to destroy all tunnels
+ *
+ * Destroys a tunnel or all tunnels by creating an update package buffer
+ * targeting the specific updates requested and then performing an update
+ * package.
+ */
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+{
+	struct ice_boost_tcam_section *sect_rx, *sect_tx;
+	enum ice_status status = ICE_ERR_MAX_LIMIT;
+	struct ice_buf_build *bld;
+	u16 count = 0;
+	u16 size;
+	u16 i;
+
+	/* determine count */
+	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+		    (all || hw->tnl.tbl[i].port == port))
+			count++;
+
+	if (!count)
+		return ICE_ERR_PARAM;
+
+	/* size of section - there is at least one entry */
+	size = (count - 1) * sizeof(*sect_rx->tcam) + sizeof(*sect_rx);
+
+	bld = ice_pkg_buf_alloc(hw);
+	if (!bld)
+		return ICE_ERR_NO_MEMORY;
+
+	/* allocate 2 sections, one for RX parser, one for TX parser */
+	if (ice_pkg_buf_reserve_section(bld, 2))
+		goto ice_destroy_tunnel_err;
+
+	sect_rx = (struct ice_boost_tcam_section *)
+		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+					  size);
+	if (!sect_rx)
+		goto ice_destroy_tunnel_err;
+	sect_rx->count = CPU_TO_LE16(1);
+
+	sect_tx = (struct ice_boost_tcam_section *)
+		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+					  size);
+	if (!sect_tx)
+		goto ice_destroy_tunnel_err;
+	sect_tx->count = CPU_TO_LE16(1);
+
+	/* copy original boost entry to update package buffer, one copy to RX
+	 * section, another copy to the TX section
+	 */
+	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+		    (all || hw->tnl.tbl[i].port == port)) {
+			ice_memcpy(sect_rx->tcam + i,
+				   hw->tnl.tbl[i].boost_entry,
+				   sizeof(*sect_rx->tcam),
+				   ICE_NONDMA_TO_NONDMA);
+			ice_memcpy(sect_tx->tcam + i,
+				   hw->tnl.tbl[i].boost_entry,
+				   sizeof(*sect_tx->tcam),
+				   ICE_NONDMA_TO_NONDMA);
+			hw->tnl.tbl[i].marked = true;
+		}
+
+	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+	if (!status)
+		for (i = 0; i < hw->tnl.count &&
+		     i < ICE_TUNNEL_MAX_ENTRIES; i++)
+			if (hw->tnl.tbl[i].marked) {
+				hw->tnl.tbl[i].port = 0;
+				hw->tnl.tbl[i].in_use = false;
+				hw->tnl.tbl[i].marked = false;
+			}
+
+ice_destroy_tunnel_err:
+	/* free the update package buffer on both success and error paths */
+	ice_pkg_buf_free(hw, bld);
+
+	return status;
+}
+
 /**
  * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
index 7142ae7fe..13066808c 100644
--- a/drivers/net/ice/base/ice_flex_pipe.h
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -33,6 +33,12 @@ ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
 enum ice_status
 ice_get_sw_fv_list(struct ice_hw *hw, u16 *prot_ids, u8 ids_cnt,
 		   struct LIST_HEAD_TYPE *fv_list);
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
+bool
+ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type);
 
 /* XLT2/VSI group functions */
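
Illustrative usage (not part of the patch): a minimal sketch of how a
driver-level UDP tunnel add/remove path could call the routines above.
The TNL_VXLAN enumerator of enum ice_tunnel_type is assumed here for
illustration (the enum's values live in ice_flex_type.h and are not
shown in this diff); both example_* helpers are hypothetical names.

	/* Hypothetical helper: program a VXLAN UDP port, skipping
	 * ports that are already in use.
	 */
	static enum ice_status
	example_vxlan_port_add(struct ice_hw *hw, u16 udp_port)
	{
		/* nothing to do if the port is already programmed */
		if (ice_tunnel_port_in_use(hw, udp_port, NULL))
			return ICE_SUCCESS;

		/* writes RX and TX boost TCAM entries via update package */
		return ice_create_tunnel(hw, TNL_VXLAN, udp_port);
	}

	/* Hypothetical helper: remove the entry for one port only;
	 * passing all=true instead would tear down every tunnel.
	 */
	static enum ice_status
	example_vxlan_port_del(struct ice_hw *hw, u16 udp_port)
	{
		return ice_destroy_tunnel(hw, udp_port, false);
	}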