From patchwork Thu Apr 27 06:19:40 2023
X-Patchwork-Submitter: Qiming Yang
X-Patchwork-Id: 126576
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Qiming Yang
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, Qiming Yang, Michal Wilczynski
Subject: [PATCH 09/30] net/ice/base: add pre-allocate memory argument
Date: Thu, 27 Apr 2023 06:19:40 +0000
Message-Id: <20230427062001.478032-10-qiming.yang@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230427062001.478032-1-qiming.yang@intel.com>
References: <20230427062001.478032-1-qiming.yang@intel.com>
List-Id: DPDK patches and discussions

Add an option to pre-allocate memory for ice_sched_node struct. 
Add new arguments to ice_sched_add() and ice_sched_add_elems() that allow for pre-allocation of memory for ice_sched_node struct Signed-off-by: Michal Wilczynski Signed-off-by: Qiming Yang --- drivers/net/ice/base/ice_adminq_cmd.h | 4 +- drivers/net/ice/base/ice_common.c | 2 +- drivers/net/ice/base/ice_dcb.c | 4 +- drivers/net/ice/base/ice_sched.c | 411 ++++++++++++++++++++++++-- drivers/net/ice/base/ice_sched.h | 71 ++++- drivers/net/ice/base/ice_type.h | 3 + 6 files changed, 466 insertions(+), 29 deletions(-) diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h index 5a44ebbdc0..cd4a6ffddf 100644 --- a/drivers/net/ice/base/ice_adminq_cmd.h +++ b/drivers/net/ice/base/ice_adminq_cmd.h @@ -1120,9 +1120,9 @@ struct ice_aqc_txsched_elem { u8 generic; #define ICE_AQC_ELEM_GENERIC_MODE_M 0x1 #define ICE_AQC_ELEM_GENERIC_PRIO_S 0x1 -#define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S) +#define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S) #define ICE_AQC_ELEM_GENERIC_SP_S 0x4 -#define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S) +#define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S) #define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S 0x5 #define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M \ (0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S) diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c index ed811e406d..984830ea37 100644 --- a/drivers/net/ice/base/ice_common.c +++ b/drivers/net/ice/base/ice_common.c @@ -5129,7 +5129,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, q_ctx->q_teid = LE32_TO_CPU(node.node_teid); /* add a leaf node into scheduler tree queue layer */ - status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); + status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL); if (!status) status = ice_sched_replay_q_bw(pi, q_ctx); diff --git a/drivers/net/ice/base/ice_dcb.c b/drivers/net/ice/base/ice_dcb.c index 2a308b02bf..ca418090bc 100644 --- a/drivers/net/ice/base/ice_dcb.c +++ b/drivers/net/ice/base/ice_dcb.c @@ -1370,7 +1370,7 @@ ice_add_dscp_tc_bw_tlv(struct ice_lldp_org_tlv *tlv, ICE_DSCP_SUBTYPE_TCBW); tlv->ouisubtype = HTONL(ouisubtype); - /* First Octet after subtype + /* First Octect after subtype * ---------------------------- * | RSV | CBS | RSV | Max TCs | * | 1b | 1b | 3b | 3b | @@ -1624,7 +1624,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi, /* new TC */ status = ice_sched_query_elem(pi->hw, teid2, &elem); if (!status) - status = ice_sched_add_node(pi, 1, &elem); + status = ice_sched_add_node(pi, 1, &elem, NULL); if (status) break; /* update the TC number */ diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c index e3a638dcdd..421a0085d6 100644 --- a/drivers/net/ice/base/ice_sched.c +++ b/drivers/net/ice/base/ice_sched.c @@ -143,12 +143,14 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, * @pi: port information structure * @layer: Scheduler layer of the node * @info: Scheduler element information from firmware + * @prealloc_node: preallocated ice_sched_node struct for SW DB * * This function inserts a scheduler node to the SW DB. 
*/ enum ice_status ice_sched_add_node(struct ice_port_info *pi, u8 layer, - struct ice_aqc_txsched_elem_data *info) + struct ice_aqc_txsched_elem_data *info, + struct ice_sched_node *prealloc_node) { struct ice_aqc_txsched_elem_data elem; struct ice_sched_node *parent; @@ -176,7 +178,11 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem); if (status) return status; - node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node)); + + if (prealloc_node) + node = prealloc_node; + else + node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node)); if (!node) return ICE_ERR_NO_MEMORY; if (hw->max_children[layer]) { @@ -901,13 +907,15 @@ ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes, * @num_nodes: number of nodes * @num_nodes_added: pointer to num nodes added * @first_node_teid: if new nodes are added then return the TEID of first node + * @prealloc_nodes: preallocated nodes struct for software DB * * This function add nodes to HW as well as to SW DB for a given layer */ -static enum ice_status +enum ice_status ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, u16 num_nodes, - u16 *num_nodes_added, u32 *first_node_teid) + u16 *num_nodes_added, u32 *first_node_teid, + struct ice_sched_node **prealloc_nodes) { struct ice_sched_node *prev, *new_node; struct ice_aqc_add_elem *buf; @@ -953,7 +961,11 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, *num_nodes_added = num_nodes; /* add nodes to the SW DB */ for (i = 0; i < num_nodes; i++) { - status = ice_sched_add_node(pi, layer, &buf->generic[i]); + if (prealloc_nodes) + status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]); + else + status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL); + if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n", status); @@ -1032,7 +1044,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, } return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, - num_nodes_added, first_node_teid); + num_nodes_added, first_node_teid, NULL); } /** @@ -1156,6 +1168,240 @@ static u8 ice_sched_get_agg_layer(struct ice_hw *hw) return hw->sw_entry_point_layer; } +/** + * ice_sched_set_l2_node_aq_elem - AQ element setup for L2 node creation + * @pi: port information structure + * @elem: admin queue command element + * + * Setup Admin Queue Command element to default values for L2 Tx node creation + */ +static enum ice_status +ice_sched_set_l2_node_aq_elem(struct ice_port_info *pi, + struct ice_aqc_txsched_elem_data *elem) +{ + if (!pi || !pi->root || !elem) + return ICE_ERR_PARAM; + + elem->parent_teid = pi->root->info.node_teid; + elem->data.elem_type = ICE_AQC_ELEM_TYPE_TC; + elem->data.valid_sections = + ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | + ICE_AQC_ELEM_VALID_EIR; + elem->data.generic = 0; + elem->data.cir_bw.bw_profile_idx = + CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); + elem->data.cir_bw.bw_alloc = + CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); + elem->data.eir_bw.bw_profile_idx = + CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); + elem->data.eir_bw.bw_alloc = + CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); + + return ICE_SUCCESS; +} + +/** + * ice_sched_add_dflt_l2_nodes - add default L2 TC nodes into Tx tree + * @pi: port information structure + * + * Function creates default L2 nodes configuration. FW provide TC0 node, + * here remaining TCs are added. 
+ */ +enum ice_status ice_sched_add_dflt_l2_nodes(struct ice_port_info *pi) +{ + /* One node is already created by FW */ + const u16 num_nodes = ICE_MAX_CGD_PER_PORT - 1; + u16 i, buf_size, num_groups_added; + struct ice_aqc_add_elem *buf; + struct ice_sched_node *node; + enum ice_status status; + struct ice_hw *hw; + u32 teid; + + if (!pi || !pi->root) + return ICE_ERR_PARAM; + + hw = pi->hw; + + buf_size = ice_struct_size(buf, generic, num_nodes); + buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size); + if (!buf) + return ICE_ERR_NO_MEMORY; + + ice_acquire_lock(&pi->sched_lock); + + buf->hdr.parent_teid = pi->root->info.node_teid; + buf->hdr.num_elems = CPU_TO_LE16(num_nodes); + + for (i = 0; i < num_nodes; i++) { + status = ice_sched_set_l2_node_aq_elem(pi, &buf->generic[i]); + if (status) + goto exit_add_dflt_l2_nodes; + } + + status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, + &num_groups_added, NULL); + if (status != ICE_SUCCESS || num_groups_added != 1) { + ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", + hw->adminq.sq_last_status); + status = ICE_ERR_CFG; + goto exit_add_dflt_l2_nodes; + } + + for (i = 0; i < num_nodes; i++) { + status = ice_sched_add_node(pi, 1, &buf->generic[i], NULL); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n", + status); + break; + } + + teid = LE32_TO_CPU(buf->generic[i].node_teid); + node = ice_sched_find_node_by_teid(pi->root, teid); + if (!node) { + ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid); + break; + } + node->sibling = NULL; + node->tc_num = i + 1; + } + +exit_add_dflt_l2_nodes: + ice_release_lock(&pi->sched_lock); + ice_free(hw, buf); + return status; +} + +/** + * ice_sched_clear_l2_nodes - remove all L2 TC nodes from port except for default TC0 + * @pi: port information structure + * + * Remove non-default L2 nodes configuration created by SW leaving only one TC0 L2 default node + */ +enum ice_status ice_sched_clear_l2_nodes(struct ice_port_info *pi) +{ + enum ice_status status = ICE_SUCCESS; + u32 teid; + u8 i; + + if (!pi || !pi->root) + return ICE_ERR_PARAM; + + ice_acquire_lock(&pi->sched_lock); + + /* iterate backwards and do not remove child at index 0 */ + for (i = pi->root->num_children - 1; i; i--) { + struct ice_sched_node *node = pi->root->children[i]; + + teid = LE32_TO_CPU(node->info.node_teid); + ice_free_sched_node(pi, node); + /* ice_free_sched_node does not remove L2 nodes from HW, removing explicitly */ + status = ice_sched_remove_elems(pi->hw, pi->root, 1, &teid); + if (status) + break; + } + + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_sched_set_dflt_cgd_to_tc_map - setup default CGD to TC mapping + * @pi: port information structure + * + * Function creates default CGD to L2 nodes mapping + */ +enum ice_status ice_sched_set_dflt_cgd_to_tc_map(struct ice_port_info *pi) +{ + struct ice_aqc_cfg_l2_node_cgd_elem *buf; + struct ice_sched_node *root; + enum ice_status status; + u16 i, buf_size; + u8 cgd; + + if (!pi || !pi->root) + return ICE_ERR_PARAM; + + buf_size = sizeof(*buf) * ICE_MAX_CGD_PER_PORT; + buf = (struct ice_aqc_cfg_l2_node_cgd_elem *) + ice_malloc(pi->hw, buf_size); + if (!buf) + return ICE_ERR_NO_MEMORY; + + ice_acquire_lock(&pi->sched_lock); + root = pi->root; + + for (i = 0; i < root->num_children; i++) { + buf[i].node_teid = root->children[i]->info.node_teid; + cgd = i + pi->lport * ICE_MAX_CGD_PER_PORT; + buf[i].cgd = cgd; + root->children[i]->cgd = cgd; + } + + status = 
ice_aq_cfg_l2_node_cgd(pi->hw, root->num_children, buf, + buf_size, NULL); + + ice_release_lock(&pi->sched_lock); + ice_free(pi->hw, buf); + return status; +} + +/** + * ice_sched_copy_cgd - copy congestion domain mapping between ports + * @src: pointer to source port_info struct + * @dst: pointer to destination port_info struct + * @num_cgd: CGD count + * + * Copy first num_cgd congestion domain to TC node mappings from src port to dst port. + * Src port mapping does not change. + */ +enum ice_status +ice_sched_copy_cgd(struct ice_port_info *src, struct ice_port_info *dst, u8 num_cgd) +{ + struct ice_aqc_cfg_l2_node_cgd_elem *buf = NULL; + enum ice_status status; + u16 buf_size; + u8 cgd, i; + + if (!src || !dst || !num_cgd) + return ICE_ERR_PARAM; + + ice_acquire_lock(&src->sched_lock); + ice_acquire_lock(&dst->sched_lock); + + if (!src->root || src->root->num_children < num_cgd || + !dst->root || dst->root->num_children < num_cgd) { + status = ICE_ERR_PARAM; + goto err_copy_cgd; + } + + buf_size = sizeof(*buf) * num_cgd; + buf = (struct ice_aqc_cfg_l2_node_cgd_elem *)ice_malloc(src->hw, buf_size); + + if (!buf) { + status = ICE_ERR_NO_MEMORY; + goto err_copy_cgd; + } + + for (i = 0; i < num_cgd; i++) { + buf[i].node_teid = dst->root->children[i]->info.node_teid; + cgd = src->root->children[i]->cgd; + buf[i].cgd = cgd; + dst->root->children[i]->cgd = cgd; + } + + status = ice_aq_cfg_l2_node_cgd(src->hw, num_cgd, buf, buf_size, NULL); + +err_copy_cgd: + ice_release_lock(&dst->sched_lock); + ice_release_lock(&src->sched_lock); + + if (buf) + ice_free(src->hw, buf); + + return status; +} + /** * ice_rm_dflt_leaf_node - remove the default leaf node in the tree * @pi: port information structure @@ -1292,7 +1538,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) ICE_AQC_ELEM_TYPE_ENTRY_POINT) hw->sw_entry_point_layer = j; - status = ice_sched_add_node(pi, j, &buf[i].generic[j]); + status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL); if (status) goto err_init_port; } @@ -1417,11 +1663,6 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S; -#define PSM_CLK_SRC_367_MHZ 0x0 -#define PSM_CLK_SRC_416_MHZ 0x1 -#define PSM_CLK_SRC_446_MHZ 0x2 -#define PSM_CLK_SRC_390_MHZ 0x3 - switch (clk_src) { case PSM_CLK_SRC_367_MHZ: hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ; @@ -1435,11 +1676,12 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) case PSM_CLK_SRC_390_MHZ: hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ; break; - default: - ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n", - clk_src); - /* fall back to a safe default */ - hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; + + /* default condition is not required as clk_src is restricted + * to a 2-bit value from GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M mask. + * The above switch statements cover the possible values of + * this variable. + */ } } @@ -2267,7 +2509,7 @@ ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node, * This function removes the child from the old parent and adds it to a new * parent */ -static void +void ice_sched_update_parent(struct ice_sched_node *new_parent, struct ice_sched_node *node) { @@ -2301,7 +2543,7 @@ ice_sched_update_parent(struct ice_sched_node *new_parent, * * This function move the child nodes to a given parent. 
*/ -static enum ice_status +enum ice_status ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, u16 num_items, u32 *list) { @@ -4372,7 +4614,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi, * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile * ID from local database. The caller needs to hold scheduler lock. */ -static enum ice_status +enum ice_status ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw, u8 layer_num) { @@ -4408,6 +4650,58 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, ICE_AQC_RL_PROFILE_TYPE_M, old_id); } +/** + * ice_sched_set_node_priority - set node's priority + * @pi: port information structure + * @node: tree node + * @priority: number 0-7 representing priority among siblings + * + * This function sets priority of a node among it's siblings. + */ +enum ice_status +ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node, + u16 priority) +{ + struct ice_aqc_txsched_elem_data buf; + struct ice_aqc_txsched_elem *data; + + buf = node->info; + data = &buf.data; + + data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; + data->generic |= ICE_AQC_ELEM_GENERIC_PRIO_M & + (priority << ICE_AQC_ELEM_GENERIC_PRIO_S); + + return ice_sched_update_elem(pi->hw, node, &buf); +} + +/** + * ice_sched_set_node_weight - set node's weight + * @pi: port information structure + * @node: tree node + * @weight: number 1-200 representing weight for WFQ + * + * This function sets weight of the node for WFQ algorithm. + */ +enum ice_status +ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight) +{ + struct ice_aqc_txsched_elem_data buf; + struct ice_aqc_txsched_elem *data; + + buf = node->info; + data = &buf.data; + + data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR | + ICE_AQC_ELEM_VALID_GENERIC; + data->cir_bw.bw_alloc = CPU_TO_LE16(weight); + data->eir_bw.bw_alloc = CPU_TO_LE16(weight); + data->generic |= ICE_AQC_ELEM_GENERIC_SP_M & + (0x0 << ICE_AQC_ELEM_GENERIC_SP_S); + + return ice_sched_update_elem(pi->hw, node, &buf); +} + /** * ice_sched_set_node_bw_lmt - set node's BW limit * @pi: port information structure @@ -4421,7 +4715,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, * NOTE: Caller provides the correct SRL node in case of shared profile * settings. */ -static enum ice_status +enum ice_status ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw) { @@ -4444,6 +4738,81 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, return ice_sched_set_node_bw(pi, node, rl_type, bw, layer_num); } +/** + * ice_sched_save_root_node_bw - save root node BW limit + * @pi: port information structure + * @rl_type: min or max + * @bw: bandwidth in Kbps + * + * This function saves the modified values of bandwidth settings for later + * replay purpose (restore) after tree recreation. 
+ */ +static enum ice_status +ice_sched_save_root_node_bw(struct ice_port_info *pi, + enum ice_rl_type rl_type, u32 bw) +{ + switch (rl_type) { + case ICE_MIN_BW: + ice_set_clear_cir_bw(&pi->root_node_bw_t_info, bw); + break; + case ICE_MAX_BW: + ice_set_clear_eir_bw(&pi->root_node_bw_t_info, bw); + break; + case ICE_SHARED_BW: + ice_set_clear_shared_bw(&pi->root_node_bw_t_info, bw); + break; + default: + return ICE_ERR_PARAM; + } + return ICE_SUCCESS; +} + +/** + * ice_sched_set_root_node_bw_lmt - set root node's BW limit + * @pi: port information structure + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth in Kbps + * + * It updates root node's BW limit parameters like BW RL profile ID of type + * CIR, EIR, or SRL. + */ +static enum ice_status +ice_sched_set_root_node_bw_lmt(struct ice_port_info *pi, + enum ice_rl_type rl_type, u32 bw) +{ + enum ice_status status = ICE_ERR_PARAM; + + ice_acquire_lock(&pi->sched_lock); + if (!pi->root) + goto exit_set_root_node_bw; + + status = ice_sched_set_node_bw_lmt(pi, pi->root, rl_type, bw); + if (!status) + status = ice_sched_save_root_node_bw(pi, rl_type, bw); + +exit_set_root_node_bw: + ice_release_lock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_root_node_bw_lmt - configure the root BW + * @pi: port information structure + * @bw: bandwidth in Kbps - Kilo bits per sec + * @rl_type: rate limit type min, max, or shared + * + * This function configure the root node CIR, EIR or SRL BW limit + */ +enum ice_status +ice_cfg_root_node_bw_lmt(struct ice_port_info *pi, u32 bw, + enum ice_rl_type rl_type) +{ + if (!pi->root) + return ICE_ERR_PARAM; + + return ice_sched_set_root_node_bw_lmt(pi, rl_type, bw); +} + /** * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default * @pi: port information structure diff --git a/drivers/net/ice/base/ice_sched.h b/drivers/net/ice/base/ice_sched.h index a71619ebf0..d7a548e0c4 100644 --- a/drivers/net/ice/base/ice_sched.h +++ b/drivers/net/ice/base/ice_sched.h @@ -7,6 +7,8 @@ #include "ice_common.h" +#define SCHED_NODE_NAME_MAX_LEN 32 + #define ICE_SCHED_5_LAYERS 5 #define ICE_SCHED_9_LAYERS 9 @@ -38,6 +40,31 @@ #define ICE_PSM_CLK_446MHZ_IN_HZ 446428571 #define ICE_PSM_CLK_390MHZ_IN_HZ 390625000 +/* bit definitions per recipe */ +#define ICE_RECIPE_BIT_INCL_IPG_AND_PREAMBLE BIT(4) +#define ICE_RECIPE_BIT_INCL_OFFSET BIT(3) +#define ICE_RECIPE_BIT_INCL_ESP_TRAILER BIT(2) +#define ICE_RECIPE_BIT_INCL_L2_PADDING BIT(1) +#define ICE_RECIPE_BIT_INCL_CRC BIT(0) + +/* protocol IDs from factory parsing program */ +#define ICE_PROT_ID_MAC_OUTER_1 0x01 +#define ICE_PROT_ID_MAC_OUTER_2 0x02 +#define ICE_PROT_ID_MAC_INNER_LAST 0x04 +#define ICE_PROT_ID_IPV4_OUTER_1 0x20 +#define ICE_PROT_ID_IPV4_INNER_LAST 0x21 +#define ICE_PROT_ID_IPV6_OUTER_1 0x28 +#define ICE_PROT_ID_IPV6_INNER_LAST 0x29 + +/* Packet adjustment profile ID */ +#define ICE_ADJ_PROFILE_ID 0 +#define ICE_DWORDS_PER_ADJ 8 + +#define PSM_CLK_SRC_367_MHZ 0x0 +#define PSM_CLK_SRC_416_MHZ 0x1 +#define PSM_CLK_SRC_446_MHZ 0x2 +#define PSM_CLK_SRC_390_MHZ 0x3 + struct rl_profile_params { u32 bw; /* in Kbps */ u16 rl_multiplier; @@ -96,7 +123,38 @@ enum ice_status ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd); + +enum ice_status +ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, + enum ice_rl_type rl_type, u32 bw); + +enum ice_status +ice_sched_set_node_bw(struct ice_port_info *pi, struct 
ice_sched_node *node, + enum ice_rl_type rl_type, u32 bw, u8 layer_num); + +enum ice_status +ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, + struct ice_sched_node *parent, u8 layer, u16 num_nodes, + u16 *num_nodes_added, u32 *first_node_teid, + struct ice_sched_node **prealloc_node); + +enum ice_status +ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, + u16 num_items, u32 *list); + +enum ice_status +ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node, + u16 priority); +enum ice_status +ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, + u16 weight); + enum ice_status ice_sched_init_port(struct ice_port_info *pi); +enum ice_status ice_sched_add_dflt_l2_nodes(struct ice_port_info *pi); +enum ice_status ice_sched_clear_l2_nodes(struct ice_port_info *pi); +enum ice_status ice_sched_set_dflt_cgd_to_tc_map(struct ice_port_info *pi); +enum ice_status +ice_sched_copy_cgd(struct ice_port_info *src, struct ice_port_info *dst, u8 num_cgd); enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); void ice_sched_get_psm_clk_freq(struct ice_hw *hw); @@ -112,7 +170,11 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid); /* Add a scheduling node into SW DB for given info */ enum ice_status ice_sched_add_node(struct ice_port_info *pi, u8 layer, - struct ice_aqc_txsched_elem_data *info); + struct ice_aqc_txsched_elem_data *info, + struct ice_sched_node *prealloc_node); +void +ice_sched_update_parent(struct ice_sched_node *new_parent, + struct ice_sched_node *node); void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node); struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc); struct ice_sched_node * @@ -221,6 +283,9 @@ enum ice_status ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, struct ice_sched_node *node, u8 priority); enum ice_status +ice_cfg_root_node_bw_lmt(struct ice_port_info *pi, u32 bw, + enum ice_rl_type rl_type); +enum ice_status ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u8 bw_alloc); enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); @@ -229,7 +294,7 @@ void ice_sched_replay_agg(struct ice_hw *hw); enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi); enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi); -enum ice_status -ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); +enum ice_status ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); +void ice_cfg_pkt_len_adj_profiles(struct ice_hw *hw); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h index 17383ae23f..ec4892179a 100644 --- a/drivers/net/ice/base/ice_type.h +++ b/drivers/net/ice/base/ice_type.h @@ -992,9 +992,11 @@ struct ice_sched_node { u8 num_children; u8 tc_num; u8 owner; + u8 cgd; /* Valid only for Layer 2 */ #define ICE_SCHED_NODE_OWNER_LAN 0 #define ICE_SCHED_NODE_OWNER_AE 1 #define ICE_SCHED_NODE_OWNER_RDMA 2 +#define ICE_MAX_CGD_PER_PORT 4 }; /* Access Macros for Tx Sched Elements data */ @@ -1213,6 +1215,7 @@ struct ice_port_info { struct ice_bw_type_info tc_node_bw_t_info[ICE_MAX_TRAFFIC_CLASS]; struct ice_qos_cfg qos_cfg; u8 is_vf:1; + u8 is_custom_tx_enabled:1; }; struct ice_switch_info {
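
For illustration (not part of the patch itself): a minimal caller-side sketch of how the newly exported ice_sched_add_elems() can be driven through its prealloc_nodes argument, so that the SW DB insert in ice_sched_add_node() no longer has to allocate after firmware has already accepted the new elements. The helper name is hypothetical, the assumption that the caller does not already hold pi->sched_lock is illustrative, and cleanup of nodes the SW DB did not consume on error is simplified.

/* Hypothetical caller: pre-allocate the SW DB nodes, then add elements. */
static enum ice_status
example_add_elems_prealloc(struct ice_port_info *pi,
			   struct ice_sched_node *tc_node,
			   struct ice_sched_node *parent,
			   u8 layer, u16 num_nodes)
{
	struct ice_sched_node **prealloc_nodes;
	enum ice_status status;
	u32 first_teid = 0;
	u16 added = 0;
	u16 i;

	/* Allocate everything up front so no allocation happens once the
	 * HW/FW side of the tree has been modified.
	 */
	prealloc_nodes = (struct ice_sched_node **)
		ice_malloc(pi->hw, num_nodes * sizeof(*prealloc_nodes));
	if (!prealloc_nodes)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_nodes; i++) {
		prealloc_nodes[i] = (struct ice_sched_node *)
			ice_malloc(pi->hw, sizeof(*prealloc_nodes[i]));
		if (!prealloc_nodes[i])
			goto free_prealloc;
	}

	/* With a non-NULL last argument, ice_sched_add_elems() hands
	 * prealloc_nodes[i] to ice_sched_add_node() instead of letting it
	 * call ice_malloc() itself.
	 */
	ice_acquire_lock(&pi->sched_lock);
	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     &added, &first_teid, prealloc_nodes);
	ice_release_lock(&pi->sched_lock);

	/* Nodes consumed by the SW DB are now owned by the scheduler tree;
	 * only the pointer array is released here. A complete caller would
	 * also reclaim any nodes that were not consumed when status != 0.
	 */
	ice_free(pi->hw, prealloc_nodes);
	return status;

free_prealloc:
	while (i--)
		ice_free(pi->hw, prealloc_nodes[i]);
	ice_free(pi->hw, prealloc_nodes);
	return ICE_ERR_NO_MEMORY;
}

The gain over the existing NULL-passing callers is that ICE_ERR_NO_MEMORY can only be reported before anything is sent to firmware, which helps keep the software scheduler tree from going out of sync with the hardware tree when memory is tight.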