From patchwork Thu May 18 15:16:37 2023
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Qiming Yang
X-Patchwork-Id: 127013
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Qiming Yang
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, Qiming Yang, Michal Wilczynski
Subject: [PATCH v2 19/20] net/ice/base: offer memory config for scheduler node
Date: Thu, 18 May 2023 15:16:37 +0000
Message-Id: <20230518151638.1207021-20-qiming.yang@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230518151638.1207021-1-qiming.yang@intel.com>
References: <20230427062001.478032-1-qiming.yang@intel.com>
 <20230518151638.1207021-1-qiming.yang@intel.com>

Add an option to pre-allocate memory for the ice_sched_node struct. Add new
arguments to ice_sched_add_node() and ice_sched_add_elems() that allow
callers to pass in pre-allocated memory for the ice_sched_node struct.
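
For illustration, a minimal sketch (not part of this patch) of how a caller
could use the new prealloc_node argument; the hw, pi, layer and elem
variables are assumed to be set up as in the existing callers, and the
ownership comment below is an assumption, not something this patch
guarantees:

	/* Sketch only: preallocate the SW DB node up front so that
	 * ice_sched_add_node() uses it instead of calling ice_malloc()
	 * internally.
	 */
	struct ice_sched_node *prealloc_node;
	enum ice_status status;

	prealloc_node = (struct ice_sched_node *)
		ice_malloc(hw, sizeof(*prealloc_node));
	if (!prealloc_node)
		return ICE_ERR_NO_MEMORY;

	status = ice_sched_add_node(pi, layer, &elem, prealloc_node);
	if (status) {
		/* assumption: the caller still owns the node on failure */
		ice_free(hw, prealloc_node);
	}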
Signed-off-by: Michal Wilczynski
Signed-off-by: Qiming Yang
---
 drivers/net/ice/base/ice_common.c |  2 +-
 drivers/net/ice/base/ice_dcb.c    |  2 +-
 drivers/net/ice/base/ice_sched.c  | 24 ++++++++++++++++++------
 drivers/net/ice/base/ice_sched.h  |  3 ++-
 4 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index f253e2f213..f7f43cd7e0 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -5210,7 +5210,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
 	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
 
 	/* add a leaf node into scheduler tree queue layer */
-	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
+	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
 	if (!status)
 		status = ice_sched_replay_q_bw(pi, q_ctx);
 
diff --git a/drivers/net/ice/base/ice_dcb.c b/drivers/net/ice/base/ice_dcb.c
index 2a308b02bf..cc4e28a702 100644
--- a/drivers/net/ice/base/ice_dcb.c
+++ b/drivers/net/ice/base/ice_dcb.c
@@ -1624,7 +1624,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
 			/* new TC */
 			status = ice_sched_query_elem(pi->hw, teid2, &elem);
 			if (!status)
-				status = ice_sched_add_node(pi, 1, &elem);
+				status = ice_sched_add_node(pi, 1, &elem, NULL);
 			if (status)
 				break;
 			/* update the TC number */
diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index f558eccb93..a4d31647fe 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -143,12 +143,14 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
  * @pi: port information structure
  * @layer: Scheduler layer of the node
  * @info: Scheduler element information from firmware
+ * @prealloc_node: preallocated ice_sched_node struct for SW DB
  *
  * This function inserts a scheduler node to the SW DB.
  */
 enum ice_status
 ice_sched_add_node(struct ice_port_info *pi, u8 layer,
-		   struct ice_aqc_txsched_elem_data *info)
+		   struct ice_aqc_txsched_elem_data *info,
+		   struct ice_sched_node *prealloc_node)
 {
 	struct ice_aqc_txsched_elem_data elem;
 	struct ice_sched_node *parent;
@@ -176,7 +178,11 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
 	status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
 	if (status)
 		return status;
-	node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
+
+	if (prealloc_node)
+		node = prealloc_node;
+	else
+		node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
 	if (!node)
 		return ICE_ERR_NO_MEMORY;
 	if (hw->max_children[layer]) {
@@ -901,13 +907,15 @@ ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
  * @num_nodes: number of nodes
  * @num_nodes_added: pointer to num nodes added
  * @first_node_teid: if new nodes are added then return the TEID of first node
+ * @prealloc_nodes: preallocated nodes struct for software DB
  *
  * This function add nodes to HW as well as to SW DB for a given layer
  */
 static enum ice_status
 ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
-		    u16 *num_nodes_added, u32 *first_node_teid)
+		    u16 *num_nodes_added, u32 *first_node_teid,
+		    struct ice_sched_node **prealloc_nodes)
 {
 	struct ice_sched_node *prev, *new_node;
 	struct ice_aqc_add_elem *buf;
@@ -953,7 +961,11 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 	*num_nodes_added = num_nodes;
 	/* add nodes to the SW DB */
 	for (i = 0; i < num_nodes; i++) {
-		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
+		if (prealloc_nodes)
+			status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]);
+		else
+			status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL);
+
 		if (status != ICE_SUCCESS) {
 			ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
 				  status);
@@ -1032,7 +1044,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
 	}
 
 	return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
-				   num_nodes_added, first_node_teid);
+				   num_nodes_added, first_node_teid, NULL);
 }
 
 /**
@@ -1292,7 +1304,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
 			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
 				hw->sw_entry_point_layer = j;
 
-			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
+			status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL);
 			if (status)
 				goto err_init_port;
 		}
diff --git a/drivers/net/ice/base/ice_sched.h b/drivers/net/ice/base/ice_sched.h
index c54f5ca9a0..4b68f3f535 100644
--- a/drivers/net/ice/base/ice_sched.h
+++ b/drivers/net/ice/base/ice_sched.h
@@ -117,7 +117,8 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
 /* Add a scheduling node into SW DB for given info */
 enum ice_status
 ice_sched_add_node(struct ice_port_info *pi, u8 layer,
-		   struct ice_aqc_txsched_elem_data *info);
+		   struct ice_aqc_txsched_elem_data *info,
+		   struct ice_sched_node *prealloc_node);
 void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
 struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
 struct ice_sched_node *
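
For illustration only (not part of this patch), a sketch of how a future
caller inside ice_sched.c (ice_sched_add_elems() is static) could hand a
preallocated node array to the new prealloc_nodes argument. The in-tree
callers above keep the old behavior by passing NULL, and the ownership
comments below are assumptions rather than guarantees made by this patch:

	/* Sketch only: one preallocated SW DB node per element to be added,
	 * passed to ice_sched_add_elems() as an array of pointers.
	 */
	struct ice_sched_node **prealloc_nodes;
	enum ice_status status;
	u16 i;

	prealloc_nodes = (struct ice_sched_node **)
		ice_calloc(hw, num_nodes, sizeof(*prealloc_nodes));
	if (!prealloc_nodes)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_nodes; i++) {
		prealloc_nodes[i] = (struct ice_sched_node *)
			ice_malloc(hw, sizeof(*prealloc_nodes[i]));
		if (!prealloc_nodes[i]) {
			while (i--)
				ice_free(hw, prealloc_nodes[i]);
			ice_free(hw, prealloc_nodes);
			return ICE_ERR_NO_MEMORY;
		}
	}

	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     num_nodes_added, first_node_teid,
				     prealloc_nodes);
	/* Assumption: once added, the scheduler SW DB owns the individual
	 * nodes; only the temporary pointer array is released here.
	 */
	ice_free(hw, prealloc_nodes);
	return status;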