From patchwork Mon Jan 8 20:21:56 2024
X-Patchwork-Submitter: Qi Zhang
X-Patchwork-Id: 135800
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Qi Zhang
To: qiming.yang@intel.com, wenjun1.wu@intel.com
Cc: dev@dpdk.org, Qi Zhang
Subject: [PATCH v4 1/3] net/ice: hide port and TC layer in Tx sched tree
Date: Mon, 8 Jan 2024 15:21:56 -0500
Message-Id: <20240108202158.567910-2-qi.z.zhang@intel.com>
X-Mailer: git-send-email 2.31.1
In-Reply-To: <20240108202158.567910-1-qi.z.zhang@intel.com>
References: <20240105211237.394105-1-qi.z.zhang@intel.com>
 <20240108202158.567910-1-qi.z.zhang@intel.com>

In the current 5-layer tree implementation, the port and TC layers are
not configurable, so there is no need to expose them to the application.
This patch hides the top two layers and represents the root of the tree
at the VSI layer. From the application's point of view, it is a 3-layer
scheduler tree: Port -> Queue Group -> Queue.
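As an illustration of the new model (not part of the patch itself), here is a
minimal sketch of how an application might build the 3-layer hierarchy through
the generic rte_tm API after this change. The node IDs, the single queue group
and the helper name below are hypothetical, chosen only for the example:

#include <string.h>
#include <stdint.h>
#include <rte_tm.h>

/* Hypothetical node IDs chosen by the application for this example. */
#define EXAMPLE_ROOT_ID   1000
#define EXAMPLE_QGROUP_ID 2000

static int
example_setup_hierarchy(uint16_t port_id, uint16_t nb_txq)
{
        struct rte_tm_node_params np;
        struct rte_tm_error err;
        uint16_t i;
        int ret;

        memset(&np, 0, sizeof(np));
        np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;

        /* Root node: represents the port; its parent is RTE_TM_NODE_ID_NULL. */
        ret = rte_tm_node_add(port_id, EXAMPLE_ROOT_ID, RTE_TM_NODE_ID_NULL,
                              0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
        if (ret != 0)
                return ret;

        /* One queue group under the root. */
        ret = rte_tm_node_add(port_id, EXAMPLE_QGROUP_ID, EXAMPLE_ROOT_ID,
                              0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
        if (ret != 0)
                return ret;

        /* Leaf nodes, one per Tx queue, under the queue group. */
        for (i = 0; i < nb_txq; i++) {
                ret = rte_tm_node_add(port_id, i, EXAMPLE_QGROUP_ID,
                                      0, 1, RTE_TM_NODE_LEVEL_ID_ANY,
                                      &np, &err);
                if (ret != 0)
                        return ret;
        }

        /* Apply the hierarchy; request rollback on failure. */
        return rte_tm_hierarchy_commit(port_id, 1, &err);
}

Note that with this patch a queue group accepts at most MAX_QUEUE_PER_GROUP (8)
leaf nodes, so an application with more Tx queues would add further queue-group
nodes under the root.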
Signed-off-by: Qi Zhang
Acked-by: Wenjun Wu
---
 drivers/net/ice/ice_ethdev.h |  7 ----
 drivers/net/ice/ice_tm.c     | 79 ++++--------------------------------
 2 files changed, 7 insertions(+), 79 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index fa4981ed14..ae22c29ffc 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -470,7 +470,6 @@ struct ice_tm_shaper_profile {
 struct ice_tm_node {
         TAILQ_ENTRY(ice_tm_node) node;
         uint32_t id;
-        uint32_t tc;
         uint32_t priority;
         uint32_t weight;
         uint32_t reference_count;
@@ -484,8 +483,6 @@ struct ice_tm_node {
 /* node type of Traffic Manager */
 enum ice_tm_node_type {
         ICE_TM_NODE_TYPE_PORT,
-        ICE_TM_NODE_TYPE_TC,
-        ICE_TM_NODE_TYPE_VSI,
         ICE_TM_NODE_TYPE_QGROUP,
         ICE_TM_NODE_TYPE_QUEUE,
         ICE_TM_NODE_TYPE_MAX,
@@ -495,12 +492,8 @@ enum ice_tm_node_type {
 struct ice_tm_conf {
         struct ice_shaper_profile_list shaper_profile_list;
         struct ice_tm_node *root; /* root node - port */
-        struct ice_tm_node_list tc_list; /* node list for all the TCs */
-        struct ice_tm_node_list vsi_list; /* node list for all the VSIs */
         struct ice_tm_node_list qgroup_list; /* node list for all the queue groups */
         struct ice_tm_node_list queue_list; /* node list for all the queues */
-        uint32_t nb_tc_node;
-        uint32_t nb_vsi_node;
         uint32_t nb_qgroup_node;
         uint32_t nb_queue_node;
         bool committed;
diff --git a/drivers/net/ice/ice_tm.c b/drivers/net/ice/ice_tm.c
index c00ecb6a97..d67783c77e 100644
--- a/drivers/net/ice/ice_tm.c
+++ b/drivers/net/ice/ice_tm.c
@@ -43,12 +39,8 @@ ice_tm_conf_init(struct rte_eth_dev *dev)
         /* initialize node configuration */
         TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
         pf->tm_conf.root = NULL;
-        TAILQ_INIT(&pf->tm_conf.tc_list);
-        TAILQ_INIT(&pf->tm_conf.vsi_list);
         TAILQ_INIT(&pf->tm_conf.qgroup_list);
         TAILQ_INIT(&pf->tm_conf.queue_list);
-        pf->tm_conf.nb_tc_node = 0;
-        pf->tm_conf.nb_vsi_node = 0;
         pf->tm_conf.nb_qgroup_node = 0;
         pf->tm_conf.nb_queue_node = 0;
         pf->tm_conf.committed = false;
@@ -79,16 +75,6 @@ ice_tm_conf_uninit(struct rte_eth_dev *dev)
                 rte_free(tm_node);
         }
         pf->tm_conf.nb_qgroup_node = 0;
-        while ((tm_node = TAILQ_FIRST(&pf->tm_conf.vsi_list))) {
-                TAILQ_REMOVE(&pf->tm_conf.vsi_list, tm_node, node);
-                rte_free(tm_node);
-        }
-        pf->tm_conf.nb_vsi_node = 0;
-        while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
-                TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
-                rte_free(tm_node);
-        }
-        pf->tm_conf.nb_tc_node = 0;
         if (pf->tm_conf.root) {
                 rte_free(pf->tm_conf.root);
                 pf->tm_conf.root = NULL;
@@ -100,8 +86,6 @@ ice_tm_node_search(struct rte_eth_dev *dev,
                 uint32_t node_id, enum ice_tm_node_type *node_type)
 {
         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-        struct ice_tm_node_list *tc_list = &pf->tm_conf.tc_list;
-        struct ice_tm_node_list *vsi_list = &pf->tm_conf.vsi_list;
         struct ice_tm_node_list *qgroup_list = &pf->tm_conf.qgroup_list;
         struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
         struct ice_tm_node *tm_node;
@@ -111,20 +95,6 @@ ice_tm_node_search(struct rte_eth_dev *dev,
                 return pf->tm_conf.root;
         }
 
-        TAILQ_FOREACH(tm_node, tc_list, node) {
-                if (tm_node->id == node_id) {
-                        *node_type = ICE_TM_NODE_TYPE_TC;
-                        return tm_node;
-                }
-        }
-
-        TAILQ_FOREACH(tm_node, vsi_list, node) {
-                if (tm_node->id == node_id) {
-                        *node_type = ICE_TM_NODE_TYPE_VSI;
-                        return tm_node;
-                }
-        }
-
         TAILQ_FOREACH(tm_node, qgroup_list, node) {
                 if (tm_node->id == node_id) {
                         *node_type = ICE_TM_NODE_TYPE_QGROUP;
@@ -378,6 +348,8 @@ ice_shaper_profile_del(struct rte_eth_dev *dev,
         return 0;
 }
 
+#define MAX_QUEUE_PER_GROUP 8
+
 static int
 ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
               uint32_t parent_node_id, uint32_t priority,
@@ -391,8 +363,6 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
         struct ice_tm_shaper_profile *shaper_profile = NULL;
         struct ice_tm_node *tm_node;
         struct ice_tm_node *parent_node;
-        uint16_t tc_nb = 1;
-        uint16_t vsi_nb = 1;
         int ret;
 
         if (!params || !error)
@@ -447,6 +417,7 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                 tm_node->id = node_id;
                 tm_node->parent = NULL;
                 tm_node->reference_count = 0;
+                tm_node->shaper_profile = shaper_profile;
                 tm_node->children = (struct ice_tm_node **)
                         rte_calloc(NULL, 256, (sizeof(struct ice_tm_node *)), 0);
                 rte_memcpy(&tm_node->params, params,
@@ -455,7 +426,6 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                 return 0;
         }
 
-        /* TC or queue node */
         /* check the parent node */
         parent_node = ice_tm_node_search(dev, parent_node_id,
                                          &parent_node_type);
@@ -465,8 +435,6 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                 return -EINVAL;
         }
         if (parent_node_type != ICE_TM_NODE_TYPE_PORT &&
-            parent_node_type != ICE_TM_NODE_TYPE_TC &&
-            parent_node_type != ICE_TM_NODE_TYPE_VSI &&
             parent_node_type != ICE_TM_NODE_TYPE_QGROUP) {
                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                 error->message = "parent is not valid";
@@ -482,20 +450,6 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 
         /* check the node number */
         if (parent_node_type == ICE_TM_NODE_TYPE_PORT) {
-                /* check the TC number */
-                if (pf->tm_conf.nb_tc_node >= tc_nb) {
-                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
-                        error->message = "too many TCs";
-                        return -EINVAL;
-                }
-        } else if (parent_node_type == ICE_TM_NODE_TYPE_TC) {
-                /* check the VSI number */
-                if (pf->tm_conf.nb_vsi_node >= vsi_nb) {
-                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
-                        error->message = "too many VSIs";
-                        return -EINVAL;
-                }
-        } else if (parent_node_type == ICE_TM_NODE_TYPE_VSI) {
                 /* check the queue group number */
                 if (parent_node->reference_count >= pf->dev_data->nb_tx_queues) {
                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
@@ -504,7 +458,7 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                 }
         } else {
                 /* check the queue number */
-                if (parent_node->reference_count >= pf->dev_data->nb_tx_queues) {
+                if (parent_node->reference_count >= MAX_QUEUE_PER_GROUP) {
                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                         error->message = "too many queues";
                         return -EINVAL;
@@ -516,7 +470,6 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                 }
         }
 
-        /* add the TC or VSI or queue group or queue node */
         tm_node = rte_zmalloc("ice_tm_node",
                               sizeof(struct ice_tm_node),
                               0);
@@ -545,24 +498,12 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
         rte_memcpy(&tm_node->params, params,
                    sizeof(struct rte_tm_node_params));
         if (parent_node_type == ICE_TM_NODE_TYPE_PORT) {
-                TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
-                                  tm_node, node);
-                tm_node->tc = pf->tm_conf.nb_tc_node;
-                pf->tm_conf.nb_tc_node++;
-        } else if (parent_node_type == ICE_TM_NODE_TYPE_TC) {
-                TAILQ_INSERT_TAIL(&pf->tm_conf.vsi_list,
-                                  tm_node, node);
-                tm_node->tc = parent_node->tc;
-                pf->tm_conf.nb_vsi_node++;
-        } else if (parent_node_type == ICE_TM_NODE_TYPE_VSI) {
                 TAILQ_INSERT_TAIL(&pf->tm_conf.qgroup_list,
                                   tm_node, node);
-                tm_node->tc = parent_node->parent->tc;
                 pf->tm_conf.nb_qgroup_node++;
         } else {
                 TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
                                   tm_node, node);
-                tm_node->tc = parent_node->parent->parent->tc;
                 pf->tm_conf.nb_queue_node++;
         }
         tm_node->parent->reference_count++;
@@ -610,15 +551,9 @@ ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
                 return 0;
         }
 
-        /* TC or VSI or queue group or queue node */
+        /* queue group or queue node */
         tm_node->parent->reference_count--;
-        if (node_type == ICE_TM_NODE_TYPE_TC) {
-                TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
-                pf->tm_conf.nb_tc_node--;
-        } else if (node_type == ICE_TM_NODE_TYPE_VSI) {
-                TAILQ_REMOVE(&pf->tm_conf.vsi_list, tm_node, node);
-                pf->tm_conf.nb_vsi_node--;
-        } else if (node_type == ICE_TM_NODE_TYPE_QGROUP) {
+        if (node_type == ICE_TM_NODE_TYPE_QGROUP) {
                 TAILQ_REMOVE(&pf->tm_conf.qgroup_list, tm_node, node);
                 pf->tm_conf.nb_qgroup_node--;
         } else {
@@ -884,7 +819,7 @@ int ice_do_hierarchy_commit(struct rte_eth_dev *dev,
 
         /* config vsi node */
         vsi_node = ice_get_vsi_node(hw);
-        tm_node = TAILQ_FIRST(&pf->tm_conf.vsi_list);
+        tm_node = pf->tm_conf.root;
 
         ret_val = ice_set_node_rate(hw, tm_node, vsi_node);
         if (ret_val) {
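As a usage footnote to the simplified node delete path above, here is a hedged
companion sketch of teardown, reusing the hypothetical IDs from the setup
example earlier in this mail. Leaves are removed first, following the rte_tm
convention that a node can only be deleted once all of its children are gone:

/* Hedged sketch: tear down the example hierarchy built earlier.
 * Queue (leaf) nodes go first, then the queue group, then the root,
 * so that no node is deleted while it still has children.
 */
static void
example_teardown_hierarchy(uint16_t port_id, uint16_t nb_txq)
{
        struct rte_tm_error err;
        uint16_t i;

        for (i = 0; i < nb_txq; i++)
                rte_tm_node_delete(port_id, i, &err);
        rte_tm_node_delete(port_id, EXAMPLE_QGROUP_ID, &err);
        rte_tm_node_delete(port_id, EXAMPLE_ROOT_ID, &err);
}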