@@ -60,6 +60,11 @@ New Features
* Added Tx QoS queue rate limitation support.
* Added quanta size configuration support.
+* **Updated Intel ice driver.**
+
+ * Added Tx QoS rate limitation and priority configuration support for queue and queue group.
+ * Added Tx QoS queue weight configuration support.
+
Removed Items
-------------
@@ -205,6 +205,18 @@ static const struct rte_pci_id pci_id_ice_map[] = {
{ .vendor_id = 0, /* sentinel */ },
};
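+/* Callback for eth_dev_ops.tm_ops_get: report the ice traffic manager ops table */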
+static int
+ice_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+ void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ *(const void **)arg = &ice_tm_ops;
+
+ return 0;
+}
+
static const struct eth_dev_ops ice_eth_dev_ops = {
.dev_configure = ice_dev_configure,
.dev_start = ice_dev_start,
@@ -267,6 +279,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
.timesync_read_time = ice_timesync_read_time,
.timesync_write_time = ice_timesync_write_time,
.timesync_disable = ice_timesync_disable,
+ .tm_ops_get = ice_tm_ops_get,
};
/* store statistics names and its offset in stats structure */
@@ -2328,6 +2341,9 @@ ice_dev_init(struct rte_eth_dev *dev)
/* Initialize RSS context for gtpu_eh */
ice_rss_ctx_init(pf);
+ /* Initialize TM configuration */
+ ice_tm_conf_init(dev);
+
if (!ad->is_safe_mode) {
ret = ice_flow_init(ad);
if (ret) {
@@ -2508,6 +2524,9 @@ ice_dev_close(struct rte_eth_dev *dev)
rte_free(pf->proto_xtr);
pf->proto_xtr = NULL;
+ /* Uninit TM configuration */
+ ice_tm_conf_uninit(dev);
+
if (ad->devargs.pps_out_ena) {
ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0);
ICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0);
@@ -9,10 +9,12 @@
#include <rte_time.h>
#include <ethdev_driver.h>
+#include <rte_tm_driver.h>
#include "base/ice_common.h"
#include "base/ice_adminq_cmd.h"
#include "base/ice_flow.h"
+#include "base/ice_sched.h"
#define ICE_ADMINQ_LEN 32
#define ICE_SBIOQ_LEN 32
@@ -453,6 +455,48 @@ struct ice_acl_info {
uint64_t hw_entry_id[MAX_ACL_NORMAL_ENTRIES];
};
+TAILQ_HEAD(ice_shaper_profile_list, ice_tm_shaper_profile);
+TAILQ_HEAD(ice_tm_node_list, ice_tm_node);
+
+struct ice_tm_shaper_profile {
+ TAILQ_ENTRY(ice_tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t reference_count;
+ struct rte_tm_shaper_params profile;
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct ice_tm_node {
+ TAILQ_ENTRY(ice_tm_node) node;
+ uint32_t id;
+ uint32_t tc;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t reference_count;
+ struct ice_tm_node *parent;
+ struct ice_tm_shaper_profile *shaper_profile;
+ struct rte_tm_node_params params;
+};
+
+/* Node types of the Traffic Manager hierarchy; the values also serve as hierarchy level IDs. */
+enum ice_tm_node_type {
+ ICE_TM_NODE_TYPE_PORT,
+ ICE_TM_NODE_TYPE_TC,
+ ICE_TM_NODE_TYPE_QUEUE,
+ ICE_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store all the Traffic Manager configuration. */
+struct ice_tm_conf {
+ struct ice_shaper_profile_list shaper_profile_list;
+ struct ice_tm_node *root; /* root node - port */
+ struct ice_tm_node_list tc_list; /* node list for all the TCs */
+ struct ice_tm_node_list queue_list; /* node list for all the queues */
+ uint32_t nb_tc_node;
+ uint32_t nb_queue_node;
+ bool committed;
+};
+
struct ice_pf {
struct ice_adapter *adapter; /* The adapter this PF associate to */
struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -497,6 +541,7 @@ struct ice_pf {
uint64_t old_tx_bytes;
uint64_t supported_rxdid; /* bitmap for supported RXDID */
uint64_t rss_hf;
+ struct ice_tm_conf tm_conf;
};
#define ICE_MAX_QUEUE_NUM 2048
@@ -624,6 +669,9 @@ int ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
struct ice_rss_hash_cfg *cfg);
int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
struct ice_rss_hash_cfg *cfg);
+void ice_tm_conf_init(struct rte_eth_dev *dev);
+void ice_tm_conf_uninit(struct rte_eth_dev *dev);
+extern const struct rte_tm_ops ice_tm_ops;
static inline int
ice_align_floor(int n)
new file mode 100644
@@ -0,0 +1,599 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+#include <rte_tm_driver.h>
+
+#include "ice_ethdev.h"
+#include "ice_rxtx.h"
+
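+/*
+ * Traffic manager (rte_tm) support for ice PF devices.
+ *
+ * A typical application call sequence (sketch, using the generic rte_tm API,
+ * which dispatches to the ops below through tm_ops_get):
+ *   rte_tm_shaper_profile_add()  - optionally define a peak rate profile
+ *   rte_tm_node_add()            - add the port (root), TC and queue nodes
+ *   rte_tm_hierarchy_commit()    - program the per-queue bandwidth limits
+ */
+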
+static int ice_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+static int ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+static int ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error);
+static int ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error);
+static int ice_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+static int ice_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+
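+/* TM ops reported to applications; supported hierarchy: port (root) -> TC -> Tx queue (leaf) */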
+const struct rte_tm_ops ice_tm_ops = {
+ .shaper_profile_add = ice_shaper_profile_add,
+ .shaper_profile_delete = ice_shaper_profile_del,
+ .node_add = ice_tm_node_add,
+ .node_delete = ice_tm_node_delete,
+ .node_type_get = ice_node_type_get,
+ .hierarchy_commit = ice_hierarchy_commit,
+};
+
+void
+ice_tm_conf_init(struct rte_eth_dev *dev)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* initialize node configuration */
+ TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
+ pf->tm_conf.root = NULL;
+ TAILQ_INIT(&pf->tm_conf.tc_list);
+ TAILQ_INIT(&pf->tm_conf.queue_list);
+ pf->tm_conf.nb_tc_node = 0;
+ pf->tm_conf.nb_queue_node = 0;
+ pf->tm_conf.committed = false;
+}
+
+void
+ice_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_tm_node *tm_node;
+
+ /* clear node configuration */
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_queue_node = 0;
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_tc_node = 0;
+ if (pf->tm_conf.root) {
+ rte_free(pf->tm_conf.root);
+ pf->tm_conf.root = NULL;
+ }
+}
+
+static inline struct ice_tm_node *
+ice_tm_node_search(struct rte_eth_dev *dev,
+ uint32_t node_id, enum ice_tm_node_type *node_type)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+ struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+ struct ice_tm_node *tm_node;
+
+ if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
+ *node_type = ICE_TM_NODE_TYPE_PORT;
+ return pf->tm_conf.root;
+ }
+
+ TAILQ_FOREACH(tm_node, tc_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = ICE_TM_NODE_TYPE_TC;
+ return tm_node;
+ }
+ }
+
+ TAILQ_FOREACH(tm_node, queue_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = ICE_TM_NODE_TYPE_QUEUE;
+ return tm_node;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+ice_node_param_check(struct ice_pf *pf, uint32_t node_id,
+ uint32_t priority, uint32_t weight,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ /* check all the unsupported parameters */
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ if (priority) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority should be 0";
+ return -EINVAL;
+ }
+
+ if (weight != 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "weight must be 1";
+ return -EINVAL;
+ }
+
+ /* shared shapers are not supported */
+ if (params->shared_shaper_id) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+ if (params->n_shared_shapers) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+
+ /* for non-leaf node */
+ if (node_id >= pf->dev_data->nb_tx_queues) {
+ if (params->nonleaf.wfq_weight_mode) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFQ not supported";
+ return -EINVAL;
+ }
+ if (params->nonleaf.n_sp_priorities != 1) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+ error->message = "SP priority not supported";
+ return -EINVAL;
+ } else if (params->nonleaf.wfq_weight_mode &&
+ !(*params->nonleaf.wfq_weight_mode)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFQ should be byte mode";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* for leaf node */
+ if (params->leaf.cman) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+ error->message = "Congestion management not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.wred_profile_id !=
+ RTE_TM_WRED_PROFILE_ID_NONE) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.shared_wred_context_id) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.n_shared_wred_contexts) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
+ struct ice_tm_node *tm_node;
+
+ if (!is_leaf || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = ice_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (node_type == ICE_TM_NODE_TYPE_QUEUE)
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static inline struct ice_tm_shaper_profile *
+ice_shaper_profile_search(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_shaper_profile_list *shaper_profile_list =
+ &pf->tm_conf.shaper_profile_list;
+ struct ice_tm_shaper_profile *shaper_profile;
+
+ TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+ if (shaper_profile_id == shaper_profile->shaper_profile_id)
+ return shaper_profile;
+ }
+
+ return NULL;
+}
+
+static int
+ice_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ /* min bucket size not supported */
+ if (profile->committed.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ error->message = "committed bucket size not supported";
+ return -EINVAL;
+ }
+ /* max bucket size not supported */
+ if (profile->peak.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ error->message = "peak bucket size not supported";
+ return -EINVAL;
+ }
+ /* length adjustment not supported */
+ if (profile->pkt_length_adjust) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ error->message = "packet length adjustment not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ice_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_tm_shaper_profile *shaper_profile;
+ int ret;
+
+ if (!profile || !error)
+ return -EINVAL;
+
+ ret = ice_shaper_profile_param_check(profile, error);
+ if (ret)
+ return ret;
+
+ shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);
+
+ if (shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID already exists";
+ return -EINVAL;
+ }
+
+ shaper_profile = rte_zmalloc("ice_tm_shaper_profile",
+ sizeof(struct ice_tm_shaper_profile),
+ 0);
+ if (!shaper_profile)
+ return -ENOMEM;
+ shaper_profile->shaper_profile_id = shaper_profile_id;
+ rte_memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
+ shaper_profile, node);
+
+ return 0;
+}
+
+static int
+ice_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_tm_shaper_profile *shaper_profile;
+
+ if (!error)
+ return -EINVAL;
+
+ shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);
+
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID does not exist";
+ return -EINVAL;
+ }
+
+ /* don't delete a profile if it's used by one or several nodes */
+ if (shaper_profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "profile in use";
+ return -EINVAL;
+ }
+
+ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
+ rte_free(shaper_profile);
+
+ return 0;
+}
+
+static int
+ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
+ enum ice_tm_node_type parent_node_type = ICE_TM_NODE_TYPE_MAX;
+ struct ice_tm_shaper_profile *shaper_profile = NULL;
+ struct ice_tm_node *tm_node;
+ struct ice_tm_node *parent_node;
+ uint16_t tc_nb = 1;
+ int ret;
+
+ if (!params || !error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (pf->tm_conf.committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ ret = ice_node_param_check(pf, node_id, priority, weight,
+ params, error);
+ if (ret)
+ return ret;
+
+ /* check if the node ID is already in use */
+ if (ice_tm_node_search(dev, node_id, &node_type)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node id already used";
+ return -EINVAL;
+ }
+
+ /* check the shaper profile id */
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ shaper_profile = ice_shaper_profile_search(dev,
+ params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile does not exist";
+ return -EINVAL;
+ }
+ }
+
+ /* a node without a parent is the root node */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check level */
+ if (level_id != ICE_TM_NODE_TYPE_PORT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* only one root node is allowed */
+ if (pf->tm_conf.root) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "already have a root";
+ return -EINVAL;
+ }
+
+ /* add the root node */
+ tm_node = rte_zmalloc("ice_tm_node",
+ sizeof(struct ice_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->parent = NULL;
+ tm_node->reference_count = 0;
+ rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ pf->tm_conf.root = tm_node;
+ return 0;
+ }
+
+ /* TC or queue node */
+ /* check the parent node */
+ parent_node = ice_tm_node_search(dev, parent_node_id,
+ &parent_node_type);
+ if (!parent_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent does not exist";
+ return -EINVAL;
+ }
+ if (parent_node_type != ICE_TM_NODE_TYPE_PORT &&
+ parent_node_type != ICE_TM_NODE_TYPE_TC) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent is not root or TC";
+ return -EINVAL;
+ }
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id != (uint32_t)parent_node_type + 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* check the node number */
+ if (parent_node_type == ICE_TM_NODE_TYPE_PORT) {
+ /* check the TC number */
+ if (pf->tm_conf.nb_tc_node >= tc_nb) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many TCs";
+ return -EINVAL;
+ }
+ } else {
+ /* check the queue number */
+ if (parent_node->reference_count >= pf->dev_data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues";
+ return -EINVAL;
+ }
+ if (node_id >= pf->dev_data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "queue ID is too large";
+ return -EINVAL;
+ }
+ }
+
+ /* add the TC or queue node */
+ tm_node = rte_zmalloc("ice_tm_node",
+ sizeof(struct ice_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = parent_node;
+ tm_node->shaper_profile = shaper_profile;
+ rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ if (parent_node_type == ICE_TM_NODE_TYPE_PORT) {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
+ tm_node, node);
+ tm_node->tc = pf->tm_conf.nb_tc_node;
+ pf->tm_conf.nb_tc_node++;
+ } else {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
+ tm_node, node);
+ tm_node->tc = parent_node->tc;
+ pf->tm_conf.nb_queue_node++;
+ }
+ tm_node->parent->reference_count++;
+
+ return 0;
+}
+
+static int
+ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
+ struct ice_tm_node *tm_node;
+
+ if (!error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (pf->tm_conf.committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = ice_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* a node with children cannot be deleted */
+ if (tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message =
+ "cannot delete a node which has children";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (node_type == ICE_TM_NODE_TYPE_PORT) {
+ rte_free(tm_node);
+ pf->tm_conf.root = NULL;
+ return 0;
+ }
+
+ /* TC or queue node */
+ tm_node->parent->reference_count--;
+ if (node_type == ICE_TM_NODE_TYPE_TC) {
+ TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
+ pf->tm_conf.nb_tc_node--;
+ } else {
+ TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
+ pf->tm_conf.nb_queue_node--;
+ }
+ rte_free(tm_node);
+
+ return 0;
+}
+
+static int ice_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
+ struct ice_tm_node *tm_node;
+ struct ice_tx_queue *txq;
+ struct ice_vsi *vsi;
+ int ret_val = ICE_SUCCESS;
+ uint64_t peak = 0;
+
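+ /* walk all queue (leaf) nodes and program their bandwidth limit in hardware */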
+ TAILQ_FOREACH(tm_node, queue_list, node) {
+ txq = dev->data->tx_queues[tm_node->id];
+ vsi = txq->vsi;
+ if (tm_node->shaper_profile)
+ peak = tm_node->shaper_profile->profile.peak.rate;
+
+ /* convert the peak rate from bytes per second to Kbps */
+ peak = peak / 1000 * BITS_PER_BYTE;
+ ret_val = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx,
+ tm_node->tc, tm_node->id, ICE_MAX_BW, (u32)peak);
+ if (ret_val) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ PMD_DRV_LOG(ERR, "failed to configure bandwidth for queue %u", tm_node->id);
+ goto fail_clear;
+ }
+ }
+
+ pf->tm_conf.committed = true;
+
+ return ret_val;
+
+fail_clear:
+ /* clear all the traffic manager configuration */
+ if (clear_on_fail) {
+ ice_tm_conf_uninit(dev);
+ ice_tm_conf_init(dev);
+ }
+ return ret_val;
+}
@@ -12,6 +12,7 @@ sources = files(
'ice_hash.c',
'ice_rxtx.c',
'ice_switch_filter.c',
+ 'ice_tm.c',
)
deps += ['hash', 'net', 'common_iavf']