@@ -474,6 +474,7 @@ struct ice_tm_node {
uint32_t weight;
uint32_t reference_count;
struct ice_tm_node *parent;
+ struct ice_tm_node **children; /* child pointers, in the order the children were added */
struct ice_tm_shaper_profile *shaper_profile;
struct rte_tm_node_params params;
};
@@ -482,6 +483,8 @@ struct ice_tm_node {
enum ice_tm_node_type {
ICE_TM_NODE_TYPE_PORT,
ICE_TM_NODE_TYPE_TC,
+ ICE_TM_NODE_TYPE_VSI,
+ ICE_TM_NODE_TYPE_QGROUP,
ICE_TM_NODE_TYPE_QUEUE,
ICE_TM_NODE_TYPE_MAX,
};
@@ -489,10 +492,14 @@ enum ice_tm_node_type {
/* Struct to store all the Traffic Manager configuration. */
struct ice_tm_conf {
struct ice_shaper_profile_list shaper_profile_list;
- struct ice_tm_node *root; /* root node - vf vsi */
+ struct ice_tm_node *root; /* root node - port */
struct ice_tm_node_list tc_list; /* node list for all the TCs */
+ struct ice_tm_node_list vsi_list; /* node list for all the VSIs */
+ struct ice_tm_node_list qgroup_list; /* node list for all the queue groups */
struct ice_tm_node_list queue_list; /* node list for all the queues */
uint32_t nb_tc_node;
+ uint32_t nb_vsi_node;
+ uint32_t nb_qgroup_node;
uint32_t nb_queue_node;
bool committed;
};
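With the two new levels in place, the software tree kept in ice_tm_conf mirrors the five-level scheduler topology, one list and counter per level:

    port (root)
      \- TC            -> tc_list, nb_tc_node
         \- VSI           -> vsi_list, nb_vsi_node
            \- queue group  -> qgroup_list, nb_qgroup_node
               \- queue        -> queue_list, nb_queue_node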
@@ -44,8 +44,12 @@ ice_tm_conf_init(struct rte_eth_dev *dev)
TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
pf->tm_conf.root = NULL;
TAILQ_INIT(&pf->tm_conf.tc_list);
+ TAILQ_INIT(&pf->tm_conf.vsi_list);
+ TAILQ_INIT(&pf->tm_conf.qgroup_list);
TAILQ_INIT(&pf->tm_conf.queue_list);
pf->tm_conf.nb_tc_node = 0;
+ pf->tm_conf.nb_vsi_node = 0;
+ pf->tm_conf.nb_qgroup_node = 0;
pf->tm_conf.nb_queue_node = 0;
pf->tm_conf.committed = false;
}
@@ -62,6 +66,16 @@ ice_tm_conf_uninit(struct rte_eth_dev *dev)
rte_free(tm_node);
}
pf->tm_conf.nb_queue_node = 0;
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.qgroup_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.qgroup_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_qgroup_node = 0;
+ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.vsi_list))) {
+ TAILQ_REMOVE(&pf->tm_conf.vsi_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ pf->tm_conf.nb_vsi_node = 0;
while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
rte_free(tm_node);
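One gap in this teardown: the per-node children arrays allocated in ice_tm_node_add are never freed. Assuming they are not released elsewhere, each loop could free the array alongside the node itself, e.g. for the queue-group list:

    while ((tm_node = TAILQ_FIRST(&pf->tm_conf.qgroup_list))) {
        TAILQ_REMOVE(&pf->tm_conf.qgroup_list, tm_node, node);
        rte_free(tm_node->children);   /* release the child-pointer array too */
        rte_free(tm_node);
    }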
@@ -79,6 +93,8 @@ ice_tm_node_search(struct rte_eth_dev *dev,
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct ice_tm_node_list *tc_list = &pf->tm_conf.tc_list;
+ struct ice_tm_node_list *vsi_list = &pf->tm_conf.vsi_list;
+ struct ice_tm_node_list *qgroup_list = &pf->tm_conf.qgroup_list;
struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
struct ice_tm_node *tm_node;

@@ -94,6 +110,20 @@
}
}

+ TAILQ_FOREACH(tm_node, vsi_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = ICE_TM_NODE_TYPE_VSI;
+ return tm_node;
+ }
+ }
+
+ TAILQ_FOREACH(tm_node, qgroup_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = ICE_TM_NODE_TYPE_QGROUP;
+ return tm_node;
+ }
+ }
+
TAILQ_FOREACH(tm_node, queue_list, node) {
if (tm_node->id == node_id) {
*node_type = ICE_TM_NODE_TYPE_QUEUE;
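For context, a minimal sketch (not part of the patch) of how an application could build the resulting five-level tree through the generic rte_tm API. The ROOT/TC0/VSI0/QG0 IDs are arbitrary application-chosen values, queue-node IDs must match Tx queue IDs, and error handling is omitted:

    uint16_t port_id = 0;   /* assumption: the ice port under test */
    enum { ROOT = 1000, TC0 = 1100, VSI0 = 1200, QG0 = 1300 };
    struct rte_tm_node_params np = {
        .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
    };
    struct rte_tm_error err;

    rte_tm_node_add(port_id, ROOT, RTE_TM_NODE_ID_NULL, 0, 1, 0, &np, &err); /* port */
    rte_tm_node_add(port_id, TC0,  ROOT, 0, 1, 1, &np, &err);                /* TC */
    rte_tm_node_add(port_id, VSI0, TC0,  0, 1, 2, &np, &err);                /* VSI */
    rte_tm_node_add(port_id, QG0,  VSI0, 0, 1, 3, &np, &err);                /* queue group */
    rte_tm_node_add(port_id, 0,    QG0,  0, 1, 4, &np, &err);                /* Tx queue 0 */
    rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &err);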
@@ -354,6 +384,7 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
struct ice_tm_node *tm_node;
struct ice_tm_node *parent_node;
uint16_t tc_nb = 1;
+ uint16_t vsi_nb = 1;
int ret;

if (!params || !error)
@@ -415,6 +446,11 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->id = node_id;
tm_node->parent = NULL;
tm_node->reference_count = 0;
+ tm_node->children = rte_calloc(NULL, 256, sizeof(struct ice_tm_node *), 0);
+ if (tm_node->children == NULL) {
+ rte_free(tm_node);
+ return -ENOMEM;
+ }
rte_memcpy(&tm_node->params, params,
sizeof(struct rte_tm_node_params));
pf->tm_conf.root = tm_node;
@@ -431,9 +464,11 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
return -EINVAL;
}
if (parent_node_type != ICE_TM_NODE_TYPE_PORT &&
- parent_node_type != ICE_TM_NODE_TYPE_TC) {
+ parent_node_type != ICE_TM_NODE_TYPE_TC &&
+ parent_node_type != ICE_TM_NODE_TYPE_VSI &&
+ parent_node_type != ICE_TM_NODE_TYPE_QGROUP) {
error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
- error->message = "parent is not root or TC";
+ error->message = "parent is not valid";
return -EINVAL;
}
/* check level */
@@ -452,6 +487,20 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
error->message = "too many TCs";
return -EINVAL;
}
+ } else if (parent_node_type == ICE_TM_NODE_TYPE_TC) {
+ /* check the VSI number */
+ if (pf->tm_conf.nb_vsi_node >= vsi_nb) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many VSIs";
+ return -EINVAL;
+ }
+ } else if (parent_node_type == ICE_TM_NODE_TYPE_VSI) {
+ /* check the queue group number */
+ if (parent_node->reference_count >= pf->dev_data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queue groups";
+ return -EINVAL;
+ }
} else {
/* check the queue number */
if (parent_node->reference_count >= pf->dev_data->nb_tx_queues) {
@@ -466,7 +515,7 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
}
}

- /* add the TC or queue node */
+ /* add the TC or VSI or queue group or queue node */
tm_node = rte_zmalloc("ice_tm_node",
sizeof(struct ice_tm_node),
0);
@@ -478,6 +527,13 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->reference_count = 0;
tm_node->parent = parent_node;
tm_node->shaper_profile = shaper_profile;
+ tm_node->children = rte_calloc(NULL, 256, sizeof(struct ice_tm_node *), 0);
+ if (tm_node->children == NULL) {
+ rte_free(tm_node);
+ return -ENOMEM;
+ }
+ tm_node->parent->children[tm_node->parent->reference_count] = tm_node;
+
rte_memcpy(&tm_node->params, params,
sizeof(struct rte_tm_node_params));
if (parent_node_type == ICE_TM_NODE_TYPE_PORT) {
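Note that each children array is fixed at 256 entries (the rte_calloc count above), and the children[reference_count] store is not bounds-checked. A hypothetical guard, placed just before that store, would keep an over-populated parent from overflowing its array:

    /* hypothetical overflow guard; 256 matches the rte_calloc count */
    if (tm_node->parent->reference_count >= 256) {
        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
        error->message = "parent node has too many children";
        rte_free(tm_node->children);
        rte_free(tm_node);
        return -EINVAL;
    }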
@@ -485,10 +538,20 @@ ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node, node);
tm_node->tc = pf->tm_conf.nb_tc_node;
pf->tm_conf.nb_tc_node++;
+ } else if (parent_node_type == ICE_TM_NODE_TYPE_TC) {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.vsi_list,
+ tm_node, node);
+ tm_node->tc = parent_node->tc;
+ pf->tm_conf.nb_vsi_node++;
+ } else if (parent_node_type == ICE_TM_NODE_TYPE_VSI) {
+ TAILQ_INSERT_TAIL(&pf->tm_conf.qgroup_list,
+ tm_node, node);
+ tm_node->tc = parent_node->parent->tc;
+ pf->tm_conf.nb_qgroup_node++;
} else {
TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
tm_node, node);
- tm_node->tc = parent_node->tc;
+ tm_node->tc = parent_node->parent->parent->tc;
pf->tm_conf.nb_queue_node++;
}
tm_node->parent->reference_count++;
@@ -543,11 +606,17 @@ ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
return 0;
}

- /* TC or queue node */
+ /* TC or VSI or queue group or queue node */
tm_node->parent->reference_count--;
if (node_type == ICE_TM_NODE_TYPE_TC) {
TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
pf->tm_conf.nb_tc_node--;
+ } else if (node_type == ICE_TM_NODE_TYPE_VSI) {
+ TAILQ_REMOVE(&pf->tm_conf.vsi_list, tm_node, node);
+ pf->tm_conf.nb_vsi_node--;
+ } else if (node_type == ICE_TM_NODE_TYPE_QGROUP) {
+ TAILQ_REMOVE(&pf->tm_conf.qgroup_list, tm_node, node);
+ pf->tm_conf.nb_qgroup_node--;
} else {
TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
pf->tm_conf.nb_queue_node--;
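The delete path unlinks the node from its per-level list and drops the parent's reference_count, but the pointer stored in the parent's children array stays behind. Assuming nodes can be deleted and re-added before commit, a compaction step along these lines would keep children[] dense, so that children[reference_count] remains the next free slot (sketch; runs after the decrement above):

    uint32_t i;

    for (i = 0; i <= tm_node->parent->reference_count; i++)
        if (tm_node->parent->children[i] == tm_node)
            break;
    for (; i < tm_node->parent->reference_count; i++)
        tm_node->parent->children[i] = tm_node->parent->children[i + 1];
    tm_node->parent->children[tm_node->parent->reference_count] = NULL;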
@@ -557,36 +626,189 @@ ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
return 0;
}

+static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev,
+ struct ice_sched_node *queue_sched_node,
+ struct ice_sched_node *dst_node,
+ uint16_t queue_id)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_aqc_move_txqs_data *buf;
+ struct ice_sched_node *queue_parent_node;
+ uint8_t txqs_moved;
+ int ret = ICE_SUCCESS;
+ uint16_t buf_size = ice_struct_size(buf, txqs, 1);
+
+ buf = ice_malloc(hw, buf_size);
+ if (buf == NULL)
+ return ICE_ERR_NO_MEMORY;
+
+ queue_parent_node = queue_sched_node->parent;
+ buf->src_teid = queue_parent_node->info.node_teid;
+ buf->dest_teid = dst_node->info.node_teid;
+ buf->txqs[0].q_teid = queue_sched_node->info.node_teid;
+ buf->txqs[0].txq_id = queue_id;
+
+ ret = ice_aq_move_recfg_lan_txq(hw, 1, true, false, false, false, 50,
+ NULL, buf, buf_size, &txqs_moved, NULL);
+ if (ret || txqs_moved == 0) {
+ PMD_DRV_LOG(ERR, "move lan queue %u failed", queue_id);
+ rte_free(buf);
+ return ICE_ERR_PARAM;
+ }
+
+ if (queue_parent_node->num_children > 0) {
+ queue_parent_node->num_children--;
+ queue_parent_node->children[queue_parent_node->num_children] = NULL;
+ } else {
+ PMD_DRV_LOG(ERR, "invalid children number %d for queue %u",
+ queue_parent_node->num_children, queue_id);
+ rte_free(buf);
+ return ICE_ERR_PARAM;
+ }
+ dst_node->children[dst_node->num_children++] = queue_sched_node;
+ queue_sched_node->parent = dst_node;
+ ice_sched_query_elem(hw, queue_sched_node->info.node_teid, &queue_sched_node->info);
+ rte_free(buf);
+
+ return ret;
+}
+
static int ice_hierarchy_commit(struct rte_eth_dev *dev,
int clear_on_fail,
__rte_unused struct rte_tm_error *error)
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_tm_node_list *qgroup_list = &pf->tm_conf.qgroup_list;
struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
struct ice_tm_node *tm_node;
+ struct ice_sched_node *node;
+ struct ice_sched_node *vsi_node;
+ struct ice_sched_node *queue_node;
struct ice_tx_queue *txq;
struct ice_vsi *vsi;
int ret_val = ICE_SUCCESS;
uint64_t peak = 0;
+ uint32_t i;
+ uint32_t idx_vsi_child;
+ uint32_t idx_qg;
+ uint32_t nb_vsi_child;
+ uint32_t nb_qg;
+ uint32_t qid;
+ uint32_t q_teid;
+ uint32_t vsi_layer;
+
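+ /* all Tx queues are stopped first; each one is restarted below once
+ * its scheduler node has been placed in the right queue group */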
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ ret_val = ice_tx_queue_stop(dev, i);
+ if (ret_val) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ PMD_DRV_LOG(ERR, "stop queue %u failed", i);
+ goto fail_clear;
+ }
+ }

- TAILQ_FOREACH(tm_node, queue_list, node) {
- txq = dev->data->tx_queues[tm_node->id];
- vsi = txq->vsi;
- if (tm_node->shaper_profile)
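+ /* descend the leftmost branch of the HW scheduler tree from the
+ * port root down to the VSI layer */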
+ node = hw->port_info->root;
+ vsi_layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
+ for (i = 0; i < vsi_layer; i++)
+ node = node->children[0];
+ vsi_node = node;
+ nb_vsi_child = vsi_node->num_children;
+ nb_qg = vsi_node->children[0]->num_children;
+
+ idx_vsi_child = 0;
+ idx_qg = 0;
+
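+ /* walk the software queue-group nodes in creation order, mapping
+ * each one onto a HW queue-group scheduler node under the VSI */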
+ TAILQ_FOREACH(tm_node, qgroup_list, node) {
+ struct ice_tm_node *tm_child_node;
+ struct ice_sched_node *qgroup_sched_node =
+ vsi_node->children[idx_vsi_child]->children[idx_qg];
+
+ for (i = 0; i < tm_node->reference_count; i++) {
+ tm_child_node = tm_node->children[i];
+ qid = tm_child_node->id;
+ ret_val = ice_tx_queue_start(dev, qid);
+ if (ret_val) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ PMD_DRV_LOG(ERR, "start queue %u failed", qid);
+ goto fail_clear;
+ }
+ txq = dev->data->tx_queues[qid];
+ q_teid = txq->q_teid;
+ queue_node = ice_sched_get_node(hw->port_info, q_teid);
+ if (queue_node == NULL) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ PMD_DRV_LOG(ERR, "get queue %u node failed", qid);
+ goto fail_clear;
+ }
+ if (queue_node->info.parent_teid == qgroup_sched_node->info.node_teid)
+ continue;
+ ret_val = ice_move_recfg_lan_txq(dev, queue_node, qgroup_sched_node, qid);
+ if (ret_val) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ PMD_DRV_LOG(ERR, "move queue %u failed", qid);
+ goto fail_clear;
+ }
+ }
+ if (tm_node->reference_count != 0 && tm_node->shaper_profile) {
+ uint32_t node_teid = qgroup_sched_node->info.node_teid;
/* Transfer from Byte per seconds to Kbps */
peak = tm_node->shaper_profile->profile.peak.rate;
-
- peak = peak / 1000 * BITS_PER_BYTE;
- ret_val = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx,
- tm_node->tc, tm_node->id, ICE_MAX_BW, (u32)peak);
- if (ret_val) {
+ peak = peak / 1000 * BITS_PER_BYTE;
+ ret_val = ice_sched_set_node_bw_lmt_per_tc(hw->port_info,
+ node_teid,
+ ICE_AGG_TYPE_Q,
+ tm_node->tc,
+ ICE_MAX_BW,
+ (u32)peak);
+ if (ret_val) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ PMD_DRV_LOG(ERR,
+ "configure queue group %u bandwidth failed",
+ tm_node->id);
+ goto fail_clear;
+ }
+ }
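+ /* advance to the next HW queue-group node, moving on to the next
+ * VSI child once the current child's groups are used up */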
+ idx_qg++;
+ if (idx_qg >= nb_qg) {
+ idx_qg = 0;
+ idx_vsi_child++;
+ }
+ if (idx_vsi_child >= nb_vsi_child) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
- PMD_DRV_LOG(ERR, "configure queue %u bandwidth failed", tm_node->id);
+ PMD_DRV_LOG(ERR, "too many queues");
goto fail_clear;
}
}

+ TAILQ_FOREACH(tm_node, queue_list, node) {
+ qid = tm_node->id;
+ txq = dev->data->tx_queues[qid];
+ vsi = txq->vsi;
+ if (tm_node->shaper_profile) {
+ /* Transfer from Byte per seconds to Kbps */
+ peak = tm_node->shaper_profile->profile.peak.rate;
+ peak = peak / 1000 * BITS_PER_BYTE;
+ ret_val = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx,
+ tm_node->tc, tm_node->id,
+ ICE_MAX_BW, (u32)peak);
+ if (ret_val) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ PMD_DRV_LOG(ERR,
+ "configure queue %u bandwidth failed",
+ tm_node->id);
+ goto fail_clear;
+ }
+ }
+ }
+
return ret_val;

fail_clear: