[v7,6/9] net/ice: support queue priority configuration

Message ID 20220422005746.2300736-7-wenjun1.wu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series Enable ETS-based TX QoS on PF

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Wenjun Wu April 22, 2022, 12:57 a.m. UTC
  This patch adds queue priority configuration support.
The highest priority is 0, and the lowest priority is 7.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/ice/ice_tm.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
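
For reference, a minimal sketch of how an application would request a queue
priority through the generic rte_tm API; the value arrives in
ice_hierarchy_commit() as tm_node->priority. Port and node IDs are
hypothetical and set_queue_priority() is an illustrative helper, not part of
this patch:

#include <string.h>
#include <rte_tm.h>

/* Add a leaf (queue) node with the requested priority under an existing
 * queue-group node, then commit the hierarchy so the PMD programs it.
 * Priority 0 is the highest and 7 the lowest, matching the range enforced
 * by ice_node_param_check() in this patch. */
static int
set_queue_priority(uint16_t port_id, uint32_t queue_node_id,
		   uint32_t qgroup_node_id, uint32_t prio)
{
	struct rte_tm_node_params params;
	struct rte_tm_error error;
	int ret;

	memset(&params, 0, sizeof(params));
	memset(&error, 0, sizeof(error));
	params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;

	ret = rte_tm_node_add(port_id, queue_node_id, qgroup_node_id,
			      prio, 1 /* weight */,
			      RTE_TM_NODE_LEVEL_ID_ANY, &params, &error);
	if (ret != 0)
		return ret;

	/* reaches the driver's hierarchy_commit callback,
	 * i.e. ice_hierarchy_commit() below */
	return rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &error);
}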
  

Patch

diff --git a/drivers/net/ice/ice_tm.c b/drivers/net/ice/ice_tm.c
index d70d077286..91e420d653 100644
--- a/drivers/net/ice/ice_tm.c
+++ b/drivers/net/ice/ice_tm.c
@@ -147,9 +147,9 @@  ice_node_param_check(struct ice_pf *pf, uint32_t node_id,
 		return -EINVAL;
 	}
 
-	if (priority) {
+	if (priority >= 8) {
 		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
-		error->message = "priority should be 0";
+		error->message = "priority should be less than 8";
 		return -EINVAL;
 	}
 
@@ -684,6 +684,7 @@  static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 	struct ice_vsi *vsi;
 	int ret_val = ICE_SUCCESS;
 	uint64_t peak = 0;
+	uint8_t priority;
 	uint32_t i;
 	uint32_t idx_vsi_child;
 	uint32_t idx_qg;
@@ -779,6 +780,7 @@  static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 		qid = tm_node->id;
 		txq = dev->data->tx_queues[qid];
 		vsi = txq->vsi;
+		q_teid = txq->q_teid;
 		if (tm_node->shaper_profile) {
 			/* Transfer from Byte per seconds to Kbps */
 			peak = tm_node->shaper_profile->profile.peak.rate;
@@ -794,6 +796,14 @@  static int ice_hierarchy_commit(struct rte_eth_dev *dev,
 				goto fail_clear;
 			}
 		}
+		priority = 7 - tm_node->priority;
+		ret_val = ice_cfg_vsi_q_priority(hw->port_info, 1,
+						 &q_teid, &priority);
+		if (ret_val) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+			PMD_DRV_LOG(ERR, "configure queue %u priority failed", qid);
+			goto fail_clear;
+		}
 	}
 
 	return ret_val;
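
A note on the conversion above: rte_tm defines 0 as the highest priority,
while the value handed to ice_cfg_vsi_q_priority() appears to grow with
priority, hence the 7 - tm_node->priority flip. A minimal sketch of that
mapping, using a hypothetical helper name:

#include <stdint.h>

/* Hypothetical helper mirroring the conversion in the hunk above:
 * rte_tm priority 0 (highest) .. 7 (lowest) is inverted so that a larger
 * value passed to the scheduler means a higher priority. */
static inline uint8_t
ice_tm_prio_to_sched_prio(uint32_t tm_prio)
{
	return (uint8_t)(7 - tm_prio);
}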