[v2,29/33] net/ice: enable IRQ mapping configuration for large VF

Message ID 20220413160932.2074781-30-kevinx.liu@intel.com (mailing list archive)
State Superseded, archived
Series: support full function of DCF

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Kevin Liu April 13, 2022, 4:09 p.m. UTC
  From: Steve Yang <stevex.yang@intel.com>

The current IRQ mapping configuration supports a maximum of 16 queues
and 16 MSI-X vectors. Change the queue-vector mapping structure to
represent up to 256 queues. A new opcode, VIRTCHNL_OP_MAP_QUEUE_VECTOR,
is used to handle the case with a large number of queues. To stay within
the adminq buffer size limit, the virtchnl message is split and sent
multiple times when needed.
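
For illustration, here is a minimal sketch of the chunked-send logic
described above, as standalone C. The send_irq_map_chunk() stub is
hypothetical and stands in for ice_dcf_config_irq_map_lv(); the buffer
limit mirrors the ICE_DCF_IRQ_MAP_NUM_PER_BUF constant added by this
patch.

#include <stdint.h>
#include <stdio.h>

#define IRQ_MAP_NUM_PER_BUF 128	/* max queue-vector maps per adminq buffer */

/* Hypothetical stub standing in for ice_dcf_config_irq_map_lv(). */
static int
send_irq_map_chunk(uint16_t num, uint16_t index)
{
	printf("send %u queue-vector maps starting at queue %u\n", num, index);
	return 0;
}

/* Split the queue-vector maps into adminq-sized messages: full buffers
 * first, then one final chunk of 1..IRQ_MAP_NUM_PER_BUF maps.
 */
static int
config_irq_map_chunked(uint16_t nb_rx_queues)
{
	uint16_t num = nb_rx_queues;
	uint16_t index = 0;

	while (num > IRQ_MAP_NUM_PER_BUF) {
		if (send_irq_map_chunk(IRQ_MAP_NUM_PER_BUF, index))
			return -1;
		num -= IRQ_MAP_NUM_PER_BUF;
		index += IRQ_MAP_NUM_PER_BUF;
	}
	return send_irq_map_chunk(num, index);
}

int
main(void)
{
	return config_irq_map_chunked(256);	/* sends two messages of 128 */
}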

Signed-off-by: Steve Yang <stevex.yang@intel.com>
Signed-off-by: Kevin Liu <kevinx.liu@intel.com>
---
 drivers/net/ice/ice_dcf.c        | 50 +++++++++++++++++++++++++++----
 drivers/net/ice/ice_dcf.h        | 10 ++++++-
 drivers/net/ice/ice_dcf_ethdev.c | 50 ++++++++++++++++++++++++++-----
 drivers/net/ice/ice_dcf_ethdev.h |  1 +
 4 files changed, 98 insertions(+), 13 deletions(-)
  

Patch

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 7004c00f1c..290f754049 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -1115,7 +1115,6 @@  ice_dcf_get_max_rss_queue_region(struct ice_dcf_hw *hw)
 	return 0;
 }
 
-
 int
 ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
 {
@@ -1132,13 +1131,14 @@  ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
 		return -ENOMEM;
 
 	map_info->num_vectors = hw->nb_msix;
-	for (i = 0; i < hw->nb_msix; i++) {
-		vecmap = &map_info->vecmap[i];
+	for (i = 0; i < hw->eth_dev->data->nb_rx_queues; i++) {
+		vecmap =
+		  &map_info->vecmap[hw->qv_map[i].vector_id - hw->msix_base];
 		vecmap->vsi_id = hw->vsi_res->vsi_id;
 		vecmap->rxitr_idx = 0;
-		vecmap->vector_id = hw->msix_base + i;
+		vecmap->vector_id = hw->qv_map[i].vector_id;
 		vecmap->txq_map = 0;
-		vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
+		vecmap->rxq_map |= 1 << hw->qv_map[i].queue_id;
 	}
 
 	memset(&args, 0, sizeof(args));
@@ -1154,6 +1154,46 @@  ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
 	return err;
 }
 
+int
+ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw,
+		       uint16_t num, uint16_t index)
+{
+	struct virtchnl_queue_vector_maps *map_info;
+	struct virtchnl_queue_vector *qv_maps;
+	struct dcf_virtchnl_cmd args;
+	int len, i, err;
+	int count = 0;
+
+	len = sizeof(struct virtchnl_queue_vector_maps) +
+	      sizeof(struct virtchnl_queue_vector) * (num - 1);
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->vport_id = hw->vsi_res->vsi_id;
+	map_info->num_qv_maps = num;
+	for (i = index; i < index + map_info->num_qv_maps; i++) {
+		qv_maps = &map_info->qv_maps[count++];
+		qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
+		qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+		qv_maps->queue_id = hw->qv_map[i].queue_id;
+		qv_maps->vector_id = hw->qv_map[i].vector_id;
+	}
+
+	args.v_op = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
+	args.req_msg = (u8 *)map_info;
+	args.req_msglen = len;
+	args.rsp_msgbuf = hw->arq_buf;
+	args.rsp_buflen = ICE_DCF_AQ_BUF_SZ;
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
+
+	rte_free(map_info);
+	return err;
+}
+
 int
 ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
 {
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index e36428a92a..ce57a687ab 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -74,6 +74,11 @@  struct ice_dcf_tm_conf {
 	bool committed;
 };
 
+struct ice_dcf_qv_map {
+	uint16_t queue_id;
+	uint16_t vector_id;
+};
+
 struct ice_dcf_hw {
 	struct iavf_hw avf;
 
@@ -106,7 +111,8 @@  struct ice_dcf_hw {
 	uint16_t msix_base;
 	uint16_t nb_msix;
 	uint16_t max_rss_qregion; /* max RSS queue region supported by PF */
-	uint16_t rxq_map[16];
+
+	struct ice_dcf_qv_map *qv_map; /* queue vector mapping */
 	struct virtchnl_eth_stats eth_stats_offset;
 	struct virtchnl_vlan_caps vlan_v2_caps;
 
@@ -134,6 +140,8 @@  int ice_dcf_configure_queues(struct ice_dcf_hw *hw,
 int ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num);
 int ice_dcf_get_max_rss_queue_region(struct ice_dcf_hw *hw);
 int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);
+int ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw,
+			      uint16_t num, uint16_t index);
 int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on);
 int ice_dcf_disable_queues(struct ice_dcf_hw *hw);
 int ice_dcf_query_stats(struct ice_dcf_hw *hw,
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 78df82d5b5..1ddba02ebb 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -143,6 +143,7 @@  ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 {
 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
 	struct ice_dcf_hw *hw = &adapter->real_hw;
+	struct ice_dcf_qv_map *qv_map;
 	uint16_t interval, i;
 	int vec;
 
@@ -161,6 +162,14 @@  ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		}
 	}
 
+	qv_map = rte_zmalloc("qv_map",
+		dev->data->nb_rx_queues * sizeof(struct ice_dcf_qv_map), 0);
+	if (!qv_map) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+				dev->data->nb_rx_queues);
+		return -1;
+	}
+
 	if (!dev->data->dev_conf.intr_conf.rxq ||
 	    !rte_intr_dp_is_en(intr_handle)) {
 		/* Rx interrupt disabled, Map interrupt only for writeback */
@@ -196,17 +205,22 @@  ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		}
 		IAVF_WRITE_FLUSH(&hw->avf);
 		/* map all queues to the same interrupt */
-		for (i = 0; i < dev->data->nb_rx_queues; i++)
-			hw->rxq_map[hw->msix_base] |= 1 << i;
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			qv_map[i].queue_id = i;
+			qv_map[i].vector_id = hw->msix_base;
+		}
+		hw->qv_map = qv_map;
 	} else {
 		if (!rte_intr_allow_others(intr_handle)) {
 			hw->nb_msix = 1;
 			hw->msix_base = IAVF_MISC_VEC_ID;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				hw->rxq_map[hw->msix_base] |= 1 << i;
+				qv_map[i].queue_id = i;
+				qv_map[i].vector_id = hw->msix_base;
 				rte_intr_vec_list_index_set(intr_handle,
 							i, IAVF_MISC_VEC_ID);
 			}
+			hw->qv_map = qv_map;
 			PMD_DRV_LOG(DEBUG,
 				    "vector %u are mapping to all Rx queues",
 				    hw->msix_base);
@@ -219,21 +233,43 @@  ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 			hw->msix_base = IAVF_MISC_VEC_ID;
 			vec = IAVF_MISC_VEC_ID;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				hw->rxq_map[vec] |= 1 << i;
+				qv_map[i].queue_id = i;
+				qv_map[i].vector_id = vec;
 				rte_intr_vec_list_index_set(intr_handle,
 								   i, vec++);
 				if (vec >= hw->nb_msix)
 					vec = IAVF_RX_VEC_START;
 			}
+			hw->qv_map = qv_map;
 			PMD_DRV_LOG(DEBUG,
 				    "%u vectors are mapping to %u Rx queues",
 				    hw->nb_msix, dev->data->nb_rx_queues);
 		}
 	}
 
-	if (ice_dcf_config_irq_map(hw)) {
-		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
-		return -1;
+	if (!hw->lv_enabled) {
+		if (ice_dcf_config_irq_map(hw)) {
+			PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+			return -1;
+		}
+	} else {
+		uint16_t num_qv_maps = dev->data->nb_rx_queues;
+		uint16_t index = 0;
+
+		while (num_qv_maps > ICE_DCF_IRQ_MAP_NUM_PER_BUF) {
+			if (ice_dcf_config_irq_map_lv(hw,
+					ICE_DCF_IRQ_MAP_NUM_PER_BUF, index)) {
+				PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
+				return -1;
+			}
+			num_qv_maps -= ICE_DCF_IRQ_MAP_NUM_PER_BUF;
+			index += ICE_DCF_IRQ_MAP_NUM_PER_BUF;
+		}
+
+		if (ice_dcf_config_irq_map_lv(hw, num_qv_maps, index)) {
+			PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
+			return -1;
+		}
 	}
 	return 0;
 }
diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h
index 2fac1e5b21..9ef524c97c 100644
--- a/drivers/net/ice/ice_dcf_ethdev.h
+++ b/drivers/net/ice/ice_dcf_ethdev.h
@@ -23,6 +23,7 @@ 
 #define ICE_DCF_MAX_NUM_QUEUES_DFLT 16
 #define ICE_DCF_MAX_NUM_QUEUES_LV   256
 #define ICE_DCF_CFG_Q_NUM_PER_BUF   32
+#define ICE_DCF_IRQ_MAP_NUM_PER_BUF 128
 
 struct ice_dcf_queue {
 	uint64_t dummy;
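
For context, a minimal sketch (hypothetical standalone C, not driver
code) of why the old uint16_t rxq_map[16] bitmap caps the mapping at 16
vectors and 16 queues, while the per-queue ice_dcf_qv_map entries
introduced above scale to ICE_DCF_MAX_NUM_QUEUES_LV (256):

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES_LV 256	/* mirrors ICE_DCF_MAX_NUM_QUEUES_LV */

/* Old layout: one 16-bit Rx-queue bitmap per vector. Only 16 slots
 * (vectors) exist, and 1 << queue_id exceeds 16 bits for queue_id >= 16,
 * so both dimensions are capped at 16.
 */
static uint16_t rxq_map[16];

/* New layout, mirroring struct ice_dcf_qv_map: one explicit entry per
 * queue, so any queue id up to 255 is representable.
 */
struct qv_map {
	uint16_t queue_id;
	uint16_t vector_id;
};

int
main(void)
{
	static struct qv_map qv_map[MAX_QUEUES_LV];
	uint16_t i;

	rxq_map[0] |= 1 << 5;	/* old scheme: queue 5 -> vector 0, OK */
	/* queue 20 -> any vector is not expressible in the old scheme */

	for (i = 0; i < MAX_QUEUES_LV; i++) {
		qv_map[i].queue_id = i;
		qv_map[i].vector_id = i % 16;	/* example round-robin */
	}
	printf("queue 200 -> vector %u\n", qv_map[200].vector_id);
	return 0;
}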