[v1,2/2] net/iavf: enable large VF configuration

Message ID: 20200909072028.16726-3-ting.xu@intel.com (mailing list archive)
State: Superseded, archived
Delegated to: Qi Zhang
Series: enable large VF configuration

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  fail      apply issues

Commit Message

Xu, Ting Sept. 9, 2020, 7:20 a.m. UTC
  Add support to configure VSI queues, enable/disable queues, and map
IRQs for a large VF. The maximum number of VF queue pairs is 256.
Request more queues from the PF if the currently allocated queues are
not enough. Also add support to query the maximum number of RSS queue
regions. The large VF offload capability must be negotiated before a
large VF can be configured.
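
For reference, a usage sketch (hypothetical testpmd invocation, not
taken from this patch): with this series applied, an application simply
asks for more than 16 queue pairs, e.g.

    ./testpmd -l 1-9 -n 4 -- -i --rxq=256 --txq=256

The PMD then negotiates VIRTCHNL_VF_OFFLOAD_LARGE_VF, requests the
extra queues from the PF, and resets the VF before finishing the
configuration.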

Signed-off-by: Ting Xu <ting.xu@intel.com>
---
 drivers/net/iavf/iavf.h        |  24 ++-
 drivers/net/iavf/iavf_ethdev.c |  81 +++++++--
 drivers/net/iavf/iavf_rxtx.c   |  27 ++-
 drivers/net/iavf/iavf_vchnl.c  | 309 ++++++++++++++++++++++++++++++---
 4 files changed, 397 insertions(+), 44 deletions(-)
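
As a reading aid, a minimal standalone sketch (not driver code; the two
defines are copied from the patch below, everything else is assumed for
illustration) of the chunking arithmetic iavf_configure_queues() uses:
with up to 256 queue pairs and a virtchnl buffer sized for 32
queue-pair configs, one VIRTCHNL_OP_CONFIG_VSI_QUEUES message is split
into several commands.

#include <stdio.h>

#define IAVF_MAX_NUM_QUEUES_LV  256
#define IAVF_CFG_Q_NUM_PER_BUF   32

int main(void)
{
	unsigned int num_queue_pairs = IAVF_MAX_NUM_QUEUES_LV; /* example */
	/* number of commands carrying a full 32-queue buffer */
	unsigned int nb_cmd_full = num_queue_pairs / IAVF_CFG_Q_NUM_PER_BUF;
	/* queues left over for one final, smaller command */
	unsigned int nbq_rest = num_queue_pairs % IAVF_CFG_Q_NUM_PER_BUF;
	unsigned int nb_cmd_rest = nbq_rest ? 1 : 0;

	/* 256 queue pairs -> 8 full commands and no partial command */
	printf("%u full + %u partial command(s)\n", nb_cmd_full, nb_cmd_rest);
	return 0;
}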
  

Patch

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 1a42936a0..a16365d3b 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -19,7 +19,10 @@ 
 #define IAVF_FRAME_SIZE_MAX       9728
 #define IAVF_QUEUE_BASE_ADDR_UNIT 128
 
-#define IAVF_MAX_NUM_QUEUES       16
+#define IAVF_MAX_NUM_QUEUES_DFLT	 16
+#define IAVF_MAX_NUM_QUEUES_LV		 256
+#define IAVF_RXTX_QUEUE_CHUNKS_NUM	 2
+#define IAVF_CFG_Q_NUM_PER_BUF		 32
 
 #define IAVF_NUM_MACADDR_MAX      64
 
@@ -104,8 +107,10 @@  struct iavf_fdir_info {
 	struct iavf_fdir_conf conf;
 };
 
-/* TODO: is that correct to assume the max number to be 16 ?*/
-#define IAVF_MAX_MSIX_VECTORS   16
+struct iavf_qv_map {
+	uint16_t queue_id;
+	uint16_t vector_id;
+};
 
 /* Event status from PF */
 enum pending_msg {
@@ -157,14 +162,17 @@  struct iavf_info {
 	uint8_t *rss_key;
 	uint16_t nb_msix;   /* number of MSI-X interrupts on Rx */
 	uint16_t msix_base; /* msix vector base from */
-	/* queue bitmask for each vector */
-	uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS];
+	uint32_t max_rss_qregion; /* max RSS queue region supported by PF */
+	/* queue vector mapping */
+	struct iavf_qv_map *qv_map;
 	struct iavf_flow_list flow_list;
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
+	/* indicate large VF support enabled or not */
+	bool lv_enabled;
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -269,13 +277,18 @@  int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 		     bool rx, bool on);
+int iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
+		     bool rx, bool on);
 int iavf_enable_queues(struct iavf_adapter *adapter);
+int iavf_enable_queues_lv(struct iavf_adapter *adapter);
 int iavf_disable_queues(struct iavf_adapter *adapter);
+int iavf_disable_queues_lv(struct iavf_adapter *adapter);
 int iavf_configure_rss_lut(struct iavf_adapter *adapter);
 int iavf_configure_rss_key(struct iavf_adapter *adapter);
 int iavf_configure_queues(struct iavf_adapter *adapter);
 int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
 int iavf_config_irq_map(struct iavf_adapter *adapter);
+int iavf_config_irq_map_lv(struct iavf_adapter *adapter);
 void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add);
 int iavf_dev_link_update(struct rte_eth_dev *dev,
 			__rte_unused int wait_to_complete);
@@ -296,4 +309,5 @@  int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
 			uint32_t mc_addrs_num, bool add);
 int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
+int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 48e474f53..6765aac33 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -160,12 +160,12 @@  iavf_init_rss(struct iavf_adapter *adapter)
 {
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct rte_eth_rss_conf *rss_conf;
-	uint8_t i, j, nb_q;
+	uint16_t i, j, nb_q;
 	int ret;
 
 	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
 	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
-		       IAVF_MAX_NUM_QUEUES);
+		       IAVF_MAX_NUM_QUEUES_LV);
 
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
@@ -218,6 +218,9 @@  iavf_dev_configure(struct rte_eth_dev *dev)
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(ad);
 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+	uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+		dev->data->nb_tx_queues);
+	int ret = 0;
 
 	ad->rx_bulk_alloc_allowed = true;
 	/* Initialize to TRUE. If any of Rx queues doesn't meet the
@@ -229,6 +232,29 @@  iavf_dev_configure(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 
+	/* Large VF setting */
+	if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
+		if (!(vf->vf_res->vf_cap_flags &
+				VIRTCHNL_VF_OFFLOAD_LARGE_VF)) {
+			PMD_DRV_LOG(ERR, "large VF is not supported");
+			return -1;
+		}
+
+		ret = iavf_request_queues(dev, num_queue_pairs);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "request queues from PF failed");
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
+				vf->vsi_res->num_queue_pairs, num_queue_pairs);
+
+		ret = iavf_dev_reset(dev);
+		if (ret != 0)
+			return ret;
+
+		vf->lv_enabled = true;
+	}
+
 	/* Vlan stripping setting */
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
 		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
@@ -243,6 +269,7 @@  iavf_dev_configure(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
+
 	return 0;
 }
 
@@ -325,6 +352,7 @@  static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	struct iavf_qv_map *qv_map;
 	uint16_t interval, i;
 	int vec;
 
@@ -345,6 +373,14 @@  static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		}
 	}
 
+	qv_map = rte_zmalloc("qv_map",
+		dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
+	if (!qv_map) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+				dev->data->nb_rx_queues);
+		return -1;
+	}
+
 	if (!dev->data->dev_conf.intr_conf.rxq ||
 	    !rte_intr_dp_is_en(intr_handle)) {
 		/* Rx interrupt disabled, Map interrupt only for writeback */
@@ -375,16 +411,21 @@  static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		}
 		IAVF_WRITE_FLUSH(hw);
 		/* map all queues to the same interrupt */
-		for (i = 0; i < dev->data->nb_rx_queues; i++)
-			vf->rxq_map[vf->msix_base] |= 1 << i;
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			qv_map[i].queue_id = i;
+			qv_map[i].vector_id = vf->msix_base;
+		}
+		vf->qv_map = qv_map;
 	} else {
 		if (!rte_intr_allow_others(intr_handle)) {
 			vf->nb_msix = 1;
 			vf->msix_base = IAVF_MISC_VEC_ID;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				vf->rxq_map[vf->msix_base] |= 1 << i;
+				qv_map[i].queue_id = i;
+				qv_map[i].vector_id = vf->msix_base;
 				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
 			}
+			vf->qv_map = qv_map;
 			PMD_DRV_LOG(DEBUG,
 				    "vector %u are mapping to all Rx queues",
 				    vf->msix_base);
@@ -397,21 +438,33 @@  static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 			vf->msix_base = IAVF_RX_VEC_START;
 			vec = IAVF_RX_VEC_START;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				vf->rxq_map[vec] |= 1 << i;
+				qv_map[i].queue_id = i;
+				qv_map[i].vector_id = vec;
 				intr_handle->intr_vec[i] = vec++;
 				if (vec >= vf->nb_msix)
 					vec = IAVF_RX_VEC_START;
 			}
+			vf->qv_map = qv_map;
 			PMD_DRV_LOG(DEBUG,
 				    "%u vectors are mapping to %u Rx queues",
 				    vf->nb_msix, dev->data->nb_rx_queues);
 		}
 	}
 
-	if (iavf_config_irq_map(adapter)) {
-		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
-		return -1;
+	if (!vf->lv_enabled) {
+		if (iavf_config_irq_map(adapter)) {
+			PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+			return -1;
+		}
+	} else {
+		if (iavf_config_irq_map_lv(adapter)) {
+			PMD_DRV_LOG(ERR, "config interrupt mapping "
+				"for large VF failed");
+			return -1;
+		}
 	}
+
+	rte_free(qv_map);
 	return 0;
 }
 
@@ -539,8 +592,8 @@  iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
-	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
-	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
+	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
 	dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
 	dev_info->hash_key_size = vf->vf_res->rss_key_size;
@@ -1269,6 +1322,12 @@  iavf_init_vf(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(ERR, "iavf_get_vf_config failed");
 		goto err_alloc;
 	}
+
+	if (iavf_get_max_rss_queue_region(adapter) != 0) {
+		PMD_INIT_LOG(ERR, "get max rss queue region failed");
+		goto err_alloc;
+	}
+
 	/* Allocate memort for RSS info */
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
 		vf->rss_key = rte_zmalloc("rss_key",
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 05a7dd898..86004ac90 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -532,6 +532,7 @@  iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_rx_queue *rxq;
 	int err = 0;
 
@@ -555,7 +556,11 @@  iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	IAVF_WRITE_FLUSH(hw);
 
 	/* Ready to switch the queue on */
-	err = iavf_switch_queue(adapter, rx_queue_id, true, true);
+	if (!vf->lv_enabled)
+		err = iavf_switch_queue(adapter, rx_queue_id, true, true);
+	else
+		err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
+
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
 			    rx_queue_id);
@@ -572,6 +577,7 @@  iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
 	int err = 0;
 
@@ -587,7 +593,10 @@  iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	IAVF_WRITE_FLUSH(hw);
 
 	/* Ready to switch the queue on */
-	err = iavf_switch_queue(adapter, tx_queue_id, false, true);
+	if (!vf->lv_enabled)
+		err = iavf_switch_queue(adapter, tx_queue_id, false, true);
+	else
+		err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
 
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
@@ -688,14 +697,22 @@  iavf_stop_queues(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_rx_queue *rxq;
 	struct iavf_tx_queue *txq;
 	int ret, i;
 
 	/* Stop All queues */
-	ret = iavf_disable_queues(adapter);
-	if (ret)
-		PMD_DRV_LOG(WARNING, "Fail to stop queues");
+	if (!vf->lv_enabled) {
+		ret = iavf_disable_queues(adapter);
+		if (ret)
+			PMD_DRV_LOG(WARNING, "Fail to stop queues");
+	} else {
+		ret = iavf_disable_queues_lv(adapter);
+		if (ret)
+			PMD_DRV_LOG(WARNING, "Fail to stop queues for large VF");
+	}
+
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index eab2f2a22..6090afab6 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -128,6 +128,7 @@  iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	case VIRTCHNL_OP_VERSION:
 	case VIRTCHNL_OP_GET_VF_RESOURCES:
 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+	case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
 		/* for init virtchnl ops, need to poll the response */
 		do {
 			result = iavf_read_msg_from_pf(adapter, args->out_size,
@@ -448,7 +449,8 @@  iavf_get_vf_resource(struct iavf_adapter *adapter)
 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
 		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
-		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_LARGE_VF;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -598,6 +600,142 @@  iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	return err;
 }
 
+int
+iavf_enable_queues_lv(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	len = sizeof(struct virtchnl_del_ena_dis_queues) +
+		  sizeof(struct virtchnl_queue_chunk) *
+		  (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+		adapter->eth_dev->data->nb_tx_queues;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+		adapter->eth_dev->data->nb_rx_queues;
+
+	args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
+		return err;
+	}
+	return 0;
+}
+
+int
+iavf_disable_queues_lv(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	len = sizeof(struct virtchnl_del_ena_dis_queues) +
+		  sizeof(struct virtchnl_queue_chunk) *
+		  (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+		adapter->eth_dev->data->nb_tx_queues;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+		adapter->eth_dev->data->nb_rx_queues;
+
+	args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
+		return err;
+	}
+	return 0;
+}
+
+int
+iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
+		 bool rx, bool on)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	len = sizeof(struct virtchnl_del_ena_dis_queues) +
+		  sizeof(struct virtchnl_queue_chunk) *
+		  (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	if (rx) {
+		queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type =
+			VIRTCHNL_QUEUE_TYPE_RX;
+		queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = qid;
+		queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues = 1;
+	} else {
+		queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type =
+			VIRTCHNL_QUEUE_TYPE_TX;
+		queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = qid;
+		queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues = 1;
+	}
+
+	if (on)
+		args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+	else
+		args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
+	return err;
+}
+
 int
 iavf_configure_rss_lut(struct iavf_adapter *adapter)
 {
@@ -662,32 +800,24 @@  iavf_configure_rss_key(struct iavf_adapter *adapter)
 	return err;
 }
 
-int
-iavf_configure_queues(struct iavf_adapter *adapter)
+static int
+iavf_exec_queue_cfg(struct iavf_adapter *adapter,
+	struct virtchnl_vsi_queue_config_info *vc_config,
+	uint16_t count, uint16_t size)
 {
 	struct iavf_rx_queue **rxq =
 		(struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
 	struct iavf_tx_queue **txq =
 		(struct iavf_tx_queue **)adapter->eth_dev->data->tx_queues;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
-	struct virtchnl_vsi_queue_config_info *vc_config;
 	struct virtchnl_queue_pair_info *vc_qp;
 	struct iavf_cmd_info args;
-	uint16_t i, size;
-	int err;
-
-	size = sizeof(*vc_config) +
-	       sizeof(vc_config->qpair[0]) * vf->num_queue_pairs;
-	vc_config = rte_zmalloc("cfg_queue", size, 0);
-	if (!vc_config)
-		return -ENOMEM;
-
-	vc_config->vsi_id = vf->vsi_res->vsi_id;
-	vc_config->num_queue_pairs = vf->num_queue_pairs;
+	int err = 0;
+	uint16_t i;
 
-	for (i = 0, vc_qp = vc_config->qpair;
-	     i < vf->num_queue_pairs;
-	     i++, vc_qp++) {
+	for (i = count * IAVF_CFG_Q_NUM_PER_BUF, vc_qp = vc_config->qpair;
+	     i < count * IAVF_CFG_Q_NUM_PER_BUF + vc_config->num_queue_pairs;
+		 i++, vc_qp++) {
 		vc_qp->txq.vsi_id = vf->vsi_res->vsi_id;
 		vc_qp->txq.queue_id = i;
 		/* Virtchnnl configure queues by pairs */
@@ -741,8 +871,73 @@  iavf_configure_queues(struct iavf_adapter *adapter)
 	err = iavf_execute_vf_cmd(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
-			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+				" VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+
+	return err;
+}
+
+/* Configure VSI queues. Max VF queue pairs number is 256, may
+ * send this command multiple times to configure all queues.
+ */
+int
+iavf_configure_queues(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_vsi_queue_config_info *vc_config;
+	uint16_t nb_cmd_full, nb_cmd_rest, nbq_rest;
+	uint16_t i, size;
+	int err = 0;
+
+	/* Compute how many times should the command to be sent */
+	nbq_rest = vf->num_queue_pairs % IAVF_CFG_Q_NUM_PER_BUF;
+	nb_cmd_rest = nbq_rest ? 1 : 0;
+	nb_cmd_full = vf->num_queue_pairs / IAVF_CFG_Q_NUM_PER_BUF;
+
+	/* send cmd only once for small queues number */
+	if (!nb_cmd_full) {
+		size = sizeof(*vc_config) +
+		       sizeof(vc_config->qpair[0]) * nbq_rest;
+		vc_config = rte_zmalloc("cfg_queue", size, 0);
+		if (!vc_config)
+			return -ENOMEM;
+
+		vc_config->vsi_id = vf->vsi_res->vsi_id;
+		vc_config->num_queue_pairs = nbq_rest;
+		err = iavf_exec_queue_cfg(adapter, vc_config, 0, size);
+		goto free;
+	}
+
+	size = sizeof(*vc_config) +
+		   sizeof(vc_config->qpair[0]) * IAVF_CFG_Q_NUM_PER_BUF;
+	vc_config = rte_zmalloc("cfg_queue", size, 0);
+	if (!vc_config)
+		return -ENOMEM;
+
+	for (i = 0; i < nb_cmd_full + nb_cmd_rest; i++) {
+		if (i < nb_cmd_full) {
+			vc_config->vsi_id = vf->vsi_res->vsi_id;
+			vc_config->num_queue_pairs = IAVF_CFG_Q_NUM_PER_BUF;
+		} else {
+			/* re-allocate virtchnl msg for less queues */
+			rte_free(vc_config);
+			size = sizeof(*vc_config) +
+			       sizeof(vc_config->qpair[0]) * nbq_rest;
+			vc_config = rte_zmalloc("cfg_queue", size, 0);
+			if (!vc_config)
+				return -ENOMEM;
+
+			vc_config->vsi_id = vf->vsi_res->vsi_id;
+			vc_config->num_queue_pairs = nbq_rest;
+		}
 
+		err = iavf_exec_queue_cfg(adapter, vc_config, i, size);
+		if (err)
+			goto free;
+
+		memset(vc_config, 0, size);
+	}
+
+free:
 	rte_free(vc_config);
 	return err;
 }
@@ -797,13 +992,14 @@  iavf_config_irq_map(struct iavf_adapter *adapter)
 
 	map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
 	map_info->num_vectors = nb_msix;
-	for (i = 0; i < nb_msix; i++) {
-		vecmap = &map_info->vecmap[i];
+	for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+		vecmap =
+		    &map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
 		vecmap->vsi_id = vf->vsi_res->vsi_id;
 		vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT;
-		vecmap->vector_id = vf->msix_base + i;
+		vecmap->vector_id = vf->qv_map[i].vector_id;
 		vecmap->txq_map = 0;
-		vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+		vecmap->rxq_map |= 1 << vf->qv_map[i].queue_id;
 	}
 
 	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
@@ -819,6 +1015,46 @@  iavf_config_irq_map(struct iavf_adapter *adapter)
 	return err;
 }
 
+int
+iavf_config_irq_map_lv(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_queue_vector_maps *map_info;
+	struct virtchnl_queue_vector *qv_maps;
+	struct iavf_cmd_info args;
+	int len, i, err;
+
+	len = sizeof(struct virtchnl_queue_vector_maps) +
+	      sizeof(struct virtchnl_queue_vector) *
+	      (adapter->eth_dev->data->nb_rx_queues - 1);
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->vport_id = vf->vsi_res->vsi_id;
+	map_info->num_qv_maps = adapter->eth_dev->data->nb_rx_queues;
+	for (i = 0; i < map_info->num_qv_maps; i++) {
+		qv_maps = &map_info->qv_maps[i];
+		qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
+		qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+		qv_maps->queue_id = vf->qv_map[i].queue_id;
+		qv_maps->vector_id = vf->qv_map[i].vector_id;
+	}
+
+	args.ops = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
+	args.in_args = (u8 *)map_info;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
+
+	rte_free(map_info);
+	return err;
+}
+
 void
 iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 {
@@ -1261,3 +1497,30 @@  iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
 
 	return err;
 }
+
+int
+iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int ret;
+
+	args.ops = VIRTCHNL_OP_GET_MAX_RSS_QREGION;
+	args.in_args = NULL;
+	args.in_args_size = 0;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	ret = iavf_execute_vf_cmd(adapter, &args);
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of "
+			    "VIRTCHNL_OP_GET_MAX_RSS_QREGION");
+		return ret;
+	}
+
+	vf->max_rss_qregion =
+	((struct virtchnl_max_rss_qregion *)args.out_buffer)->max_rss_qregion;
+
+	return 0;
+}
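
A closing note on the data-structure change: the removed
uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS] bitmask can describe at most 16
queues per vector, which cannot express a 256-queue mapping. Below is a
minimal standalone sketch (hypothetical build_qv_map() helper; only the
struct layout and the round-robin walk mirror the patch) of how the new
queue/vector pair list scales:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct qv_map_entry {		/* mirrors struct iavf_qv_map */
	uint16_t queue_id;
	uint16_t vector_id;
};

/* Spread nb_rxq queues round-robin over nb_msix vectors starting at
 * vec_base, as iavf_config_rx_queues_irqs() does when per-queue Rx
 * interrupts are enabled.
 */
static struct qv_map_entry *
build_qv_map(uint16_t nb_rxq, uint16_t nb_msix, uint16_t vec_base)
{
	struct qv_map_entry *map = calloc(nb_rxq, sizeof(*map));
	uint16_t i, vec = vec_base;

	if (map == NULL)
		return NULL;
	for (i = 0; i < nb_rxq; i++) {
		map[i].queue_id = i;
		map[i].vector_id = vec++;
		if (vec >= vec_base + nb_msix)
			vec = vec_base;
	}
	return map;
}

int main(void)
{
	struct qv_map_entry *map = build_qv_map(256, 4, 1);

	if (map == NULL)
		return 1;
	/* queues 0..3 -> vectors 1..4, queue 4 wraps to vector 1, ... */
	printf("queue 5 -> vector %u\n", map[5].vector_id);
	free(map);
	return 0;
}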