[v9,05/14] net/idpf: add support for queue start and stop

Message ID 20221021051821.2164939-6-junfeng.guo@intel.com (mailing list archive)
State Changes Requested, archived
Delegated to: Andrew Rybchenko
Series: add support for idpf PMD in DPDK

Checks

Context       | Check   | Description
ci/checkpatch | success | coding style OK

Commit Message

Junfeng Guo Oct. 21, 2022, 5:18 a.m. UTC
  Add support for queue operations:
 - rx_queue_start
 - rx_queue_stop
 - tx_queue_start
 - tx_queue_stop

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 doc/guides/nics/features/idpf.ini |   1 +
 drivers/net/idpf/idpf_ethdev.c    |  46 +++++
 drivers/net/idpf/idpf_ethdev.h    |   3 +
 drivers/net/idpf/idpf_rxtx.c      | 309 ++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.h      |   6 +
 drivers/net/idpf/idpf_vchnl.c     | 150 +++++++++++++++
 6 files changed, 515 insertions(+)
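
These queue ops make deferred start usable from the ethdev API. Below is a
minimal application-side sketch (not part of the patch) showing how a queue
set up with rx_deferred_start stays stopped across rte_eth_dev_start() and is
then started and stopped individually; the port/queue numbers, descriptor
count and mempool variable are only illustrative and assume the port was
already configured.

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
deferred_rx_queue_example(struct rte_mempool *mbuf_pool)
{
	struct rte_eth_rxconf rxconf = { .rx_deferred_start = 1 };
	uint16_t port_id = 0, queue_id = 0;
	int ret;

	/* Request deferred start for this queue at setup time. */
	ret = rte_eth_rx_queue_setup(port_id, queue_id, 512,
				     rte_eth_dev_socket_id(port_id),
				     &rxconf, mbuf_pool);
	if (ret != 0)
		return ret;

	/* The device starts, but queue 0 remains stopped. */
	ret = rte_eth_dev_start(port_id);
	if (ret != 0)
		return ret;

	/* Start and later stop the queue individually. */
	ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
	if (ret != 0)
		return ret;

	return rte_eth_dev_rx_queue_stop(port_id, queue_id);
}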
  

Comments

Andrew Rybchenko Oct. 21, 2022, 7:53 a.m. UTC | #1
On 10/21/22 08:18, Junfeng Guo wrote:
> Add support for queue operations:
>   - rx_queue_start
>   - rx_queue_stop
>   - tx_queue_start
>   - tx_queue_stop

Queue start/stop is required to support deferred start.
So, before this patch, device start must simply start
all queues that have been set up, and queue configuration
should reject a deferred_start request.
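
To make this concrete, here is a purely hypothetical sketch (not part of this
patch or the preceding ones) of how the setup routine could reject a deferred
start request while the PMD has no per-queue start/stop ops; only the
rx_deferred_start check is the point, the rest of the body is elided.

static int
idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		    uint16_t nb_desc, unsigned int socket_id,
		    const struct rte_eth_rxconf *rx_conf,
		    struct rte_mempool *mp)
{
	/* Without an rx_queue_start op a deferred queue could never be
	 * started later, so refuse the request at setup time.
	 */
	if (rx_conf->rx_deferred_start) {
		PMD_DRV_LOG(ERR, "Deferred start is not supported");
		return -EINVAL;
	}

	/* ...the regular queue setup would follow here... */
	return 0;
}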
  

Patch

diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index 681a908194..597beec5d9 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -7,6 +7,7 @@ 
 ; is selected.
 ;
 [Features]
+Queue start/stop     = Y
 Runtime Rx queue setup = Y
 Runtime Tx queue setup = Y
 Multiprocess aware   = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index f9717cd5ff..c25f222c5e 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -57,6 +57,10 @@  static const struct eth_dev_ops idpf_eth_dev_ops = {
 	.dev_start			= idpf_dev_start,
 	.dev_stop			= idpf_dev_stop,
 	.dev_close			= idpf_dev_close,
+	.rx_queue_start			= idpf_rx_queue_start,
+	.rx_queue_stop			= idpf_rx_queue_stop,
+	.tx_queue_start			= idpf_tx_queue_start,
+	.tx_queue_stop			= idpf_tx_queue_stop,
 	.rx_queue_setup			= idpf_rx_queue_setup,
 	.rx_queue_release		= idpf_dev_rx_queue_release,
 	.tx_queue_setup			= idpf_tx_queue_setup,
@@ -211,6 +215,39 @@  idpf_dev_configure(__rte_unused struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+idpf_start_queues(struct rte_eth_dev *dev)
+{
+	struct idpf_rx_queue *rxq;
+	struct idpf_tx_queue *txq;
+	int err = 0;
+	int i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq || txq->tx_deferred_start)
+			continue;
+		err = idpf_tx_queue_start(dev, i);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+			return err;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq || rxq->rx_deferred_start)
+			continue;
+		err = idpf_rx_queue_start(dev, i);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+			return err;
+		}
+	}
+
+	return err;
+}
+
 static int
 idpf_dev_start(struct rte_eth_dev *dev)
 {
@@ -220,6 +257,11 @@  idpf_dev_start(struct rte_eth_dev *dev)
 
 	vport->stopped = 0;
 
+	if (idpf_start_queues(dev)) {
+		PMD_DRV_LOG(ERR, "Failed to start queues");
+		goto err_mtu;
+	}
+
 	if (idpf_vc_ena_dis_vport(vport, true)) {
 		PMD_DRV_LOG(ERR, "Failed to enable vport");
 		goto err_vport;
@@ -228,6 +270,8 @@  idpf_dev_start(struct rte_eth_dev *dev)
 	return 0;
 
 err_vport:
+	idpf_stop_queues(dev);
+err_mtu:
 	return -1;
 }
 
@@ -244,6 +288,8 @@  idpf_dev_stop(struct rte_eth_dev *dev)
 	if (idpf_vc_ena_dis_vport(vport, false))
 		PMD_DRV_LOG(ERR, "disable vport failed");
 
+	idpf_stop_queues(dev);
+
 	vport->stopped = 1;
 	dev->data->dev_started = 0;
 
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 49d71154aa..9608204e4c 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -219,6 +219,9 @@  int idpf_vc_config_rxqs(struct idpf_vport *vport);
 int idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id);
 int idpf_vc_config_txqs(struct idpf_vport *vport);
 int idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id);
+int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
+		      bool rx, bool on);
+int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
 int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
 int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
 		      uint16_t buf_len, uint8_t *buf);
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 82cc1d8f05..95193713c4 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -929,6 +929,289 @@  idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 						 socket_id, tx_conf);
 }
 
+static int
+idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+	volatile struct virtchnl2_singleq_rx_buf_desc *rxd;
+	struct rte_mbuf *mbuf = NULL;
+	uint64_t dma_addr;
+	uint16_t i;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		mbuf = rte_mbuf_raw_alloc(rxq->mp);
+		if (unlikely(!mbuf)) {
+			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+			return -ENOMEM;
+		}
+
+		rte_mbuf_refcnt_set(mbuf, 1);
+		mbuf->next = NULL;
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->nb_segs = 1;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+		rxd = &((volatile struct virtchnl2_singleq_rx_buf_desc *)(rxq->rx_ring))[i];
+		rxd->pkt_addr = dma_addr;
+		rxd->hdr_addr = 0;
+		rxd->rsvd1 = 0;
+		rxd->rsvd2 = 0;
+		rxq->sw_ring[i] = mbuf;
+	}
+
+	return 0;
+}
+
+static int
+idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+	volatile struct virtchnl2_splitq_rx_buf_desc *rxd;
+	struct rte_mbuf *mbuf = NULL;
+	uint64_t dma_addr;
+	uint16_t i;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		mbuf = rte_mbuf_raw_alloc(rxq->mp);
+		if (unlikely(!mbuf)) {
+			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+			return -ENOMEM;
+		}
+
+		rte_mbuf_refcnt_set(mbuf, 1);
+		mbuf->next = NULL;
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->nb_segs = 1;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+		rxd = &((volatile struct virtchnl2_splitq_rx_buf_desc *)(rxq->rx_ring))[i];
+		rxd->qword0.buf_id = i;
+		rxd->qword0.rsvd0 = 0;
+		rxd->qword0.rsvd1 = 0;
+		rxd->pkt_addr = dma_addr;
+		rxd->hdr_addr = 0;
+		rxd->rsvd2 = 0;
+
+		rxq->sw_ring[i] = mbuf;
+	}
+
+	rxq->nb_rx_hold = 0;
+	rxq->rx_tail = rxq->nb_rx_desc - 1;
+
+	return 0;
+}
+
+int
+idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct idpf_rx_queue *rxq;
+	int err;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	if (!rxq || !rxq->q_set) {
+		PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
+					rx_queue_id);
+		return -EINVAL;
+	}
+
+	if (!rxq->bufq1) {
+		/* Single queue */
+		err = idpf_alloc_single_rxq_mbufs(rxq);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+			return err;
+		}
+
+		rte_wmb();
+
+		/* Init the RX tail register. */
+		IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+	} else {
+		/* Split queue */
+		err = idpf_alloc_split_rxq_mbufs(rxq->bufq1);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+			return err;
+		}
+		err = idpf_alloc_split_rxq_mbufs(rxq->bufq2);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+			return err;
+		}
+
+		rte_wmb();
+
+		/* Init the RX tail register. */
+		IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
+		IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+	}
+
+	return err;
+}
+
+int
+idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_rx_queue *rxq =
+		(struct idpf_rx_queue *)dev->data->rx_queues[rx_queue_id];
+	int err = 0;
+
+	PMD_DRV_FUNC_TRACE();
+
+	err = idpf_vc_config_rxq(vport, rx_queue_id);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Fail to configure Rx queue %u", rx_queue_id);
+		return err;
+	}
+
+	err = idpf_rx_queue_init(dev, rx_queue_id);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to init RX queue %u",
+			    rx_queue_id);
+		return err;
+	}
+
+	/* Ready to switch the queue on */
+	err = idpf_switch_queue(vport, rx_queue_id, true, true);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+			    rx_queue_id);
+	} else {
+		rxq->q_started = true;
+		dev->data->rx_queue_state[rx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+	}
+
+	return err;
+}
+
+int
+idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct idpf_tx_queue *txq;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	/* Init the Tx tail register. */
+	IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+
+	return 0;
+}
+
+int
+idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_tx_queue *txq =
+		(struct idpf_tx_queue *)dev->data->tx_queues[tx_queue_id];
+	int err = 0;
+
+	PMD_DRV_FUNC_TRACE();
+
+	err = idpf_vc_config_txq(vport, tx_queue_id);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
+		return err;
+	}
+
+	err = idpf_tx_queue_init(dev, tx_queue_id);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to init TX queue %u",
+			    tx_queue_id);
+		return err;
+	}
+
+	/* Ready to switch the queue on */
+	err = idpf_switch_queue(vport, tx_queue_id, false, true);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+			    tx_queue_id);
+	} else {
+		txq->q_started = true;
+		dev->data->tx_queue_state[tx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+	}
+
+	return err;
+}
+
+int
+idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_rx_queue *rxq;
+	int err;
+
+	PMD_DRV_FUNC_TRACE();
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	err = idpf_switch_queue(vport, rx_queue_id, true, false);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+			    rx_queue_id);
+		return err;
+	}
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		rxq->ops->release_mbufs(rxq);
+		reset_single_rx_queue(rxq);
+	} else {
+		rxq->bufq1->ops->release_mbufs(rxq->bufq1);
+		rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+		reset_split_rx_queue(rxq);
+	}
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+int
+idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_tx_queue *txq;
+	int err;
+
+	PMD_DRV_FUNC_TRACE();
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	err = idpf_switch_queue(vport, tx_queue_id, false, false);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+			    tx_queue_id);
+		return err;
+	}
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	txq->ops->release_mbufs(txq);
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		reset_single_tx_queue(txq);
+	} else {
+		reset_split_tx_descq(txq);
+		reset_split_tx_complq(txq->complq);
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
 void
 idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
@@ -940,3 +1223,29 @@  idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
 	idpf_tx_queue_release(dev->data->tx_queues[qid]);
 }
+
+void
+idpf_stop_queues(struct rte_eth_dev *dev)
+{
+	struct idpf_rx_queue *rxq;
+	struct idpf_tx_queue *txq;
+	int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq)
+			continue;
+
+		if (idpf_rx_queue_stop(dev, i))
+			PMD_DRV_LOG(WARNING, "Fail to stop Rx queue %d", i);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq)
+			continue;
+
+		if (idpf_tx_queue_stop(dev, i))
+			PMD_DRV_LOG(WARNING, "Fail to stop Tx queue %d", i);
+	}
+}
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index 69a1fa6348..f0427b96c5 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -175,12 +175,18 @@  int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			const struct rte_eth_rxconf *rx_conf,
 			struct rte_mempool *mp);
 int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf);
 int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
+void idpf_stop_queues(struct rte_eth_dev *dev);
+
 #endif /* _IDPF_RXTX_H_ */
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 3578346bce..6b6881872b 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -831,6 +831,156 @@  idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id)
 	return err;
 }
 
+static int
+idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+			  uint32_t type, bool on)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_del_ena_dis_queues *queue_select;
+	struct virtchnl2_queue_chunk *queue_chunk;
+	struct idpf_cmd_info args;
+	int err, len;
+
+	len = sizeof(struct virtchnl2_del_ena_dis_queues);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = 1;
+	queue_select->vport_id = vport->vport_id;
+
+	queue_chunk->type = type;
+	queue_chunk->start_queue_id = qid;
+	queue_chunk->num_queues = 1;
+
+	args.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES :
+		VIRTCHNL2_OP_DISABLE_QUEUES;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
+			    on ? "ENABLE" : "DISABLE");
+
+	rte_free(queue_select);
+	return err;
+}
+
+int
+idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
+		     bool rx, bool on)
+{
+	uint32_t type;
+	int err, queue_id;
+
+	/* switch txq/rxq */
+	type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+	if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+		queue_id = vport->chunks_info.rx_start_qid + qid;
+	else
+		queue_id = vport->chunks_info.tx_start_qid + qid;
+	err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+	if (err)
+		return err;
+
+	/* switch tx completion queue */
+	if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+		queue_id = vport->chunks_info.tx_compl_start_qid + qid;
+		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+		if (err)
+			return err;
+	}
+
+	/* switch rx buffer queue */
+	if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+		queue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid;
+		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+		if (err)
+			return err;
+		queue_id++;
+		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+		if (err)
+			return err;
+	}
+
+	return err;
+}
+
+#define IDPF_RXTX_QUEUE_CHUNKS_NUM	2
+int
+idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_del_ena_dis_queues *queue_select;
+	struct virtchnl2_queue_chunk *queue_chunk;
+	uint32_t type;
+	struct idpf_cmd_info args;
+	uint16_t num_chunks;
+	int err, len;
+
+	num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+		num_chunks++;
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+		num_chunks++;
+
+	len = sizeof(struct virtchnl2_del_ena_dis_queues) +
+		sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (queue_select == NULL)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = num_chunks;
+	queue_select->vport_id = vport->vport_id;
+
+	type = VIRTCHNL2_QUEUE_TYPE_RX;
+	queue_chunk[type].type = type;
+	queue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid;
+	queue_chunk[type].num_queues = vport->num_rx_q;
+
+	type = VIRTCHNL2_QUEUE_TYPE_TX;
+	queue_chunk[type].type = type;
+	queue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid;
+	queue_chunk[type].num_queues = vport->num_tx_q;
+
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+		queue_chunk[type].type = type;
+		queue_chunk[type].start_queue_id =
+			vport->chunks_info.rx_buf_start_qid;
+		queue_chunk[type].num_queues = vport->num_rx_bufq;
+	}
+
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+		queue_chunk[type].type = type;
+		queue_chunk[type].start_queue_id =
+			vport->chunks_info.tx_compl_start_qid;
+		queue_chunk[type].num_queues = vport->num_tx_complq;
+	}
+
+	args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :
+		VIRTCHNL2_OP_DISABLE_QUEUES;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
+			    enable ? "ENABLE" : "DISABLE");
+
+	rte_free(queue_select);
+	return err;
+}
+
 int
 idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
 {