[v8,03/14] net/idpf: add queue setup and release in single queue model

Message ID 20221020062951.645121-4-junfeng.guo@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Andrew Rybchenko
Series: add support for idpf PMD in DPDK

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Junfeng Guo Oct. 20, 2022, 6:29 a.m. UTC
  Add support for queue operations in the single queue model:
 - rx_queue_setup
 - rx_queue_release
 - tx_queue_setup
 - tx_queue_release

In the single queue model, the same descriptor queue is used by SW to
post buffer descriptors to HW and by HW to post completed descriptors
to SW.
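
As a minimal sketch of what that sharing means in practice (the structure,
field, and flag names below are illustrative stand-ins, not the idpf
descriptor ABI):

    /* One ring serves both directions: SW posts buffer descriptors into
     * the slots, HW writes completed descriptors back into the same slots,
     * and SW reaps them from the head by polling a "descriptor done" bit. */
    #include <stdint.h>

    struct sq_rx_desc {
            uint64_t buf_addr; /* filled by SW: DMA address of a fresh buffer */
            uint64_t qword1;   /* written back by HW: status/len, incl. DD */
    };

    #define SQ_RX_DD (1ULL << 0) /* assumed "descriptor done" flag */

    static uint16_t
    sq_rx_reap(struct sq_rx_desc *ring, uint16_t head, uint16_t nb_desc)
    {
            while (ring[head].qword1 & SQ_RX_DD) {
                    /* ... deliver the buffer, then re-post a new one ... */
                    ring[head].qword1 = 0;
                    head = (head + 1) % nb_desc;
            }
            return head;
    }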

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 doc/guides/nics/features/idpf.ini |   2 +
 doc/guides/nics/idpf.rst          |  22 ++
 drivers/net/idpf/idpf_ethdev.c    |  58 ++++
 drivers/net/idpf/idpf_ethdev.h    |   9 +
 drivers/net/idpf/idpf_rxtx.c      | 465 ++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.h      | 186 ++++++++++++
 drivers/net/idpf/idpf_vchnl.c     | 251 ++++++++++++++++
 drivers/net/idpf/meson.build      |   1 +
 8 files changed, 994 insertions(+)
 create mode 100644 drivers/net/idpf/idpf_rxtx.c
 create mode 100644 drivers/net/idpf/idpf_rxtx.h

Patch

diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index f029a279b3..681a908194 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -7,6 +7,8 @@ 
 ; is selected.
 ;
 [Features]
+Runtime Rx queue setup = Y
+Runtime Tx queue setup = Y
 Multiprocess aware   = Y
 FreeBSD              = Y
 Linux                = Y
diff --git a/doc/guides/nics/idpf.rst b/doc/guides/nics/idpf.rst
index 428bf4266a..fbc0f51de6 100644
--- a/doc/guides/nics/idpf.rst
+++ b/doc/guides/nics/idpf.rst
@@ -45,6 +45,28 @@  Runtime Config Options
   Then idpf PMD will create 3 vports (ethdevs) for device ca:00.0.
  NOTE: This parameter is mandatory; otherwise no ethdev will be created.
 
+- ``rx_single`` (default ``0``)
+
+  There are two queue modes supported by the Intel® IPU Ethernet ES2000 Series for the
+  Rx queue: single queue mode and split queue mode. The user can choose the Rx queue
+  mode with the ``devargs`` parameter ``rx_single``.
+
+    -a ca:00.0,rx_single=1
+
+  Then the idpf PMD will configure the Rx queues in single queue mode. Otherwise, split
+  queue mode is used by default.
+
+- ``tx_single`` (default ``0``)
+
+  There are two queue modes supported by the Intel® IPU Ethernet ES2000 Series for the
+  Tx queue: single queue mode and split queue mode. The user can choose the Tx queue
+  mode with the ``devargs`` parameter ``tx_single``.
+
+    -a ca:00.0,tx_single=1
+
+  Then the idpf PMD will configure the Tx queues in single queue mode. Otherwise, split
+  queue mode is used by default.
+
 Driver compilation and testing
 ------------------------------
 
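Both devargs above compose with the mandatory ``vport`` parameter on the same EAL
allow-list entry, e.g. ``-a ca:00.0,vport=[0-2],rx_single=1,tx_single=1`` (treat the
combined line as illustrative; only the individual parameters are specified above).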
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 7806c43668..96af54f47b 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -10,13 +10,18 @@ 
 #include <rte_dev.h>
 
 #include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
 
+#define IDPF_TX_SINGLE_Q	"tx_single"
+#define IDPF_RX_SINGLE_Q	"rx_single"
 #define IDPF_VPORT		"vport"
 
 struct idpf_adapter_list adapter_list;
 bool adapter_list_init;
 
 static const char * const idpf_valid_args[] = {
+	IDPF_TX_SINGLE_Q,
+	IDPF_RX_SINGLE_Q,
 	IDPF_VPORT,
 	NULL
 };
@@ -52,6 +57,10 @@  static const struct eth_dev_ops idpf_eth_dev_ops = {
 	.dev_start			= idpf_dev_start,
 	.dev_stop			= idpf_dev_stop,
 	.dev_close			= idpf_dev_close,
+	.rx_queue_setup			= idpf_rx_queue_setup,
+	.rx_queue_release		= idpf_dev_rx_queue_release,
+	.tx_queue_setup			= idpf_tx_queue_setup,
+	.tx_queue_release		= idpf_dev_tx_queue_release,
 	.link_update			= idpf_dev_link_update,
 };
 
@@ -81,6 +90,18 @@  idpf_init_vport_req_info(struct rte_eth_dev *dev)
 		(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];
 
 	vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	if (adapter->txq_model) {
+		vport_info->txq_model =
+			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+		vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
+		vport_info->num_tx_complq = 0;
+	}
+	if (adapter->rxq_model) {
+		vport_info->rxq_model =
+			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+		vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
+		vport_info->num_rx_bufq = 0;
+	}
 
 	return 0;
 }
@@ -110,6 +131,8 @@  idpf_init_vport(struct rte_eth_dev *dev)
 	int i;
 
 	vport->vport_id = vport_info->vport_id;
+	vport->txq_model = vport_info->txq_model;
+	vport->rxq_model = vport_info->rxq_model;
 	vport->num_tx_q = vport_info->num_tx_q;
 	vport->num_rx_q = vport_info->num_rx_q;
 	vport->max_mtu = vport_info->max_mtu;
@@ -149,6 +172,12 @@  idpf_init_vport(struct rte_eth_dev *dev)
 static int
 idpf_dev_configure(__rte_unused struct rte_eth_dev *dev)
 {
+	if (dev->data->nb_tx_queues > IDPF_DEFAULT_TXQ_NUM ||
+	    dev->data->nb_rx_queues > IDPF_DEFAULT_RXQ_NUM) {
+		PMD_INIT_LOG(ERR, "Invalid queue number.");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -312,6 +341,25 @@  parse_vport(const char *key, const char *value, void *args)
 	return 0;
 }
 
+static int
+parse_bool(const char *key, const char *value, void *args)
+{
+	int *i = (int *)args;
+	char *end;
+	int num;
+
+	num = strtoul(value, &end, 10);
+
+	if (num != 0 && num != 1) {
+		PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", value must be 0 or 1",
+			value, key);
+		return -1;
+	}
+
+	*i = num;
+	return 0;
+}
+
 static int
 idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 {
@@ -333,6 +381,16 @@  idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 	if (ret)
 		goto bail;
 
+	ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
+				 &adapter->txq_model);
+	if (ret)
+		goto bail;
+
+	ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
+				 &adapter->rxq_model);
+	if (ret)
+		goto bail;
+
 bail:
 	rte_kvargs_free(kvlist);
 	return ret;
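Pulled out of the diff, the devargs plumbing above reduces to the public
``rte_kvargs`` API. A self-contained sketch, with a slightly stricter handler than
the patch's ``parse_bool()`` (it also rejects values with trailing characters);
names other than the API calls are hypothetical:

    #include <stdlib.h>
    #include <rte_kvargs.h>

    static int
    parse_bool_arg(const char *key, const char *value, void *args)
    {
            int *dst = args;
            char *end = NULL;
            long num = strtol(value, &end, 10);

            (void)key;
            if (end == value || *end != '\0' || (num != 0 && num != 1))
                    return -1; /* value must be exactly "0" or "1" */
            *dst = (int)num;
            return 0;
    }

    static int
    parse_single_q_args(const char *devargs, int *rx_single, int *tx_single)
    {
            static const char * const keys[] = { "rx_single", "tx_single", NULL };
            struct rte_kvargs *kvlist = rte_kvargs_parse(devargs, keys);
            int ret;

            if (kvlist == NULL)
                    return -1;
            ret = rte_kvargs_process(kvlist, "rx_single", parse_bool_arg, rx_single);
            if (ret == 0)
                    ret = rte_kvargs_process(kvlist, "tx_single", parse_bool_arg, tx_single);
            rte_kvargs_free(kvlist);
            return ret;
    }

For example, ``parse_single_q_args("rx_single=1,tx_single=0", &rx, &tx)`` would set
``rx = 1`` and ``tx = 0``.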
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 77824d5f7f..8b7170f49e 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -83,6 +83,8 @@  struct idpf_chunks_info {
 struct idpf_vport {
 	struct idpf_adapter *adapter; /* Backreference to associated adapter */
 	uint16_t vport_id;
+	uint32_t txq_model;
+	uint32_t rxq_model;
 	uint16_t num_tx_q;
 	uint16_t num_rx_q;
 
@@ -118,6 +120,9 @@  struct idpf_adapter {
 	uint32_t cmd_retval; /* return value of the cmd response from ipf */
 	uint8_t *mbx_resp; /* buffer to store the mailbox response from ipf */
 
+	uint32_t txq_model;
+	uint32_t rxq_model;
+
 	/* Vport info */
 	uint8_t **vport_req_info;
 	uint8_t **vport_recv_info;
@@ -197,6 +202,10 @@  int idpf_vc_check_api_version(struct idpf_adapter *adapter);
 int idpf_vc_get_caps(struct idpf_adapter *adapter);
 int idpf_vc_create_vport(struct rte_eth_dev *dev);
 int idpf_vc_destroy_vport(struct idpf_vport *vport);
+int idpf_vc_config_rxqs(struct idpf_vport *vport);
+int idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id);
+int idpf_vc_config_txqs(struct idpf_vport *vport);
+int idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id);
 int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
 int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
 		      uint16_t buf_len, uint8_t *buf);
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
new file mode 100644
index 0000000000..bff90dd9c6
--- /dev/null
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -0,0 +1,465 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <ethdev_driver.h>
+#include <rte_net.h>
+
+#include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
+
+static inline int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+	/* The following constraints must be satisfied:
+	 *   thresh < rxq->nb_rx_desc
+	 */
+	if (thresh >= nb_desc) {
+		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
+			     thresh, nb_desc);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline int
+check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
+		uint16_t tx_free_thresh)
+{
+	/* TX descriptors will have their RS bit set after tx_rs_thresh
+	 * descriptors have been used. The TX descriptor ring will be cleaned
+	 * after tx_free_thresh descriptors are used or if the number of
+	 * descriptors required to transmit a packet is greater than the
+	 * number of free TX descriptors.
+	 *
+	 * The following constraints must be satisfied:
+	 *  - tx_rs_thresh must be less than the size of the ring minus 2.
+	 *  - tx_free_thresh must be less than the size of the ring minus 3.
+	 *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
+	 *  - tx_rs_thresh must be a divisor of the ring size.
+	 *
+	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+	 * race condition, hence the maximum threshold constraints. When set
+	 * to zero use default values.
+	 */
+	if (tx_rs_thresh >= (nb_desc - 2)) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
+			     "number of TX descriptors (%u) minus 2",
+			     tx_rs_thresh, nb_desc);
+		return -EINVAL;
+	}
+	if (tx_free_thresh >= (nb_desc - 3)) {
+		PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
+			     "number of TX descriptors (%u) minus 3.",
+			     tx_free_thresh, nb_desc);
+		return -EINVAL;
+	}
+	if (tx_rs_thresh > tx_free_thresh) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
+			     "equal to tx_free_thresh (%u).",
+			     tx_rs_thresh, tx_free_thresh);
+		return -EINVAL;
+	}
+	if ((nb_desc % tx_rs_thresh) != 0) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
+			     "number of TX descriptors (%u).",
+			     tx_rs_thresh, nb_desc);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline void
+release_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+	uint16_t i;
+
+	if (!rxq->sw_ring)
+		return;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		if (rxq->sw_ring[i]) {
+			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+			rxq->sw_ring[i] = NULL;
+		}
+	}
+}
+
+static inline void
+release_txq_mbufs(struct idpf_tx_queue *txq)
+{
+	uint16_t nb_desc, i;
+
+	if (!txq || !txq->sw_ring) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+		return;
+	}
+
+	if (txq->sw_nb_desc) {
+		/* Split queue model (not set up by this patch) */
+		nb_desc = 0;
+	} else {
+		/* Single queue model */
+		nb_desc = txq->nb_tx_desc;
+	}
+	for (i = 0; i < nb_desc; i++) {
+		if (txq->sw_ring[i].mbuf) {
+			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+			txq->sw_ring[i].mbuf = NULL;
+		}
+	}
+}
+
+static const struct idpf_rxq_ops def_rxq_ops = {
+	.release_mbufs = release_rxq_mbufs,
+};
+
+static const struct idpf_txq_ops def_txq_ops = {
+	.release_mbufs = release_txq_mbufs,
+};
+
+static void
+idpf_rx_queue_release(void *rxq)
+{
+	struct idpf_rx_queue *q = (struct idpf_rx_queue *)rxq;
+
+	if (!q)
+		return;
+
+	/* Single queue */
+	q->ops->release_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_free(q);
+}
+
+static void
+idpf_tx_queue_release(void *txq)
+{
+	struct idpf_tx_queue *q = (struct idpf_tx_queue *)txq;
+
+	if (!q)
+		return;
+
+	rte_free(q->complq);
+	q->ops->release_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_free(q);
+}
+
+static inline void
+reset_single_rx_queue(struct idpf_rx_queue *rxq)
+{
+	uint16_t len;
+	uint32_t i;
+
+	if (!rxq)
+		return;
+
+	len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+	for (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);
+	     i++)
+		((volatile char *)rxq->rx_ring)[i] = 0;
+
+	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+	for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+		rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+	rxq->rx_tail = 0;
+	rxq->nb_rx_hold = 0;
+
+	if (rxq->pkt_first_seg != NULL)
+		rte_pktmbuf_free(rxq->pkt_first_seg);
+
+	rxq->pkt_first_seg = NULL;
+	rxq->pkt_last_seg = NULL;
+	rxq->rxrearm_start = 0;
+	rxq->rxrearm_nb = 0;
+}
+
+static inline void
+reset_single_tx_queue(struct idpf_tx_queue *txq)
+{
+	struct idpf_tx_entry *txe;
+	uint32_t i, size;
+	uint16_t prev;
+
+	if (!txq) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+		return;
+	}
+
+	txe = txq->sw_ring;
+	size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;
+	for (i = 0; i < size; i++)
+		((volatile char *)txq->tx_ring)[i] = 0;
+
+	prev = (uint16_t)(txq->nb_tx_desc - 1);
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		txq->tx_ring[i].qw1.cmd_dtype =
+			rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);
+		txe[i].mbuf = NULL;
+		txe[i].last_id = i;
+		txe[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->tx_tail = 0;
+	txq->nb_used = 0;
+
+	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+	txq->nb_free = txq->nb_tx_desc - 1;
+
+	txq->next_dd = txq->rs_thresh - 1;
+	txq->next_rs = txq->rs_thresh - 1;
+}
+
+static int
+idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			   uint16_t nb_desc, unsigned int socket_id,
+			   const struct rte_eth_rxconf *rx_conf,
+			   struct rte_mempool *mp)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_hw *hw = &adapter->hw;
+	const struct rte_memzone *mz;
+	struct idpf_rx_queue *rxq;
+	uint16_t rx_free_thresh;
+	uint32_t ring_size;
+	uint64_t offloads;
+	uint16_t len;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+	    nb_desc > IDPF_MAX_RING_DESC ||
+	    nb_desc < IDPF_MIN_RING_DESC) {
+		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid",
+			     nb_desc);
+		return -EINVAL;
+	}
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	/* Check free threshold */
+	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+		IDPF_DEFAULT_RX_FREE_THRESH :
+		rx_conf->rx_free_thresh;
+	if (check_rx_thresh(nb_desc, rx_free_thresh))
+		return -EINVAL;
+
+	/* Free memory if needed */
+	if (dev->data->rx_queues[queue_idx]) {
+		idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	/* Set up the Rx descriptor queue */
+	rxq = rte_zmalloc_socket("idpf rxq",
+				 sizeof(struct idpf_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (!rxq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+		return -ENOMEM;
+	}
+
+	rxq->mp = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = rx_free_thresh;
+	rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+	rxq->port_id = dev->data->port_id;
+	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->rx_hdr_len = 0;
+	rxq->adapter = adapter;
+	rxq->offloads = offloads;
+
+	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+	rxq->rx_buf_len = len;
+
+	len = nb_desc + IDPF_RX_MAX_BURST;
+	rxq->sw_ring =
+		rte_zmalloc_socket("idpf rxq sw ring",
+				   sizeof(struct rte_mbuf *) * len,
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (!rxq->sw_ring) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+
+	/* Allocate a little more to support bulk allocation. */
+	len = nb_desc + IDPF_RX_MAX_BURST;
+	ring_size = RTE_ALIGN(len *
+			      sizeof(struct virtchnl2_singleq_rx_buf_desc),
+			      IDPF_DMA_MEM_ALIGN);
+	mz = rte_eth_dma_zone_reserve(dev, "rx ring", queue_idx,
+				      ring_size, IDPF_RING_BASE_ALIGN,
+				      socket_id);
+	if (!mz) {
+		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue.");
+		rte_free(rxq->sw_ring);
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+
+	/* Zero all the descriptors in the ring. */
+	memset(mz->addr, 0, ring_size);
+	rxq->rx_ring_phys_addr = mz->iova;
+	rxq->rx_ring = mz->addr;
+
+	rxq->mz = mz;
+	reset_single_rx_queue(rxq);
+	rxq->q_set = true;
+	dev->data->rx_queues[queue_idx] = rxq;
+	rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
+			queue_idx * vport->chunks_info.rx_qtail_spacing);
+	rxq->ops = &def_rxq_ops;
+
+	return 0;
+}
+
+int
+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		    uint16_t nb_desc, unsigned int socket_id,
+		    const struct rte_eth_rxconf *rx_conf,
+		    struct rte_mempool *mp)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+		return idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,
+						  socket_id, rx_conf, mp);
+	else
+		return -1;
+}
+
+static int
+idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			   uint16_t nb_desc, unsigned int socket_id,
+			   const struct rte_eth_txconf *tx_conf)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	uint16_t tx_rs_thresh, tx_free_thresh;
+	struct idpf_hw *hw = &adapter->hw;
+	const struct rte_memzone *mz;
+	struct idpf_tx_queue *txq;
+	uint32_t ring_size;
+	uint64_t offloads;
+
+	PMD_INIT_FUNC_TRACE();
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+	if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+	    nb_desc > IDPF_MAX_RING_DESC ||
+	    nb_desc < IDPF_MIN_RING_DESC) {
+		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+			     nb_desc);
+		return -EINVAL;
+	}
+
+	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+		tx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);
+	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+		tx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);
+	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh))
+		return -EINVAL;
+
+	/* Free memory if needed. */
+	if (dev->data->tx_queues[queue_idx]) {
+		idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	/* Allocate the TX queue data structure. */
+	txq = rte_zmalloc_socket("idpf txq",
+				 sizeof(struct idpf_tx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (!txq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+		return -ENOMEM;
+	}
+
+	/* TODO: vlan offload */
+
+	txq->nb_tx_desc = nb_desc;
+	txq->rs_thresh = tx_rs_thresh;
+	txq->free_thresh = tx_free_thresh;
+	txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
+	txq->port_id = dev->data->port_id;
+	txq->offloads = offloads;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+	/* Allocate software ring */
+	txq->sw_ring =
+		rte_zmalloc_socket("idpf tx sw ring",
+				   sizeof(struct idpf_tx_entry) * nb_desc,
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (!txq->sw_ring) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+		rte_free(txq);
+		return -ENOMEM;
+	}
+
+	/* Allocate TX hardware ring descriptors. */
+	ring_size = sizeof(struct idpf_flex_tx_desc) * nb_desc;
+	ring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);
+	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+				      ring_size, IDPF_RING_BASE_ALIGN,
+				      socket_id);
+	if (!mz) {
+		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+		rte_free(txq->sw_ring);
+		rte_free(txq);
+		return -ENOMEM;
+	}
+
+	txq->tx_ring_phys_addr = mz->iova;
+	txq->tx_ring = (struct idpf_flex_tx_desc *)mz->addr;
+
+	txq->mz = mz;
+	reset_single_tx_queue(txq);
+	txq->q_set = true;
+	dev->data->tx_queues[queue_idx] = txq;
+	txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
+			queue_idx * vport->chunks_info.tx_qtail_spacing);
+	txq->ops = &def_txq_ops;
+
+	return 0;
+}
+
+int
+idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		    uint16_t nb_desc, unsigned int socket_id,
+		    const struct rte_eth_txconf *tx_conf)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+		return idpf_tx_single_queue_setup(dev, queue_idx, nb_desc,
+						  socket_id, tx_conf);
+	else
+		return -1;
+}
+
+void
+idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	idpf_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	idpf_tx_queue_release(dev->data->tx_queues[qid]);
+}
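From the application side these ops are reached through the generic ethdev API.
A minimal sketch, assuming the port was already configured with
``rte_eth_dev_configure()`` and that 1024 descriptors satisfy the constraints
enforced above (a multiple of 32 within [32, 4096]):

    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    static int
    setup_one_queue_pair(uint16_t port_id, struct rte_mempool *mbuf_pool)
    {
            const uint16_t nb_desc = 1024;
            int socket = rte_eth_dev_socket_id(port_id);
            int ret;

            /* Dispatches to idpf_rx_queue_setup() via eth_dev_ops */
            ret = rte_eth_rx_queue_setup(port_id, 0, nb_desc, socket,
                                         NULL /* default rxconf */, mbuf_pool);
            if (ret != 0)
                    return ret;

            /* Dispatches to idpf_tx_queue_setup() via eth_dev_ops */
            return rte_eth_tx_queue_setup(port_id, 0, nb_desc, socket,
                                          NULL /* default txconf */);
    }

Passing ``NULL`` for the queue configs keeps the driver defaults, so the
``rx_free_thresh``/``tx_rs_thresh`` fallbacks in the setup paths above apply.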
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
new file mode 100644
index 0000000000..69a1fa6348
--- /dev/null
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -0,0 +1,186 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#ifndef _IDPF_RXTX_H_
+#define _IDPF_RXTX_H_
+
+#include "idpf_osdep.h"
+#include "idpf_type.h"
+#include "idpf_devids.h"
+#include "idpf_lan_txrx.h"
+#include "idpf_lan_pf_regs.h"
+#include "virtchnl.h"
+#include "virtchnl2.h"
+#include "virtchnl2_lan_desc.h"
+
+#include "idpf_ethdev.h"
+
+/* The ring length (QLEN) must be a whole multiple of 32 descriptors. */
+#define IDPF_ALIGN_RING_DESC	32
+#define IDPF_MIN_RING_DESC	32
+#define IDPF_MAX_RING_DESC	4096
+#define IDPF_DMA_MEM_ALIGN	4096
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define IDPF_RING_BASE_ALIGN	128
+
+/* used for Rx bulk allocation */
+#define IDPF_RX_MAX_BURST	32
+#define IDPF_TX_MAX_BURST	32
+
+#define IDPF_DEFAULT_RX_FREE_THRESH	32
+
+/* used for Vector PMD */
+#define IDPF_VPMD_RX_MAX_BURST	32
+#define IDPF_VPMD_TX_MAX_BURST	32
+#define IDPF_VPMD_DESCS_PER_LOOP	4
+#define IDPF_RXQ_REARM_THRESH	64
+
+#define IDPF_DEFAULT_TX_RS_THRESH	32
+#define IDPF_DEFAULT_TX_FREE_THRESH	32
+
+#define IDPF_MIN_TSO_MSS	88
+#define IDPF_MAX_TSO_MSS	9728
+#define IDPF_MAX_TSO_FRAME_SIZE	262143
+#define IDPF_TX_MAX_MTU_SEG     10
+
+#define IDPF_TX_OFFLOAD_NOTSUP_MASK \
+		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)
+
+struct idpf_rx_queue {
+	struct idpf_adapter *adapter;	/* the adapter this queue belongs to */
+	struct rte_mempool *mp;		/* mbuf pool to populate Rx ring */
+	const struct rte_memzone *mz;	/* memzone for Rx ring */
+	volatile void *rx_ring;
+	struct rte_mbuf **sw_ring;	/* address of SW ring */
+	uint64_t rx_ring_phys_addr;	/* Rx ring DMA address */
+
+	uint16_t nb_rx_desc;		/* ring length */
+	uint16_t rx_tail;		/* current value of tail */
+	volatile uint8_t *qrx_tail;	/* register address of tail */
+	uint16_t rx_free_thresh;	/* max free RX desc to hold */
+	uint16_t nb_rx_hold;		/* number of held free RX desc */
+	struct rte_mbuf *pkt_first_seg;	/* first segment of current packet */
+	struct rte_mbuf *pkt_last_seg;	/* last segment of current packet */
+	struct rte_mbuf fake_mbuf;	/* dummy mbuf */
+
+	/* used for VPMD */
+	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
+	uint16_t rxrearm_start;    /* the idx we start the re-arming from */
+	uint64_t mbuf_initializer; /* value to init mbufs */
+
+	/* for rx bulk */
+	uint16_t rx_nb_avail;      /* number of staged packets ready */
+	uint16_t rx_next_avail;    /* index of next staged packets */
+	uint16_t rx_free_trigger;  /* triggers rx buffer allocation */
+	struct rte_mbuf *rx_stage[IDPF_RX_MAX_BURST * 2]; /* store mbuf */
+
+	uint16_t port_id;	/* device port ID */
+	uint16_t queue_id;      /* Rx queue index */
+	uint16_t rx_buf_len;    /* The packet buffer size */
+	uint16_t rx_hdr_len;    /* The header buffer size */
+	uint16_t max_pkt_len;   /* Maximum packet length */
+	uint8_t rxdid;
+
+	bool q_set;		/* if rx queue has been configured */
+	bool q_started;		/* if rx queue has been started */
+	bool rx_deferred_start;	/* don't start this queue in dev start */
+	const struct idpf_rxq_ops *ops;
+
+	/* only valid for split queue mode */
+	uint8_t expected_gen_id;
+	struct idpf_rx_queue *bufq1;
+	struct idpf_rx_queue *bufq2;
+
+	uint64_t offloads;
+	uint32_t hw_register_set;
+};
+
+struct idpf_tx_entry {
+	struct rte_mbuf *mbuf;
+	uint16_t next_id;
+	uint16_t last_id;
+};
+
+struct idpf_tx_vec_entry {
+	struct rte_mbuf *mbuf;
+};
+
+/* Structure associated with each TX queue. */
+struct idpf_tx_queue {
+	const struct rte_memzone *mz;		/* memzone for Tx ring */
+	volatile struct idpf_flex_tx_desc *tx_ring;	/* Tx ring virtual address */
+	volatile union {
+		struct idpf_flex_tx_sched_desc *desc_ring;
+		struct idpf_splitq_tx_compl_desc *compl_ring;
+	};
+	uint64_t tx_ring_phys_addr;		/* Tx ring DMA address */
+	struct idpf_tx_entry *sw_ring;		/* address array of SW ring */
+
+	uint16_t nb_tx_desc;		/* ring length */
+	uint16_t tx_tail;		/* current value of tail */
+	volatile uint8_t *qtx_tail;	/* register address of tail */
+	/* number of used desc since RS bit set */
+	uint16_t nb_used;
+	uint16_t nb_free;
+	uint16_t last_desc_cleaned;	/* last desc that has been cleaned */
+	uint16_t free_thresh;
+	uint16_t rs_thresh;
+
+	uint16_t port_id;
+	uint16_t queue_id;
+	uint64_t offloads;
+	uint16_t next_dd;	/* next to check DD, for VPMD */
+	uint16_t next_rs;	/* next to set RS, for VPMD */
+
+	bool q_set;		/* if tx queue has been configured */
+	bool q_started;		/* if tx queue has been started */
+	bool tx_deferred_start;	/* don't start this queue in dev start */
+	const struct idpf_txq_ops *ops;
+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1       BIT(0)
+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2       BIT(1)
+	uint8_t vlan_flag;
+
+	/* only valid for split queue mode */
+	uint16_t sw_nb_desc;
+	uint16_t sw_tail;
+	void **txqs;
+	uint32_t tx_start_qid;
+	uint8_t expected_gen_id;
+	struct idpf_tx_queue *complq;
+};
+
+/* Offload features */
+union idpf_tx_offload {
+	uint64_t data;
+	struct {
+		uint64_t l2_len:7; /* L2 (MAC) Header Length. */
+		uint64_t l3_len:9; /* L3 (IP) Header Length. */
+		uint64_t l4_len:8; /* L4 Header Length. */
+		uint64_t tso_segsz:16; /* TCP TSO segment size */
+		/* uint64_t unused : 24; */
+	};
+};
+
+struct idpf_rxq_ops {
+	void (*release_mbufs)(struct idpf_rx_queue *rxq);
+};
+
+struct idpf_txq_ops {
+	void (*release_mbufs)(struct idpf_tx_queue *txq);
+};
+
+int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			uint16_t nb_desc, unsigned int socket_id,
+			const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mp);
+int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+
+int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			uint16_t nb_desc, unsigned int socket_id,
+			const struct rte_eth_txconf *tx_conf);
+int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+
+#endif /* _IDPF_RXTX_H_ */
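One note on the header above: ``union idpf_tx_offload`` is plain bit-packing.
A hypothetical helper (not part of this patch) showing how it might be filled
from an mbuf's Tx metadata, whose bit-field widths match the union's:

    #include <rte_mbuf.h>
    #include "idpf_rxtx.h" /* for union idpf_tx_offload */

    static uint64_t
    pack_tx_offload(const struct rte_mbuf *m)
    {
            union idpf_tx_offload off = { .data = 0 };

            off.l2_len = m->l2_len;       /* 7 bits: MAC header length */
            off.l3_len = m->l3_len;       /* 9 bits: IP header length */
            off.l4_len = m->l4_len;       /* 8 bits: L4 header length */
            off.tso_segsz = m->tso_segsz; /* 16 bits: TSO segment size */
            return off.data;
    }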
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index ef0288ff45..88cda54a26 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -21,6 +21,7 @@ 
 #include <rte_dev.h>
 
 #include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
 
 #include "idpf_prototype.h"
 
@@ -469,6 +470,256 @@  idpf_vc_destroy_vport(struct idpf_vport *vport)
 	return err;
 }
 
+#define IDPF_RX_BUF_STRIDE		64
+int
+idpf_vc_config_rxqs(struct idpf_vport *vport)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_rx_queue **rxq =
+		(struct idpf_rx_queue **)vport->dev_data->rx_queues;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t total_qs, num_qs;
+	int size, i;
+	int err = 0;
+	int k = 0;
+
+	total_qs = vport->num_rx_q;
+	while (total_qs) {
+		if (total_qs > adapter->max_rxq_per_msg) {
+			num_qs = adapter->max_rxq_per_msg;
+			total_qs -= adapter->max_rxq_per_msg;
+		} else {
+			num_qs = total_qs;
+			total_qs = 0;
+		}
+
+		size = sizeof(*vc_rxqs) + (num_qs - 1) *
+			sizeof(struct virtchnl2_rxq_info);
+		vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+		if (vc_rxqs == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+			err = -ENOMEM;
+			break;
+		}
+		vc_rxqs->vport_id = vport->vport_id;
+		vc_rxqs->num_qinfo = num_qs;
+		if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+			for (i = 0; i < num_qs; i++, k++) {
+				rxq_info = &vc_rxqs->qinfo[i];
+				rxq_info->dma_ring_addr = rxq[k]->rx_ring_phys_addr;
+				rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+				rxq_info->queue_id = rxq[k]->queue_id;
+				rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+				rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
+				rxq_info->max_pkt_size = vport->max_pkt_len;
+
+				rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+				rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+				rxq_info->ring_len = rxq[k]->nb_rx_desc;
+			}
+		} else {
+			rte_free(vc_rxqs);
+			return -1;
+		}
+		memset(&args, 0, sizeof(args));
+		args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+		args.in_args = (uint8_t *)vc_rxqs;
+		args.in_args_size = size;
+		args.out_buffer = adapter->mbx_resp;
+		args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+		err = idpf_execute_vc_cmd(adapter, &args);
+		rte_free(vc_rxqs);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+			break;
+		}
+	}
+
+	return err;
+}
+
+int
+idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_rx_queue **rxq =
+		(struct idpf_rx_queue **)vport->dev_data->rx_queues;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err;
+
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+		num_qs = IDPF_RXQ_PER_GRP;
+	else
+		num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
+
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (vc_rxqs == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		rxq_info = &vc_rxqs->qinfo[0];
+		rxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+		rxq_info->queue_id = rxq[rxq_id]->queue_id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;
+		rxq_info->max_pkt_size = vport->max_pkt_len;
+
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+		rxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;
+	} else {
+		rte_free(vc_rxqs);
+		return -1;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+idpf_vc_config_txqs(struct idpf_vport *vport)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_tx_queue **txq =
+		(struct idpf_tx_queue **)vport->dev_data->tx_queues;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t total_qs, num_qs;
+	int size, i;
+	int err = 0;
+	int k = 0;
+
+	total_qs = vport->num_tx_q;
+	while (total_qs) {
+		if (total_qs > adapter->max_txq_per_msg) {
+			num_qs = adapter->max_txq_per_msg;
+			total_qs -= adapter->max_txq_per_msg;
+		} else {
+			num_qs = total_qs;
+			total_qs = 0;
+		}
+		size = sizeof(*vc_txqs) + (num_qs - 1) *
+			sizeof(struct virtchnl2_txq_info);
+		vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+		if (vc_txqs == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+			err = -ENOMEM;
+			break;
+		}
+		vc_txqs->vport_id = vport->vport_id;
+		vc_txqs->num_qinfo = num_qs;
+		if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+			for (i = 0; i < num_qs; i++, k++) {
+				txq_info = &vc_txqs->qinfo[i];
+				txq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;
+				txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+				txq_info->queue_id = txq[k]->queue_id;
+				txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+				txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+				txq_info->ring_len = txq[k]->nb_tx_desc;
+			}
+		} else {
+			rte_free(vc_txqs);
+			return -1;
+		}
+
+		memset(&args, 0, sizeof(args));
+		args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+		args.in_args = (uint8_t *)vc_txqs;
+		args.in_args_size = size;
+		args.out_buffer = adapter->mbx_resp;
+		args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+		err = idpf_execute_vc_cmd(adapter, &args);
+		rte_free(vc_txqs);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+			break;
+		}
+	}
+
+	return err;
+}
+
+int
+idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_tx_queue **txq =
+		(struct idpf_tx_queue **)vport->dev_data->tx_queues;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err;
+
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+		num_qs = IDPF_TXQ_PER_GRP;
+	else
+		num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
+
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (vc_txqs == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		txq_info = &vc_txqs->qinfo[0];
+		txq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+		txq_info->queue_id = txq[txq_id]->queue_id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = txq[txq_id]->nb_tx_desc;
+	} else {
+		rte_free(vc_txqs);
+		return -1;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
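+
+/* The idpf_vc_config_rxqs()/idpf_vc_config_txqs() pair share one idiom:
+ * split the per-queue info across as many mailbox messages as needed,
+ * sizing each message for its trailing flexible array. A condensed,
+ * self-contained sketch with stand-in structures (send_msg plays the
+ * role of idpf_execute_vc_cmd()):
+ *
+ *	#include <stdint.h>
+ *	#include <stdlib.h>
+ *
+ *	struct qinfo { uint32_t queue_id; };
+ *	struct cfg_msg {
+ *		uint16_t num_qinfo;
+ *		struct qinfo qinfo[1];	// trailing array
+ *	};
+ *
+ *	static int
+ *	config_queues_chunked(uint16_t total_qs, uint16_t max_per_msg,
+ *			      int (*send_msg)(const void *buf, size_t size))
+ *	{
+ *		uint16_t num_qs, next_qid = 0;
+ *		int err = 0;
+ *
+ *		while (total_qs > 0) {
+ *			num_qs = total_qs > max_per_msg ? max_per_msg : total_qs;
+ *			total_qs -= num_qs;
+ *
+ *			// one element is already inside the struct, hence -1
+ *			size_t size = sizeof(struct cfg_msg) +
+ *				      (num_qs - 1) * sizeof(struct qinfo);
+ *			struct cfg_msg *msg = calloc(1, size);
+ *
+ *			if (msg == NULL)
+ *				return -1;
+ *			msg->num_qinfo = num_qs;
+ *			for (uint16_t i = 0; i < num_qs; i++)
+ *				msg->qinfo[i].queue_id = next_qid++;
+ *
+ *			err = send_msg(msg, size);
+ *			free(msg);
+ *			if (err != 0)
+ *				break;
+ *		}
+ *		return err;
+ *	}
+ */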
+
 int
 idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
 {
diff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build
index bf8bf58ef5..832f887296 100644
--- a/drivers/net/idpf/meson.build
+++ b/drivers/net/idpf/meson.build
@@ -12,5 +12,6 @@  deps += ['common_idpf', 'security', 'cryptodev']
 
 sources = files(
     'idpf_ethdev.c',
+    'idpf_rxtx.c',
     'idpf_vchnl.c',
 )