[2/9] net/cnxk: probing representor ports

Message ID: 20230811163419.165790-3-hkalra@marvell.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Jerin Jacob
Series: net/cnxk: support for port representors

Checks

Context         Check      Description
ci/checkpatch   success    coding style OK

Commit Message

Harman Kalra Aug. 11, 2023, 4:34 p.m. UTC
  Basic skeleton for probing representor devices. If the PF device is
passed with the "representor" devargs, representor ports get probed
as separate ethdev devices.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 doc/guides/nics/cnxk.rst        |  39 +++++
 drivers/net/cnxk/cn10k_ethdev.c |   4 +-
 drivers/net/cnxk/cn9k_ethdev.c  |   4 +-
 drivers/net/cnxk/cnxk_ethdev.c  |  42 ++++-
 drivers/net/cnxk/cnxk_ethdev.h  |  12 ++
 drivers/net/cnxk/cnxk_rep.c     | 262 ++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cnxk_rep.h     |  51 +++++++
 drivers/net/cnxk/cnxk_rep_ops.c | 112 ++++++++++++++
 drivers/net/cnxk/meson.build    |   2 +
 9 files changed, 516 insertions(+), 12 deletions(-)
 create mode 100644 drivers/net/cnxk/cnxk_rep.c
 create mode 100644 drivers/net/cnxk/cnxk_rep.h
 create mode 100644 drivers/net/cnxk/cnxk_rep_ops.c
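Usage sketch (assuming a cnxk PF at PCI address 0002:02:00.0, used here only
as a placeholder BDF): with the "representor" devargs handled by this patch,
the PF and representor ports for its VFs 0-3 can be probed together as:

    dpdk-testpmd -a 0002:02:00.0,representor=[0-3] -- -i

A single representor can likewise be requested with
"-a 0002:02:00.0,representor=[0]", matching the cnxk.rst update below.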
  

Patch

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 9229056f6f..dd14102efa 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -37,6 +37,8 @@  Features of the CNXK Ethdev PMD are:
 - Inline IPsec processing support
 - Ingress meter support
 - Queue based priority flow control support
+- Virtual function representors
+- Represented port pattern matching and action
 
 Prerequisites
 -------------
@@ -581,6 +583,41 @@  Runtime Config Options for inline device
    With the above configuration, driver would poll for soft expiry events every
    1000 usec.
 
+Virtual Function Representors
+-----------------------------
+
+The CNXK driver supports the port representor model by adding virtual Ethernet
+ports that provide a logical representation in DPDK of SR-IOV virtual
+function (VF) devices, for control and monitoring.
+
+These port representor ethdev instances can be spawned on an as-needed basis
+through configuration parameters passed to the driver of the underlying
+base device using the devargs ``-a pci:dbdf,representor=[0]``.
+
+.. note::
+
+   The base device is the PF whose VFs will be represented by these representors.
+
+   The above devargs can specify either a range of representor devices,
+   ``-a pci:dbdf,representor=[0-3]``, or a single representor device,
+   ``-a pci:dbdf,representor=[0]``.
+
+On the exception path (i.e. until a flow definition is offloaded to the
+hardware), packets transmitted by the VFs are received by the corresponding
+representor ports, while packets transmitted by the representor ports are
+received by the respective VFs.
+
+On receiving VF traffic via these representor ports, the application holding
+the representor ports can decide to offload the traffic flow into the hardware.
+From then on, matching traffic is steered directly to the respective VFs
+without being received by the application.
+
+The virtual representor port PMD currently supports the following operations:
+
+- Get and clear VF statistics
+- Set MAC address
+- Flow operations: create, validate, destroy, query, flush, dump
+
 Debugging Options
 -----------------
 
@@ -595,3 +632,5 @@  Debugging Options
    +---+------------+-------------------------------------------------------+
    | 2 | NPC        | --log-level='pmd\.net.cnxk\.flow,8'                   |
    +---+------------+-------------------------------------------------------+
+   | 3 | REP        | --log-level='pmd\.net.cnxk\.rep,8'                    |
+   +---+------------+-------------------------------------------------------+
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 4c4acc7cf0..a6a4665af1 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -912,8 +912,8 @@  static const struct rte_pci_id cn10k_pci_nix_map[] = {
 
 static struct rte_pci_driver cn10k_pci_nix = {
 	.id_table = cn10k_pci_nix_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
-		     RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_PROBE_AGAIN,
 	.probe = cn10k_nix_probe,
 	.remove = cn10k_nix_remove,
 };
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index bae4dda5e2..0448d7e219 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -834,8 +834,8 @@  static const struct rte_pci_id cn9k_pci_nix_map[] = {
 
 static struct rte_pci_driver cn9k_pci_nix = {
 	.id_table = cn9k_pci_nix_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
-		     RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_PROBE_AGAIN,
 	.probe = cn9k_nix_probe,
 	.remove = cn9k_nix_remove,
 };
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 4b98faa729..902e6df72d 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -2102,6 +2102,10 @@  cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 	rte_free(eth_dev->data->mac_addrs);
 	eth_dev->data->mac_addrs = NULL;
 
+	/* Remove representor devices associated with PF */
+	if (dev->num_reps)
+		cnxk_rep_dev_remove(eth_dev);
+
 	rc = roc_nix_dev_fini(nix);
 	/* Can be freed later by PMD if NPA LF is in use */
 	if (rc == -EAGAIN) {
@@ -2180,18 +2184,40 @@  cnxk_nix_remove(struct rte_pci_device *pci_dev)
 int
 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 {
-	int rc;
+	struct rte_eth_devargs eth_da = {.nb_representor_ports = 0};
+	struct rte_eth_dev *pf_ethdev;
+	uint16_t num_rep;
+	int rc = 0;
 
 	RTE_SET_USED(pci_drv);
 
-	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
-					   cnxk_eth_dev_init);
+	if (pci_dev->device.devargs) {
+		rc = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
+		if (rc)
+			return rc;
+	}
+
+	num_rep = eth_da.nb_representor_ports;
+	plt_rep_dbg("nb_representor_ports = %d", num_rep);
 
-	/* On error on secondary, recheck if port exists in primary or
-	 * in mid of detach state.
+	/* This probe API may be invoked even after the first-level probe is
+	 * done, e.g. as part of application bring-up (OVS-DPDK vswitchd), so
+	 * check whether an eth_dev is already allocated for the PF device.
 	 */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
-		if (!rte_eth_dev_allocated(pci_dev->device.name))
-			return 0;
+	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+	if (pf_ethdev == NULL) {
+		rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
+						   cnxk_eth_dev_init);
+		if (rc || !num_rep)
+			return rc;
+
+		pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+	}
+
+	if (!num_rep)
+		return rc;
+
+	rc = cnxk_rep_dev_probe(pci_dev, pf_ethdev, &eth_da);
+
 	return rc;
 }
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index ed531fb277..3896db38e1 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -22,7 +22,9 @@ 
 #include <rte_tm_driver.h>
 
 #include "roc_api.h"
+
 #include <cnxk_ethdev_dp.h>
+#include <cnxk_rep.h>
 
 #define CNXK_ETH_DEV_PMD_VERSION "1.0"
 
@@ -307,6 +309,10 @@  struct cnxk_macsec_sess {
 };
 TAILQ_HEAD(cnxk_macsec_sess_list, cnxk_macsec_sess);
 
+struct cnxk_rep_info {
+	struct rte_eth_dev *rep_eth_dev;
+};
+
 struct cnxk_eth_dev {
 	/* ROC NIX */
 	struct roc_nix nix;
@@ -414,6 +420,12 @@  struct cnxk_eth_dev {
 	/* MCS device */
 	struct cnxk_mcs_dev *mcs_dev;
 	struct cnxk_macsec_sess_list mcs_list;
+
+	/* Port representor fields */
+	uint16_t switch_domain_id;
+	uint16_t num_reps;
+	uint16_t rep_xport_vdev;
+	struct cnxk_rep_info *rep_info;
 };
 
 struct cnxk_eth_rxq_sp {
diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c
new file mode 100644
index 0000000000..ebefc34ac8
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_rep.c
@@ -0,0 +1,262 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+#include <cnxk_rep.h>
+
+/* CNXK platform representor dev ops */
+struct eth_dev_ops cnxk_rep_dev_ops = {
+	.dev_infos_get = cnxk_rep_dev_info_get,
+	.dev_configure = cnxk_rep_dev_configure,
+	.dev_start = cnxk_rep_dev_start,
+	.rx_queue_setup = cnxk_rep_rx_queue_setup,
+	.rx_queue_release = cnxk_rep_rx_queue_release,
+	.tx_queue_setup = cnxk_rep_tx_queue_setup,
+	.tx_queue_release = cnxk_rep_tx_queue_release,
+	.link_update = cnxk_rep_link_update,
+	.dev_close = cnxk_rep_dev_close,
+	.dev_stop = cnxk_rep_dev_stop,
+	.stats_get = cnxk_rep_stats_get,
+	.stats_reset = cnxk_rep_stats_reset,
+	.flow_ops_get = cnxk_rep_flow_ops_get
+};
+
+int
+cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)
+{
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	plt_rep_dbg("Representor port:%d uninit", ethdev->data->port_id);
+	rte_free(ethdev->data->mac_addrs);
+	ethdev->data->mac_addrs = NULL;
+
+	return 0;
+}
+
+int
+cnxk_rep_dev_remove(struct rte_eth_dev *pf_ethdev)
+{
+	struct cnxk_eth_dev *pf_dev = cnxk_eth_pmd_priv(pf_ethdev);
+	int rc = 0;
+
+	rc = rte_eth_switch_domain_free(pf_dev->switch_domain_id);
+	if (rc)
+		plt_err("Failed to free switch domain: %d", rc);
+
+	return rc;
+}
+
+static int
+hotplug_rep_xport_vdev(struct cnxk_eth_dev *pf_dev)
+{
+	char rep_xport_devargs[] = CNXK_REP_XPORT_VDEV_DEVARGS;
+	char name[] = CNXK_REP_XPORT_VDEV_NAME;
+	uint16_t portid;
+	int rc = 0;
+
+	rc = rte_eth_dev_get_port_by_name(name, &portid);
+	if (rc != 0) {
+		if (rc == -ENODEV) {
+			/* rep_xport device should get added once during first PF probe */
+			rc = rte_eal_hotplug_add("vdev", name, rep_xport_devargs);
+			if (rc) {
+				plt_err("rep base hotplug failed %d", -rte_errno);
+				goto fail;
+			}
+
+			/* Get the portID of rep_xport port */
+			if (rte_eth_dev_get_port_by_name(name, &portid)) {
+				plt_err("cannot find added vdev %s", name);
+				goto free;
+			}
+		} else {
+			plt_err("cannot find added vdev %s", name);
+			goto free;
+		}
+	}
+
+	plt_rep_dbg("rep_xport vdev port %d, name %s", portid, name);
+	pf_dev->rep_xport_vdev = portid;
+
+	return 0;
+free:
+	rte_eal_hotplug_remove("vdev", name);
+fail:
+	return rc;
+}
+
+static int
+cnxk_init_rep_internal(struct cnxk_eth_dev *pf_dev)
+{
+	int rc;
+
+	if (pf_dev->rep_info)
+		return 0;
+
+	pf_dev->rep_info =
+		plt_zmalloc(sizeof(pf_dev->rep_info[0]) * CNXK_MAX_REP_PORTS, 0);
+	if (!pf_dev->rep_info) {
+		plt_err("Failed to alloc memory for rep info");
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	/* Allocate switch domain for this PF */
+	rc = rte_eth_switch_domain_alloc(&pf_dev->switch_domain_id);
+	if (rc) {
+		plt_err("Failed to alloc switch domain: %d", rc);
+		goto fail;
+	}
+
+	rc = hotplug_rep_xport_vdev(pf_dev);
+	if (rc) {
+		plt_err("Failed to hotplug representor base port, err %d", rc);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return rc;
+}
+
+static uint16_t
+cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	PLT_SET_USED(tx_queue);
+	PLT_SET_USED(tx_pkts);
+	PLT_SET_USED(nb_pkts);
+
+	return 0;
+}
+
+static uint16_t
+cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	PLT_SET_USED(rx_queue);
+	PLT_SET_USED(rx_pkts);
+	PLT_SET_USED(nb_pkts);
+
+	return 0;
+}
+
+static int
+cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
+{
+	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
+	struct cnxk_rep_dev *rep_params = (struct cnxk_rep_dev *)params;
+	struct rte_eth_link *link;
+	struct cnxk_eth_dev *pf_dev;
+
+	rep_dev->vf_id = rep_params->vf_id;
+	rep_dev->switch_domain_id = rep_params->switch_domain_id;
+	rep_dev->parent_dev = rep_params->parent_dev;
+
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+	eth_dev->data->representor_id = rep_params->vf_id;
+	eth_dev->data->backer_port_id = rep_params->parent_dev->data->port_id;
+
+	eth_dev->data->mac_addrs = plt_zmalloc(RTE_ETHER_ADDR_LEN, 0);
+	if (!eth_dev->data->mac_addrs) {
+		plt_err("Failed to allocate memory for mac addr");
+		return -ENOMEM;
+	}
+
+	rte_eth_random_addr(rep_dev->mac_addr);
+	memcpy(eth_dev->data->mac_addrs, rep_dev->mac_addr, RTE_ETHER_ADDR_LEN);
+
+	/* Set the device operations */
+	eth_dev->dev_ops = &cnxk_rep_dev_ops;
+
+	/* Rx/Tx functions stubs to avoid crashing */
+	eth_dev->rx_pkt_burst = cnxk_rep_rx_burst;
+	eth_dev->tx_pkt_burst = cnxk_rep_tx_burst;
+
+	/* Link state. Inherited from PF */
+	pf_dev = cnxk_eth_pmd_priv(rep_dev->parent_dev);
+	link = &pf_dev->eth_dev->data->dev_link;
+
+	eth_dev->data->dev_link.link_speed = link->link_speed;
+	eth_dev->data->dev_link.link_duplex = link->link_duplex;
+	eth_dev->data->dev_link.link_status = link->link_status;
+	eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
+
+	return 0;
+}
+
+int
+cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct rte_eth_dev *pf_ethdev,
+		   struct rte_eth_devargs *eth_da)
+{
+	char name[RTE_ETH_NAME_MAX_LEN];
+	struct rte_eth_dev *rep_eth_dev;
+	struct cnxk_eth_dev *pf_dev;
+	uint16_t num_rep;
+	int i, rc;
+
+	/* Get the PF device */
+	pf_dev = cnxk_eth_pmd_priv(pf_ethdev);
+
+	/* Check the representor devargs */
+	if (eth_da->type == RTE_ETH_REPRESENTOR_NONE)
+		return 0;
+	if (eth_da->type != RTE_ETH_REPRESENTOR_VF) {
+		plt_err("Unsupported representor type %d", eth_da->type);
+		return -ENOTSUP;
+	}
+	num_rep = eth_da->nb_representor_ports;
+	if (num_rep > CNXK_MAX_REP_PORTS) {
+		plt_err("nb_representor_ports = %d > %d MAX VF REPS", num_rep,
+			CNXK_MAX_REP_PORTS);
+		return -EINVAL;
+	}
+
+	if (num_rep >= RTE_MAX_ETHPORTS) {
+		plt_err("nb_representor_ports = %d >= %d MAX ETHPORTS", num_rep, RTE_MAX_ETHPORTS);
+		return -EINVAL;
+	}
+
+	/* Initialize the internals of representor ports */
+	if (cnxk_init_rep_internal(pf_dev))
+		return 0;
+
+	for (i = 0; i < num_rep; i++) {
+		struct cnxk_rep_dev representor = {.vf_id = eth_da->representor_ports[i],
+						   .switch_domain_id = pf_dev->switch_domain_id,
+						   .parent_dev = pf_ethdev};
+
+		if (representor.vf_id >= pci_dev->max_vfs) {
+			plt_err("VF-Rep id %d >= %d pci dev max vfs", representor.vf_id,
+				pci_dev->max_vfs);
+			continue;
+		}
+
+		/* Representor port name: net_<pci bdf>_representor_<vf id> */
+		snprintf(name, sizeof(name), "net_%s_representor_%d", pci_dev->device.name,
+			 eth_da->representor_ports[i]);
+
+		rc = rte_eth_dev_create(&pci_dev->device, name, sizeof(struct cnxk_rep_dev), NULL,
+					NULL, cnxk_rep_dev_init, &representor);
+		if (rc) {
+			plt_err("failed to create cnxk vf representor %s", name);
+			rc = -EINVAL;
+			goto err;
+		}
+
+		rep_eth_dev = rte_eth_dev_allocated(name);
+		if (!rep_eth_dev) {
+			plt_err("Failed to find the eth_dev for VF-Rep: %s.", name);
+			rc = -ENODEV;
+			goto err;
+		}
+
+		plt_rep_dbg("PF portid %d switch domain %d representor portid %d (%s) probe done",
+			    pf_ethdev->data->port_id, pf_dev->switch_domain_id,
+			    rep_eth_dev->data->port_id, name);
+		pf_dev->rep_info[representor.vf_id].rep_eth_dev = rep_eth_dev;
+		pf_dev->num_reps++;
+	}
+
+	return 0;
+err:
+	return rc;
+}
diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h
new file mode 100644
index 0000000000..24adb9649b
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_rep.h
@@ -0,0 +1,51 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+#ifndef __CNXK_REP_H__
+#define __CNXK_REP_H__
+
+#include <cnxk_ethdev.h>
+
+#define CNXK_REP_XPORT_VDEV_DEVARGS "role=server"
+#define CNXK_REP_XPORT_VDEV_NAME    "net_memif"
+#define CNXK_MAX_REP_PORTS          128
+
+/* Common ethdev ops */
+extern struct eth_dev_ops cnxk_rep_dev_ops;
+
+struct cnxk_rep_dev {
+	uint16_t vf_id;
+	uint16_t switch_domain_id;
+	struct rte_eth_dev *parent_dev;
+	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
+};
+
+static inline struct cnxk_rep_dev *
+cnxk_rep_pmd_priv(const struct rte_eth_dev *eth_dev)
+{
+	return eth_dev->data->dev_private;
+}
+
+int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct rte_eth_dev *pf_ethdev,
+		       struct rte_eth_devargs *eth_da);
+int cnxk_rep_dev_remove(struct rte_eth_dev *pf_ethdev);
+int cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev);
+int cnxk_rep_dev_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info);
+int cnxk_rep_dev_configure(struct rte_eth_dev *eth_dev);
+
+int cnxk_rep_link_update(struct rte_eth_dev *eth_dev, int wait_to_compl);
+int cnxk_rep_dev_start(struct rte_eth_dev *eth_dev);
+int cnxk_rep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, uint16_t nb_desc,
+			    unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
+			    struct rte_mempool *mp);
+int cnxk_rep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, uint16_t nb_desc,
+			    unsigned int socket_id, const struct rte_eth_txconf *tx_conf);
+void cnxk_rep_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
+void cnxk_rep_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
+int cnxk_rep_dev_stop(struct rte_eth_dev *eth_dev);
+int cnxk_rep_dev_close(struct rte_eth_dev *eth_dev);
+int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats);
+int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev);
+int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops);
+
+#endif /* __CNXK_REP_H__ */
diff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c
new file mode 100644
index 0000000000..3f1aab077b
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_rep_ops.c
@@ -0,0 +1,112 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#include <cnxk_rep.h>
+
+int
+cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(wait_to_complete);
+	return 0;
+}
+
+int
+cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *devinfo)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(devinfo);
+	return 0;
+}
+
+int
+cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
+{
+	PLT_SET_USED(ethdev);
+	return 0;
+}
+
+int
+cnxk_rep_dev_start(struct rte_eth_dev *ethdev)
+{
+	PLT_SET_USED(ethdev);
+	return 0;
+}
+
+int
+cnxk_rep_dev_close(struct rte_eth_dev *ethdev)
+{
+	PLT_SET_USED(ethdev);
+	return 0;
+}
+
+int
+cnxk_rep_dev_stop(struct rte_eth_dev *ethdev)
+{
+	PLT_SET_USED(ethdev);
+	return 0;
+}
+
+int
+cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16_t nb_rx_desc,
+			unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mb_pool)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(rx_queue_id);
+	PLT_SET_USED(nb_rx_desc);
+	PLT_SET_USED(socket_id);
+	PLT_SET_USED(rx_conf);
+	PLT_SET_USED(mb_pool);
+	return 0;
+}
+
+void
+cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(queue_id);
+}
+
+int
+cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc,
+			unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(tx_queue_id);
+	PLT_SET_USED(nb_tx_desc);
+	PLT_SET_USED(socket_id);
+	PLT_SET_USED(tx_conf);
+	return 0;
+}
+
+void
+cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(queue_id);
+}
+
+int
+cnxk_rep_stats_get(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(stats);
+	return 0;
+}
+
+int
+cnxk_rep_stats_reset(struct rte_eth_dev *ethdev)
+{
+	PLT_SET_USED(ethdev);
+	return 0;
+}
+
+int
+cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(ops);
+	return 0;
+}
diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build
index e83f3c9050..38dde54ce9 100644
--- a/drivers/net/cnxk/meson.build
+++ b/drivers/net/cnxk/meson.build
@@ -32,6 +32,8 @@  sources = files(
         'cnxk_lookup.c',
         'cnxk_ptp.c',
         'cnxk_flow.c',
+        'cnxk_rep.c',
+        'cnxk_rep_ops.c',
         'cnxk_stats.c',
         'cnxk_tm.c',
 )