[07/19] net/cpfl: create port representor

Message ID 20230809155134.539287-8-beilei.xing@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Headers
Series net/cpfl: support port representor |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Xing, Beilei Aug. 9, 2023, 3:51 p.m. UTC
  From: Beilei Xing <beilei.xing@intel.com>

Track representor requests in a whitelist.
Representor will only be created for active vport.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      | 107 ++++---
 drivers/net/cpfl/cpfl_ethdev.h      |  34 +++
 drivers/net/cpfl/cpfl_representor.c | 448 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h |  26 ++
 drivers/net/cpfl/meson.build        |   1 +
 5 files changed, 573 insertions(+), 43 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h
  

Patch

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 09015fbb08..949a2c8069 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1898,6 +1898,42 @@  cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
 	rte_hash_free(adapter->vport_map_hash);
 }
 
+/* Initialize the per-adapter representor whitelist: the spinlock that
+ * serializes whitelist access and the hash table keyed by cpfl_repr_id.
+ * Returns 0 on success, -EINVAL if the hash table cannot be created.
+ */
+static int
+cpfl_repr_whitelist_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	/* bound by sizeof(hname) rather than a magic 32 so the limit
+	 * always tracks the buffer size
+	 */
+	snprintf(hname, sizeof(hname), "%s-repr_wl", adapter->name);
+
+	rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = hname,
+		.entries = CPFL_REPR_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_repr_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->repr_whitelist_hash = rte_hash_create(&params);
+
+	if (adapter->repr_whitelist_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create repr whitelist hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Release the representor whitelist hash table (rte_hash_free(NULL) is a no-op). */
+static void
+cpfl_repr_whitelist_uninit(struct cpfl_adapter_ext *adapter)
+{
+	rte_hash_free(adapter->repr_whitelist_hash);
+}
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1928,6 +1964,12 @@  cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vport_map_init;
 	}
 
+	ret = cpfl_repr_whitelist_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init representor whitelist");
+		goto err_repr_whitelist_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1952,6 +1994,8 @@  cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_repr_whitelist_uninit(adapter);
+err_repr_whitelist_init:
 	cpfl_vport_map_uninit(adapter);
 err_vport_map_init:
 	idpf_adapter_deinit(base);
@@ -2227,48 +2271,6 @@  cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
-	struct cpfl_devargs *devargs = &adapter->devargs;
-	int i, j;
-
-	/* check and refine repr args */
-	for (i = 0; i < devargs->repr_args_num; i++) {
-		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
-		/* set default host_id to xeon host */
-		if (eth_da->nb_mh_controllers == 0) {
-			eth_da->nb_mh_controllers = 1;
-			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
-		} else {
-			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
-				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->mh_controllers[j]);
-					return -EINVAL;
-				}
-			}
-		}
-
-		/* set default pf to APF */
-		if (eth_da->nb_ports == 0) {
-			eth_da->nb_ports = 1;
-			eth_da->ports[0] = CPFL_PF_TYPE_APF;
-		} else {
-			for (j = 0; j < eth_da->nb_ports; j++) {
-				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->ports[j]);
-					return -EINVAL;
-				}
-			}
-		}
-	}
-
-	return 0;
-}
-
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2304,6 +2306,7 @@  cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
+	uint16_t port_id;
 
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
@@ -2343,11 +2346,23 @@  cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 	retval = cpfl_repr_devargs_process(adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
-		goto err;
+		goto close_ethdev;
 	}
 
+	retval = cpfl_repr_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		goto close_ethdev;
+	}
+
+
 	return 0;
 
+close_ethdev:
+	/* Ethdevs already created for this rte_device can be found via RTE_ETH_FOREACH_DEV_OF */
+	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+		rte_eth_dev_close(port_id);
+	}
 err:
 	rte_spinlock_lock(&cpfl_adapter_lock);
 	TAILQ_REMOVE(&cpfl_adapter_list, adapter, next);
@@ -2374,6 +2389,12 @@  cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
 		return ret;
 	}
 
+	ret = cpfl_repr_create(pci_dev, adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 9c4d8d3ea1..1f5c3a39b8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -21,6 +21,7 @@ 
 
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
+#include "cpfl_representor.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -60,11 +61,32 @@ 
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_NUM	2
+#define CPFL_PF_TYPE_NUM	2
 #define CPFL_HOST_ID_HOST	0
 #define CPFL_HOST_ID_ACC	1
 #define CPFL_PF_TYPE_APF	0
 #define CPFL_PF_TYPE_CPF	1
 
+/* Function IDs on IMC side */
+#define HOST0_APF	0
+#define HOST1_APF	1
+#define HOST2_APF	2
+#define HOST3_APF	3
+#define ACC_APF_ID	4
+#define IMC_APF_ID	5
+#define HOST0_NVME_ID	6
+#define ACC_NVME_ID	7
+#define HOST0_CPF_ID	8
+#define HOST1_CPF_ID	9
+#define HOST2_CPF_ID	10
+#define HOST3_CPF_ID	11
+#define ACC_CPF_ID	12
+#define IMC_IPF_ID	13
+#define ATE_CPF_ID	14
+#define ACC_LCE_ID	15
+#define IMC_MBX_EFD_ID	0
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -136,6 +158,13 @@  struct cpfl_vport {
 	bool p2p_manual_bind;
 };
 
+struct cpfl_repr {
+	struct cpfl_itf itf;
+	struct cpfl_repr_id repr_id;
+	struct rte_ether_addr mac_addr;
+	struct cpfl_vport_info *vport_info;
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -153,6 +182,9 @@  struct cpfl_adapter_ext {
 
 	rte_spinlock_t vport_map_lock;
 	struct rte_hash *vport_map_hash;
+
+	rte_spinlock_t repr_lock;
+	struct rte_hash *repr_whitelist_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -163,6 +195,8 @@  TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 	container_of((p), struct cpfl_adapter_ext, base)
 #define CPFL_DEV_TO_VPORT(dev)					\
 	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_REPR(dev)					\
+	((struct cpfl_repr *)((dev)->data->dev_private))
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
new file mode 100644
index 0000000000..4d91d7311d
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -0,0 +1,448 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_representor.h"
+#include "cpfl_rxtx.h"
+
+/* Attach an ethdev pointer to an existing whitelist entry.
+ * Returns -ENOENT if the repr_id was never whitelisted, otherwise the
+ * result of rte_hash_add_key_data() (0 on success).
+ * NOTE: takes no lock itself — the caller must hold adapter->repr_lock
+ * (cpfl_repr_create() does, around rte_eth_dev_create()).
+ */
+static int
+cpfl_repr_whitelist_update(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_repr_id *repr_id,
+			   struct rte_eth_dev *dev)
+{
+	int ret;
+
+	if (rte_hash_lookup(adapter->repr_whitelist_hash, repr_id) < 0)
+		return -ENOENT;
+
+	ret = rte_hash_add_key_data(adapter->repr_whitelist_hash, repr_id, dev);
+
+	return ret;
+}
+
+/* Record a representor request in the whitelist (key only, no ethdev yet).
+ * Returns 0 on success, -EEXIST if the repr_id is already whitelisted,
+ * or a negative rte_hash error. Serialized by adapter->repr_lock.
+ */
+static int
+cpfl_repr_whitelist_add(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+	if (rte_hash_lookup(adapter->repr_whitelist_hash, repr_id) >= 0) {
+		ret = -EEXIST;
+		goto err;
+	}
+
+	ret = rte_hash_add_key(adapter->repr_whitelist_hash, repr_id);
+	if (ret < 0)
+		goto err;
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+/* Expand one "representor=" devarg into whitelist entries: one entry per
+ * (controller, port) pair for PF representors, plus one per VF id for VF
+ * representors. Entries already whitelisted (-EEXIST) are silently skipped
+ * so repeated probes are idempotent. Returns 0 or the first add failure.
+ */
+static int
+cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter,
+			      struct rte_eth_devargs *eth_da)
+{
+	struct cpfl_repr_id repr_id;
+	int ret, c, p, v;
+
+	for (c = 0; c < eth_da->nb_mh_controllers; c++) {
+		for (p = 0; p < eth_da->nb_ports; p++) {
+			repr_id.type = eth_da->type;
+			if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+				repr_id.host_id = eth_da->mh_controllers[c];
+				repr_id.pf_id = eth_da->ports[p];
+				repr_id.vf_id = 0;
+				ret = cpfl_repr_whitelist_add(adapter, &repr_id);
+				if (ret == -EEXIST)
+					continue;
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to add PF repr to whitelist, "
+							 "host_id = %d, pf_id = %d.",
+						    repr_id.host_id, repr_id.pf_id);
+					return ret;
+				}
+			} else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) {
+				for (v = 0; v < eth_da->nb_representor_ports; v++) {
+					repr_id.host_id = eth_da->mh_controllers[c];
+					repr_id.pf_id = eth_da->ports[p];
+					repr_id.vf_id = eth_da->representor_ports[v];
+					ret = cpfl_repr_whitelist_add(adapter, &repr_id);
+					if (ret == -EEXIST)
+						continue;
+					if (ret) {
+						PMD_DRV_LOG(ERR, "Failed to add VF repr to whitelist, "
+								 "host_id = %d, pf_id = %d, vf_id = %d.",
+							    repr_id.host_id,
+							    repr_id.pf_id,
+							    repr_id.vf_id);
+						return ret;
+					}
+				}
+			}
+			/* other representor types (e.g. SF) are ignored here */
+		}
+	}
+
+	return 0;
+}
+
+/* Validate and normalize all "representor=" devargs, then whitelist the
+ * requested representors. Defaults: controller -> CPFL_HOST_ID_HOST,
+ * pf -> CPFL_PF_TYPE_APF when unspecified. Returns 0 or -EINVAL on an
+ * out-of-range host/port id (first error wins).
+ */
+int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int ret, i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					/* fixed copy-paste: this loop validates port ids,
+					 * not host ids
+					 */
+					PMD_INIT_LOG(ERR, "Invalid Port ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		ret = cpfl_repr_devargs_process_one(adapter, eth_da);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* Remove a representor request from the whitelist (used when the
+ * representor ethdev is closed). Returns 0 on success or the negative
+ * rte_hash_del_key() error. Serialized by adapter->repr_lock.
+ */
+static int
+cpfl_repr_whitelist_del(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	ret = rte_hash_del_key(adapter->repr_whitelist_hash, repr_id);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to delete repr from whitelist."
+				 "host_id = %d, type = %d, pf_id = %d, vf_id = %d",
+				 repr_id->host_id, repr_id->type,
+				 repr_id->pf_id, repr_id->vf_id);
+		goto err;
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+/* Tear down a representor ethdev's private state on close.
+ * mac_addrs is cleared first because it points into the cpfl_repr struct
+ * (set in cpfl_repr_init()), not to heap memory the ethdev layer may free.
+ */
+static int
+cpfl_repr_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+
+	eth_dev->data->mac_addrs = NULL;
+
+	cpfl_repr_whitelist_del(adapter, &repr->repr_id);
+
+	return 0;
+}
+
+/* dev_configure callback: reject configs with more than one RX queue.
+ * NOTE(review): nb_tx_queues is not checked here even though dev_infos_get
+ * advertises max_tx_queues = 1 — confirm whether TX should be limited too.
+ */
+static int
+cpfl_repr_dev_configure(struct rte_eth_dev *dev)
+{
+	/* now only 1 RX queue is supported */
+	if (dev->data->nb_rx_queues > 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* dev_close callback: delegate to cpfl_repr_uninit(). */
+static int
+cpfl_repr_dev_close(struct rte_eth_dev *dev)
+{
+	return cpfl_repr_uninit(dev);
+}
+
+/* dev_infos_get callback: report representor capabilities — a single
+ * RX/TX queue pair, the common cpfl offload sets, default queue configs,
+ * and switch info identifying the representor by the backing vport's VSI id.
+ */
+static int
+cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev,
+		       struct rte_eth_dev_info *dev_info)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+
+	dev_info->device = ethdev->device;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_queues = 1;
+	dev_info->max_tx_queues = 1;
+	dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE;
+	dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE;
+
+	dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL;
+
+	dev_info->rx_offload_capa =
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP		|
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_RX_OFFLOAD_SCATTER		|
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER		|
+		RTE_ETH_RX_OFFLOAD_RSS_HASH		|
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
+	dev_info->tx_offload_capa =
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT		|
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT		|
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS		|
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->switch_info.name = ethdev->device->name;
+	dev_info->switch_info.domain_id = 0; /* the same domain*/
+	dev_info->switch_info.port_id = repr->vport_info->vport_info.vsi_id;
+
+	return 0;
+}
+
+/* dev_start callback: mark all queues started. No hardware interaction —
+ * a representor has no datapath of its own in this patch.
+ */
+static int
+cpfl_repr_dev_start(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/* dev_stop callback: mark all queues stopped and clear dev_started.
+ * NOTE(review): dev_started is cleared here but never set in dev_start —
+ * presumably the ethdev layer sets it; confirm the asymmetry is intended.
+ */
+static int
+cpfl_repr_dev_stop(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+/* Minimal ethdev ops for port representors (no RX/TX queue setup yet). */
+static const struct eth_dev_ops cpfl_repr_dev_ops = {
+	.dev_start		= cpfl_repr_dev_start,
+	.dev_stop		= cpfl_repr_dev_stop,
+	.dev_configure		= cpfl_repr_dev_configure,
+	.dev_close		= cpfl_repr_dev_close,
+	.dev_infos_get		= cpfl_repr_dev_info_get,
+};
+
+/* rte_eth_dev_create() init callback: populate the representor's private
+ * data from cpfl_repr_param, install ops, derive representor_id from the
+ * repr_id bit fields, assign a random MAC, and bind the new ethdev to its
+ * whitelist entry. Caller (cpfl_repr_create()) holds adapter->repr_lock,
+ * which cpfl_repr_whitelist_update() relies on.
+ */
+static int
+cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_repr_param *param = init_param;
+	struct cpfl_adapter_ext *adapter = param->adapter;
+
+	repr->repr_id = param->repr_id;
+	repr->vport_info = param->vport_info;
+	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
+	repr->itf.adapter = adapter;
+	repr->itf.data = eth_dev->data;
+
+	eth_dev->dev_ops = &cpfl_repr_dev_ops;
+
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+	/* bit[15:14] type
+	 * bit[13] xeon/acc
+	 * bit[12] apf/cpf
+	 * bit[11:0] vf
+	 */
+	eth_dev->data->representor_id =
+		(uint16_t)(repr->repr_id.type << 14 |
+			   repr->repr_id.host_id << 13 |
+			   repr->repr_id.pf_id << 12 |
+			   repr->repr_id.vf_id);
+
+	/* mac_addrs points at static storage inside cpfl_repr;
+	 * cpfl_repr_uninit() clears it before close
+	 */
+	eth_dev->data->mac_addrs = &repr->mac_addr;
+
+	rte_eth_random_addr(repr->mac_addr.addr_bytes);
+
+	return cpfl_repr_whitelist_update(adapter, &repr->repr_id, eth_dev);
+}
+
+/* Map a (host_id, pf_id) pair to the corresponding IMC-side function id
+ * (HOST0_APF / HOST0_CPF_ID / ACC_APF_ID / ACC_CPF_ID).
+ * Returns -EINVAL for ids outside the supported host/ACC x APF/CPF matrix.
+ */
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+	if ((host_id != CPFL_HOST_ID_HOST &&
+	     host_id != CPFL_HOST_ID_ACC) ||
+	    (pf_id != CPFL_PF_TYPE_APF &&
+	     pf_id != CPFL_PF_TYPE_CPF))
+		return -EINVAL;
+
+	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = HOST0_APF,
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = HOST0_CPF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = ACC_APF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = ACC_CPF_ID,
+	};
+
+	return func_id_map[host_id][pf_id];
+}
+
+/* Decide whether a whitelisted representor id describes the given vport.
+ * info->func_type appears to encode 0 = PF vport, 1 = VF vport — confirm
+ * against the cpchnl2 definitions.
+ */
+static bool
+match_repr_with_vport(const struct cpfl_repr_id *repr_id,
+		      struct cpchnl2_vport_info *info)
+{
+	int func_id;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
+	    info->func_type == 0) {
+		/* NOTE(review): func_id is only range-validated here and never
+		 * compared against the vport's own function id, so any PF-type
+		 * vport matches any valid PF repr_id — confirm this is intended
+		 * (later revisions compare func_id with the vport's pf id).
+		 */
+		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		if (func_id < 0)
+			return false;
+		else
+			return true;
+	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
+		   info->func_type == 1) {
+		if (repr_id->vf_id == info->vf_id)
+			return true;
+	}
+
+	return false;
+}
+
+/* Create ethdevs for every whitelisted representor that (a) has no ethdev
+ * yet and (b) matches an active vport in the vport map. Called from both
+ * first-probe and probe-again paths, so it must be re-runnable: entries
+ * whose ethdev already exists are skipped, and entries with no matching
+ * vport are left whitelisted for a later call (creation is deferred).
+ * Lock order: repr_lock outer, vport_map_lock inner.
+ * Returns 0, or the rte_eth_dev_create() error on the first failure.
+ */
+int
+cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct rte_eth_dev *dev;
+	uint32_t iter = 0;
+	const struct cpfl_repr_id *repr_id;
+	const struct cpfl_vport_id *vp_id;
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	/* NOTE(review): cpfl_repr_init() updates the data pointer of the entry
+	 * currently being iterated (same key, via rte_hash_add_key_data), not
+	 * the key set — presumably safe with rte_hash_iterate; confirm.
+	 */
+	while (rte_hash_iterate(adapter->repr_whitelist_hash,
+				(const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+		struct cpfl_vport_info *vi;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		uint32_t iter_iter = 0;
+		bool matched;
+
+		/* skip representor already be created */
+		if (dev != NULL)
+			continue;
+
+		if (repr_id->type == RTE_ETH_REPRESENTOR_VF)
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id,
+				 repr_id->vf_id);
+		else
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id);
+
+		/* find a matched vport */
+		rte_spinlock_lock(&adapter->vport_map_lock);
+
+		matched = false;
+		while (rte_hash_iterate(adapter->vport_map_hash,
+					(const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) {
+			struct cpfl_repr_param param;
+
+			if (!match_repr_with_vport(repr_id, &vi->vport_info))
+				continue;
+
+			matched = true;
+
+			/* param is stack-local; rte_eth_dev_create() consumes it
+			 * synchronously via cpfl_repr_init()
+			 */
+			param.adapter = adapter;
+			param.repr_id = *repr_id;
+			param.vport_info = vi;
+
+			ret = rte_eth_dev_create(&pci_dev->device,
+						 name,
+						 sizeof(struct cpfl_repr),
+						 NULL, NULL, cpfl_repr_init,
+						 &param);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
+				/* unlock in reverse acquisition order before bailing out */
+				rte_spinlock_unlock(&adapter->vport_map_lock);
+				rte_spinlock_unlock(&adapter->repr_lock);
+				return ret;
+			}
+			break;
+		}
+
+		/* warning if no match vport detected */
+		if (!matched)
+			PMD_INIT_LOG(WARNING, "No matched vport for representor %s "
+					      "creation will be deferred when vport is detected",
+					      name);
+
+		rte_spinlock_unlock(&adapter->vport_map_lock);
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_representor.h b/drivers/net/cpfl/cpfl_representor.h
new file mode 100644
index 0000000000..d3a4de531e
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.h
@@ -0,0 +1,26 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_REPRESENTOR_H_
+#define _CPFL_REPRESENTOR_H_
+
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+/* Identity of a requested representor; used as the whitelist hash key. */
+struct cpfl_repr_id {
+	uint8_t host_id; /* CPFL_HOST_ID_HOST or CPFL_HOST_ID_ACC */
+	uint8_t pf_id;   /* CPFL_PF_TYPE_APF or CPFL_PF_TYPE_CPF */
+	uint8_t type;    /* RTE_ETH_REPRESENTOR_PF or _VF */
+	uint8_t vf_id;   /* VF index; 0 for PF representors */
+};
+
+/* Init parameters handed to cpfl_repr_init() through rte_eth_dev_create(). */
+struct cpfl_repr_param {
+	struct cpfl_adapter_ext *adapter;    /* owning adapter */
+	struct cpfl_repr_id repr_id;         /* identity of the representor to create */
+	struct cpfl_vport_info *vport_info;  /* matched backing vport */
+};
+
+int cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter);
+int cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 28167bb81d..1d963e5fd1 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -16,6 +16,7 @@  deps += ['hash', 'common_idpf']
 sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
+        'cpfl_representor.c',
 )
 
 if arch_subdir == 'x86'