[v4,09/10] net/cpfl: create port representor

Message ID 20230908111701.1022724-10-beilei.xing@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Headers
Series net/cpfl: support port representor |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Xing, Beilei Sept. 8, 2023, 11:17 a.m. UTC
  From: Beilei Xing <beilei.xing@intel.com>

Track representor requests in the allowlist.
Representors will only be created for active vports.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      | 109 +++---
 drivers/net/cpfl/cpfl_ethdev.h      |  36 ++
 drivers/net/cpfl/cpfl_representor.c | 586 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.h |  26 ++
 drivers/net/cpfl/meson.build        |   1 +
 5 files changed, 714 insertions(+), 44 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h
  

Comments

Jingjing Wu Sept. 9, 2023, 3:04 a.m. UTC | #1
> +		/* warning if no match vport detected */
> +		if (!matched)
> +			PMD_INIT_LOG(WARNING, "No matched vport for
> representor %s "
> +					      "creation will be deferred when
> vport is detected",
> +					      name);
> +
If the vport info response is received successfully, in what case would "matched" be false? Also, I could not find the deferral process mentioned in the warning.
> +		rte_spinlock_unlock(&adapter->vport_map_lock);
> +	}
> +
> +err:
> +	rte_spinlock_unlock(&adapter->repr_lock);
> +	rte_free(vlist_resp);
> +	return ret;
> +}
  

Patch

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 236347eeb3..330a865e3c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1643,7 +1643,7 @@  cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 	}
 }
 
-static int
+int
 cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 		       struct cpfl_vport_id *vport_identity,
 		       struct cpchnl2_vport_info *vport_info)
@@ -1896,6 +1896,42 @@  cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
 	rte_hash_free(adapter->vport_map_hash);
 }
 
+static int
+cpfl_repr_allowlist_init(struct cpfl_adapter_ext *adapter)
+{
+	char hname[32];
+
+	snprintf(hname, 32, "%s-repr_wl", adapter->name);
+
+	rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+	struct rte_hash_parameters params = {
+		.name = hname,
+		.entries = CPFL_REPR_HASH_ENTRY_NUM,
+		.key_len = sizeof(struct cpfl_repr_id),
+		.hash_func = rte_hash_crc,
+		.socket_id = SOCKET_ID_ANY,
+	};
+
+	adapter->repr_allowlist_hash = rte_hash_create(&params);
+
+	if (adapter->repr_allowlist_hash == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to create repr allowlist hash");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+cpfl_repr_allowlist_uninit(struct cpfl_adapter_ext *adapter)
+{
+	rte_hash_free(adapter->repr_allowlist_hash);
+}
+
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -1926,6 +1962,12 @@  cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vport_map_init;
 	}
 
+	ret = cpfl_repr_allowlist_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init representor allowlist");
+		goto err_repr_allowlist_init;
+	}
+
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
 	adapter->max_vport_nb = adapter->base.caps.max_vports > CPFL_MAX_VPORT_NUM ?
@@ -1950,6 +1992,8 @@  cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+	cpfl_repr_allowlist_uninit(adapter);
+err_repr_allowlist_init:
 	cpfl_vport_map_uninit(adapter);
 err_vport_map_init:
 	idpf_adapter_deinit(base);
@@ -2225,48 +2269,6 @@  cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
 	return 0;
 }
 
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
-	struct cpfl_devargs *devargs = &adapter->devargs;
-	int i, j;
-
-	/* check and refine repr args */
-	for (i = 0; i < devargs->repr_args_num; i++) {
-		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
-		/* set default host_id to xeon host */
-		if (eth_da->nb_mh_controllers == 0) {
-			eth_da->nb_mh_controllers = 1;
-			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
-		} else {
-			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
-				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->mh_controllers[j]);
-					return -EINVAL;
-				}
-			}
-		}
-
-		/* set default pf to APF */
-		if (eth_da->nb_ports == 0) {
-			eth_da->nb_ports = 1;
-			eth_da->ports[0] = CPFL_PF_TYPE_APF;
-		} else {
-			for (j = 0; j < eth_da->nb_ports; j++) {
-				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
-					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-						     eth_da->ports[j]);
-					return -EINVAL;
-				}
-			}
-		}
-	}
-
-	return 0;
-}
-
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
@@ -2302,6 +2304,7 @@  cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
 	int retval;
+	uint16_t port_id;
 
 	adapter = rte_zmalloc("cpfl_adapter_ext",
 			      sizeof(struct cpfl_adapter_ext), 0);
@@ -2341,11 +2344,23 @@  cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 	retval = cpfl_repr_devargs_process(adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to process repr devargs");
-		goto err;
+		goto close_ethdev;
 	}
 
+	retval = cpfl_repr_create(pci_dev, adapter);
+	if (retval != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		goto close_ethdev;
+	}
+
+
 	return 0;
 
+close_ethdev:
+	/* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */
+	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+		rte_eth_dev_close(port_id);
+	}
 err:
 	rte_spinlock_lock(&cpfl_adapter_lock);
 	TAILQ_REMOVE(&cpfl_adapter_list, adapter, next);
@@ -2372,6 +2387,12 @@  cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
 		return ret;
 	}
 
+	ret = cpfl_repr_create(pci_dev, adapter);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to create representors ");
+		return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index a501ccde14..4937d2c6e3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -21,6 +21,7 @@ 
 
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
+#include "cpfl_representor.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -60,11 +61,31 @@ 
 #define IDPF_DEV_ID_CPF			0x1453
 #define VIRTCHNL2_QUEUE_GROUP_P2P	0x100
 
+#define CPFL_HOST_ID_NUM	2
+#define CPFL_PF_TYPE_NUM	2
 #define CPFL_HOST_ID_HOST	0
 #define CPFL_HOST_ID_ACC	1
 #define CPFL_PF_TYPE_APF	0
 #define CPFL_PF_TYPE_CPF	1
 
+/* Function IDs on IMC side */
+#define CPFL_HOST0_APF		0
+#define CPFL_ACC_APF_ID		4
+#define CPFL_HOST0_CPF_ID	8
+#define CPFL_ACC_CPF_ID		12
+
+#define CPFL_VPORT_LAN_PF	0
+#define CPFL_VPORT_LAN_VF	1
+
+/* bit[15:14] type
+ * bit[13] host/accelerator core
+ * bit[12] apf/cpf
+ * bit[11:0] vf
+ */
+#define CPFL_REPRESENTOR_ID(type, host_id, pf_id, vf_id)	\
+	((((type) & 0x3) << 14) + (((host_id) & 0x1) << 13) +	\
+	 (((pf_id) & 0x1) << 12) + ((vf_id) & 0xfff))
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -136,6 +157,13 @@  struct cpfl_vport {
 	bool p2p_manual_bind;
 };
 
+struct cpfl_repr {
+	struct cpfl_itf itf;
+	struct cpfl_repr_id repr_id;
+	struct rte_ether_addr mac_addr;
+	struct cpfl_vport_info *vport_info;
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -153,10 +181,16 @@  struct cpfl_adapter_ext {
 
 	rte_spinlock_t vport_map_lock;
 	struct rte_hash *vport_map_hash;
+
+	rte_spinlock_t repr_lock;
+	struct rte_hash *repr_allowlist_hash;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_vport_id *vport_identity,
+			   struct cpchnl2_vport_info *vport_info);
 int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_list_response *response);
@@ -171,6 +205,8 @@  int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 	container_of((p), struct cpfl_adapter_ext, base)
 #define CPFL_DEV_TO_VPORT(dev)					\
 	((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_REPR(dev)					\
+	((struct cpfl_repr *)((dev)->data->dev_private))
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
new file mode 100644
index 0000000000..0cd92b1351
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -0,0 +1,586 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_representor.h"
+#include "cpfl_rxtx.h"
+
+static int
+cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
+			   struct cpfl_repr_id *repr_id,
+			   struct rte_eth_dev *dev)
+{
+	int ret;
+
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) < 0)
+		return -ENOENT;
+
+	ret = rte_hash_add_key_data(adapter->repr_allowlist_hash, repr_id, dev);
+
+	return ret;
+}
+
+static int
+cpfl_repr_allowlist_add(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+	if (rte_hash_lookup(adapter->repr_allowlist_hash, repr_id) >= 0) {
+		ret = -EEXIST;
+		goto err;
+	}
+
+	ret = rte_hash_add_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0)
+		goto err;
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+static int
+cpfl_repr_devargs_process_one(struct cpfl_adapter_ext *adapter,
+			      struct rte_eth_devargs *eth_da)
+{
+	struct cpfl_repr_id repr_id;
+	int ret, c, p, v;
+
+	for (c = 0; c < eth_da->nb_mh_controllers; c++) {
+		for (p = 0; p < eth_da->nb_ports; p++) {
+			repr_id.type = eth_da->type;
+			if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+				repr_id.host_id = eth_da->mh_controllers[c];
+				repr_id.pf_id = eth_da->ports[p];
+				repr_id.vf_id = 0;
+				ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+				if (ret == -EEXIST)
+					continue;
+				if (ret) {
+					PMD_DRV_LOG(ERR, "Failed to add PF repr to allowlist, "
+							 "host_id = %d, pf_id = %d.",
+						    repr_id.host_id, repr_id.pf_id);
+					return ret;
+				}
+			} else if (eth_da->type == RTE_ETH_REPRESENTOR_VF) {
+				for (v = 0; v < eth_da->nb_representor_ports; v++) {
+					repr_id.host_id = eth_da->mh_controllers[c];
+					repr_id.pf_id = eth_da->ports[p];
+					repr_id.vf_id = eth_da->representor_ports[v];
+					ret = cpfl_repr_allowlist_add(adapter, &repr_id);
+					if (ret == -EEXIST)
+						continue;
+					if (ret) {
+						PMD_DRV_LOG(ERR, "Failed to add VF repr to allowlist, "
+								 "host_id = %d, pf_id = %d, vf_id = %d.",
+							    repr_id.host_id,
+							    repr_id.pf_id,
+							    repr_id.vf_id);
+						return ret;
+					}
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+int
+cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_devargs *devargs = &adapter->devargs;
+	int ret, i, j;
+
+	/* check and refine repr args */
+	for (i = 0; i < devargs->repr_args_num; i++) {
+		struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
+
+		/* set default host_id to xeon host */
+		if (eth_da->nb_mh_controllers == 0) {
+			eth_da->nb_mh_controllers = 1;
+			eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
+		} else {
+			for (j = 0; j < eth_da->nb_mh_controllers; j++) {
+				if (eth_da->mh_controllers[j] > CPFL_HOST_ID_ACC) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->mh_controllers[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		/* set default pf to APF */
+		if (eth_da->nb_ports == 0) {
+			eth_da->nb_ports = 1;
+			eth_da->ports[0] = CPFL_PF_TYPE_APF;
+		} else {
+			for (j = 0; j < eth_da->nb_ports; j++) {
+				if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
+					PMD_INIT_LOG(ERR, "Invalid Host ID %d",
+						     eth_da->ports[j]);
+					return -EINVAL;
+				}
+			}
+		}
+
+		ret = cpfl_repr_devargs_process_one(adapter, eth_da);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_repr_allowlist_del(struct cpfl_adapter_ext *adapter,
+			struct cpfl_repr_id *repr_id)
+{
+	int ret;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	ret = rte_hash_del_key(adapter->repr_allowlist_hash, repr_id);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to delete repr from allowlist."
+				 "host_id = %d, type = %d, pf_id = %d, vf_id = %d",
+				 repr_id->host_id, repr_id->type,
+				 repr_id->pf_id, repr_id->vf_id);
+		goto err;
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return 0;
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return ret;
+}
+
+static int
+cpfl_repr_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+
+	eth_dev->data->mac_addrs = NULL;
+
+	cpfl_repr_allowlist_del(adapter, &repr->repr_id);
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_configure(struct rte_eth_dev *dev)
+{
+	/* now only 1 RX queue is supported */
+	if (dev->data->nb_rx_queues > 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_close(struct rte_eth_dev *dev)
+{
+	return cpfl_repr_uninit(dev);
+}
+
+static int
+cpfl_repr_dev_info_get(struct rte_eth_dev *ethdev,
+		       struct rte_eth_dev_info *dev_info)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+
+	dev_info->device = ethdev->device;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_queues = 1;
+	dev_info->max_tx_queues = 1;
+	dev_info->min_rx_bufsize = CPFL_MIN_BUF_SIZE;
+	dev_info->max_rx_pktlen = CPFL_MAX_FRAME_SIZE;
+
+	dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL;
+
+	dev_info->rx_offload_capa =
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP		|
+		RTE_ETH_RX_OFFLOAD_QINQ_STRIP		|
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_RX_OFFLOAD_SCATTER		|
+		RTE_ETH_RX_OFFLOAD_VLAN_FILTER		|
+		RTE_ETH_RX_OFFLOAD_RSS_HASH		|
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
+	dev_info->tx_offload_capa =
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT		|
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT		|
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM		|
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM	|
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS		|
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = CPFL_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
+	dev_info->switch_info.name = ethdev->device->name;
+	dev_info->switch_info.domain_id = 0; /* the same domain */
+	dev_info->switch_info.port_id = repr->vport_info->vport_info.vsi_id;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_start(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+static int
+cpfl_repr_dev_stop(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+static int
+cpfl_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_rxconf *conf,
+			 __rte_unused struct rte_mempool *pool)
+{
+	/* Dummy */
+	return 0;
+}
+
+static int
+cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+			 __rte_unused uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 __rte_unused unsigned int socket_id,
+			 __rte_unused const struct rte_eth_txconf *conf)
+{
+	/* Dummy */
+	return 0;
+}
+
+static const struct eth_dev_ops cpfl_repr_dev_ops = {
+	.dev_start		= cpfl_repr_dev_start,
+	.dev_stop		= cpfl_repr_dev_stop,
+	.dev_configure		= cpfl_repr_dev_configure,
+	.dev_close		= cpfl_repr_dev_close,
+	.dev_infos_get		= cpfl_repr_dev_info_get,
+
+	.rx_queue_setup		= cpfl_repr_rx_queue_setup,
+	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
+};
+
+static int
+cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
+	struct cpfl_repr_param *param = init_param;
+	struct cpfl_adapter_ext *adapter = param->adapter;
+
+	repr->repr_id = param->repr_id;
+	repr->vport_info = param->vport_info;
+	repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
+	repr->itf.adapter = adapter;
+	repr->itf.data = eth_dev->data;
+
+	eth_dev->dev_ops = &cpfl_repr_dev_ops;
+
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	eth_dev->data->representor_id =
+		CPFL_REPRESENTOR_ID(repr->repr_id.type,
+				    repr->repr_id.host_id,
+				    repr->repr_id.pf_id,
+				    repr->repr_id.vf_id);
+
+	eth_dev->data->mac_addrs = &repr->mac_addr;
+
+	rte_eth_random_addr(repr->mac_addr.addr_bytes);
+
+	return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
+}
+
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+	if ((host_id != CPFL_HOST_ID_HOST &&
+	     host_id != CPFL_HOST_ID_ACC) ||
+	    (pf_id != CPFL_PF_TYPE_APF &&
+	     pf_id != CPFL_PF_TYPE_CPF))
+		return -EINVAL;
+
+	static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = {
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
+		[CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
+		[CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
+	};
+
+	return func_id_map[host_id][pf_id];
+}
+
+static bool
+cpfl_match_repr_with_vport(const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_info *info)
+{
+	int func_id;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF &&
+	    info->func_type == CPFL_VPORT_LAN_PF) {
+		func_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		if (func_id < 0)
+			return false;
+		else
+			return true;
+	} else if (repr_id->type == RTE_ETH_REPRESENTOR_VF &&
+		   info->func_type == CPFL_VPORT_LAN_VF) {
+		if (repr_id->vf_id == info->vf_id)
+			return true;
+	}
+
+	return false;
+}
+
+static int
+cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_get_vport_list_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_list_get(adapter, &vi, response);
+
+	return ret;
+}
+
+static int
+cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id,
+			   struct cpchnl2_vport_id *vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+		vi.vf_id = 0;
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_cc_vport_info_get(adapter, vport_id, &vi, response);
+
+	return ret;
+}
+
+static int
+cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
+			   const struct cpfl_repr_id *repr_id, uint32_t vport_id,
+			   struct cpchnl2_get_vport_info_response *response)
+{
+	struct cpfl_vport_id vi;
+	int ret;
+
+	vi.vport_id = vport_id;
+	if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+		/* PF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+		vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+	} else {
+		/* VF */
+		vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+		vi.pf_id = CPFL_HOST0_APF;
+		vi.vf_id = repr_id->vf_id;
+	}
+
+	ret = cpfl_vport_info_create(adapter, &vi, &response->info);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Fail to update vport map hash for representor.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
+{
+	struct rte_eth_dev *dev;
+	uint32_t iter = 0;
+	const struct cpfl_repr_id *repr_id;
+	const struct cpfl_vport_id *vp_id;
+	struct cpchnl2_get_vport_list_response *vlist_resp;
+	struct cpchnl2_get_vport_info_response vinfo_resp;
+	int ret;
+
+	vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (vlist_resp == NULL)
+		return -ENOMEM;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	while (rte_hash_iterate(adapter->repr_allowlist_hash,
+				(const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+		struct cpfl_vport_info *vi;
+		char name[RTE_ETH_NAME_MAX_LEN];
+		uint32_t iter_iter = 0;
+		bool matched;
+		int i;
+
+		/* skip representor already be created */
+		if (dev != NULL)
+			continue;
+
+		if (repr_id->type == RTE_ETH_REPRESENTOR_VF)
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%dvf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id,
+				 repr_id->vf_id);
+		else
+			snprintf(name, sizeof(name), "net_%s_representor_c%dpf%d",
+				 pci_dev->name,
+				 repr_id->host_id,
+				 repr_id->pf_id);
+
+		/* get vport list for the port representor */
+		ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d's vport list",
+				     repr_id->host_id, repr_id->pf_id, repr_id->vf_id);
+			goto err;
+		}
+
+		/* get all vport info for the port representor */
+		for (i = 0; i < vlist_resp->nof_vports; i++) {
+			ret = cpfl_repr_vport_info_query(adapter, repr_id,
+							 &vlist_resp->vports[i], &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to get host%d pf%d vf%d vport[%d]'s info",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				goto err;
+			}
+
+			ret = cpfl_repr_vport_map_update(adapter, repr_id,
+						 vlist_resp->vports[i].vport_id, &vinfo_resp);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to update  host%d pf%d vf%d vport[%d]'s info to vport_map_hash",
+					     repr_id->host_id, repr_id->pf_id, repr_id->vf_id,
+					     vlist_resp->vports[i].vport_id);
+				goto err;
+			}
+		}
+
+		/* find a matched vport */
+		rte_spinlock_lock(&adapter->vport_map_lock);
+
+		matched = false;
+		while (rte_hash_iterate(adapter->vport_map_hash,
+					(const void **)&vp_id, (void **)&vi, &iter_iter) >= 0) {
+			struct cpfl_repr_param param;
+
+			if (!cpfl_match_repr_with_vport(repr_id, &vi->vport_info))
+				continue;
+
+			matched = true;
+
+			param.adapter = adapter;
+			param.repr_id = *repr_id;
+			param.vport_info = vi;
+
+			ret = rte_eth_dev_create(&pci_dev->device,
+						 name,
+						 sizeof(struct cpfl_repr),
+						 NULL, NULL, cpfl_repr_init,
+						 &param);
+			if (ret != 0) {
+				PMD_INIT_LOG(ERR, "Failed to create representor %s", name);
+				rte_spinlock_unlock(&adapter->vport_map_lock);
+				goto err;
+			}
+			break;
+		}
+
+		/* warning if no match vport detected */
+		if (!matched)
+			PMD_INIT_LOG(WARNING, "No matched vport for representor %s "
+					      "creation will be deferred when vport is detected",
+					      name);
+
+		rte_spinlock_unlock(&adapter->vport_map_lock);
+	}
+
+err:
+	rte_spinlock_unlock(&adapter->repr_lock);
+	rte_free(vlist_resp);
+	return ret;
+}
diff --git a/drivers/net/cpfl/cpfl_representor.h b/drivers/net/cpfl/cpfl_representor.h
new file mode 100644
index 0000000000..d3a4de531e
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_representor.h
@@ -0,0 +1,26 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_REPRESENTOR_H_
+#define _CPFL_REPRESENTOR_H_
+
+#include <ethdev_pci.h>
+#include <rte_ethdev.h>
+
+struct cpfl_repr_id {
+	uint8_t host_id;
+	uint8_t pf_id;
+	uint8_t type;
+	uint8_t vf_id;
+};
+
+struct cpfl_repr_param {
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_repr_id repr_id;
+	struct cpfl_vport_info *vport_info;
+};
+
+int cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter);
+int cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 2f0f5d8434..d8b92ae16a 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -17,6 +17,7 @@  sources = files(
         'cpfl_ethdev.c',
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
+        'cpfl_representor.c',
 )
 
 if arch_subdir == 'x86'