@@ -818,6 +818,7 @@ M: Nithin Dabilpuram <ndabilpuram@marvell.com>
M: Kiran Kumar K <kirankumark@marvell.com>
M: Sunil Kumar Kori <skori@marvell.com>
M: Satha Rao <skoteshwar@marvell.com>
+M: Harman Kalra <hkalra@marvell.com>
T: git://dpdk.org/next/dpdk-next-net-mrvl
F: drivers/common/cnxk/
F: drivers/net/cnxk/
@@ -37,6 +37,7 @@ Features of the CNXK Ethdev PMD are:
- Inline IPsec processing support
- Ingress meter support
- Queue based priority flow control support
+- Port representors
Prerequisites
-------------
@@ -640,6 +641,40 @@ Runtime Config Options for inline device
With the above configuration, the driver enables packet injection from ARM
cores to the crypto device for processing and sends the packets back on the
Rx path.
+Port Representors
+-----------------
+
+The CNXK driver supports the port representor model by adding virtual Ethernet
+ports that provide a logical representation in DPDK of physical function (PF)
+or SR-IOV virtual function (VF) devices, for control and monitoring.
+
+The base (parent) device underneath the representor ports is an eswitch
+device, which is not a cnxk Ethernet device but has NIC Rx and Tx
+capabilities. Each representor port is backed by an RQ and SQ pair of this
+eswitch device.
+
+The implementation supports representors for both physical functions and
+virtual functions.
+
+Port representor ethdev instances can be spawned on an as-needed basis
+through configuration parameters passed to the driver of the underlying
+base device using the devargs ``-a <base PCI BDF>,representor=pf*vf*``.
+
+.. note::
+
+ Representor ports for the respective representees should be requested via
+ the standard representor devargs patterns.
+ E.g. to create a representor for representee PF1VF0, the devargs to pass
+ are ``-a <base PCI BDF>,representor=pf1vf0``.
+
+ Multiple port representors can be created with a pattern such as:
+ ``-a <base PCI BDF>,representor=[pf0vf[1,2],pf1vf[2-5]]``
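+
+For example, a possible ``dpdk-testpmd`` invocation creating representors
+for VF0 and VF1 of PF1 (a sketch; substitute the actual eswitch PCI BDF):
+
+.. code-block:: console
+
+   dpdk-testpmd -a <base PCI BDF>,representor=pf1vf[0-1] -- -i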
+
+The port representor PMD supports the following operations:
+
+- Get PF/VF statistics
+- Flow operations: create, validate, destroy, query, flush, dump
+
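+For example, with the representor probed as port 1, flows and statistics can
+be exercised through ``testpmd`` (a sketch; the pattern and actions shown are
+placeholders):
+
+.. code-block:: console
+
+   testpmd> flow create 1 ingress pattern eth / end actions count / end
+   testpmd> flow query 1 0 count
+   testpmd> show port stats 1
+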
Debugging Options
-----------------
@@ -3,6 +3,7 @@
*/
#include <cnxk_eswitch.h>
+#include <cnxk_rep.h>
#define CNXK_NIX_DEF_SQ_COUNT 512
@@ -62,6 +63,10 @@ cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
goto exit;
}
+ /* Remove representor devices associated with PF */
+ if (eswitch_dev->repr_cnt.nb_repr_created)
+ cnxk_rep_dev_remove(eswitch_dev);
+
/* Cleanup HW resources */
eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);
@@ -648,6 +653,13 @@ cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pc
eswitch_dev->repr_cnt.max_repr, eswitch_dev->repr_cnt.nb_repr_created,
roc_nix_get_pf_func(&eswitch_dev->nix));
+ /* Probe representor ports */
+ rc = cnxk_rep_dev_probe(pci_dev, eswitch_dev);
+ if (rc) {
+ plt_err("Failed to probe representor ports");
+ goto rsrc_cleanup;
+ }
+
/* Spinlock for synchronization between representors traffic and control
* messages
*/
@@ -66,6 +66,11 @@ struct cnxk_eswitch_repr_cnt {
uint16_t nb_repr_started;
};
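+
+/* Per-PF switch domain; VF representors share the switch domain
+ * allocated for their parent PF (see cnxk_rep_parent_setup()).
+ */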
+struct cnxk_eswitch_switch_domain {
+ uint16_t switch_domain_id;
+ uint16_t pf;
+};
+
struct cnxk_rep_info {
struct rte_eth_dev *rep_eth_dev;
};
@@ -121,7 +126,8 @@ struct cnxk_eswitch_dev {
/* Port representor fields */
rte_spinlock_t rep_lock;
- uint16_t switch_domain_id;
+ uint16_t nb_switch_domain;
+ struct cnxk_eswitch_switch_domain sw_dom[RTE_MAX_ETHPORTS];
uint16_t eswitch_vdev;
struct cnxk_rep_info *rep_info;
};
new file mode 100644
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+#include <cnxk_rep.h>
+
+#define PF_SHIFT 10
+#define PF_MASK 0x3F
+
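+/* The PF number is encoded in bits [15:10] of hw_func, hence the
+ * shift of 10 and the 6-bit mask.
+ */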
+static uint16_t
+get_pf(uint16_t hw_func)
+{
+ return (hw_func >> PF_SHIFT) & PF_MASK;
+}
+
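+/* Return the switch domain id set up for this PF in
+ * cnxk_rep_parent_setup(); VF representors share their PF's domain.
+ */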
+static uint16_t
+switch_domain_id_allocate(struct cnxk_eswitch_dev *eswitch_dev, uint16_t pf)
+{
+ int i = 0;
+
+ for (i = 0; i < eswitch_dev->nb_switch_domain; i++) {
+ if (eswitch_dev->sw_dom[i].pf == pf)
+ return eswitch_dev->sw_dom[i].switch_domain_id;
+ }
+
+ return RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+}
+
+int
+cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ plt_rep_dbg("Representor port:%d uninit", ethdev->data->port_id);
+ rte_free(ethdev->data->mac_addrs);
+ ethdev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+int
+cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < eswitch_dev->nb_switch_domain; i++) {
+ rc = rte_eth_switch_domain_free(eswitch_dev->sw_dom[i].switch_domain_id);
+ if (rc)
+ plt_err("Failed to alloc switch domain: %d", rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_rep_parent_setup(struct cnxk_eswitch_dev *eswitch_dev)
+{
+ uint16_t pf, prev_pf = 0, switch_domain_id;
+ int rc, i, j = 0;
+
+ if (eswitch_dev->rep_info)
+ return 0;
+
+ eswitch_dev->rep_info =
+ plt_zmalloc(sizeof(eswitch_dev->rep_info[0]) * eswitch_dev->repr_cnt.max_repr, 0);
+ if (!eswitch_dev->rep_info) {
+ plt_err("Failed to alloc memory for rep info");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ /* Allocate a switch domain for each PF (VFs fall under the same domain as their PF) */
+ for (i = 0; i < eswitch_dev->repr_cnt.max_repr; i++) {
+ pf = get_pf(eswitch_dev->nix.rep_pfvf_map[i]);
+ if (pf == prev_pf)
+ continue;
+
+ rc = rte_eth_switch_domain_alloc(&switch_domain_id);
+ if (rc) {
+ plt_err("Failed to alloc switch domain: %d", rc);
+ goto fail;
+ }
+ plt_rep_dbg("Allocated switch domain id %d for pf %d\n", switch_domain_id, pf);
+ eswitch_dev->sw_dom[j].switch_domain_id = switch_domain_id;
+ eswitch_dev->sw_dom[j].pf = pf;
+ prev_pf = pf;
+ j++;
+ }
+ eswitch_dev->nb_switch_domain = j;
+
+ return 0;
+fail:
+ return rc;
+}
+
+static uint16_t
+cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ PLT_SET_USED(tx_queue);
+ PLT_SET_USED(tx_pkts);
+ PLT_SET_USED(nb_pkts);
+
+ return 0;
+}
+
+static uint16_t
+cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ PLT_SET_USED(rx_queue);
+ PLT_SET_USED(rx_pkts);
+ PLT_SET_USED(nb_pkts);
+
+ return 0;
+}
+
+static int
+cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
+{
+ struct cnxk_rep_dev *rep_params = (struct cnxk_rep_dev *)params;
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
+
+ rep_dev->port_id = rep_params->port_id;
+ rep_dev->switch_domain_id = rep_params->switch_domain_id;
+ rep_dev->parent_dev = rep_params->parent_dev;
+ rep_dev->hw_func = rep_params->hw_func;
+ rep_dev->rep_id = rep_params->rep_id;
+
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ eth_dev->data->representor_id = rep_params->port_id;
+ eth_dev->data->backer_port_id = eth_dev->data->port_id;
+
+ eth_dev->data->mac_addrs = plt_zmalloc(RTE_ETHER_ADDR_LEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ plt_err("Failed to allocate memory for mac addr");
+ return -ENOMEM;
+ }
+
+ rte_eth_random_addr(rep_dev->mac_addr);
+ memcpy(eth_dev->data->mac_addrs, rep_dev->mac_addr, RTE_ETHER_ADDR_LEN);
+
+ /* Set the device operations */
+ eth_dev->dev_ops = &cnxk_rep_dev_ops;
+
+ /* Rx/Tx function stubs to avoid crashes */
+ eth_dev->rx_pkt_burst = cnxk_rep_rx_burst;
+ eth_dev->tx_pkt_burst = cnxk_rep_tx_burst;
+
+ /* Only single queues for representor devices */
+ eth_dev->data->nb_rx_queues = 1;
+ eth_dev->data->nb_tx_queues = 1;
+
+ eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+ eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
+ eth_dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
+
+ return 0;
+}
+
+static int
+create_representor_ethdev(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev,
+ struct cnxk_eswitch_devargs *esw_da, int idx)
+{
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *rep_eth_dev;
+ uint16_t hw_func;
+ int rc = 0;
+
+ struct cnxk_rep_dev rep = {.port_id = eswitch_dev->repr_cnt.nb_repr_probed,
+ .parent_dev = eswitch_dev};
+
+ if (esw_da->type == CNXK_ESW_DA_TYPE_PFVF) {
+ hw_func = esw_da->repr_hw_info[idx].hw_func;
+ rep.switch_domain_id = switch_domain_id_allocate(eswitch_dev, get_pf(hw_func));
+ if (rep.switch_domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+ plt_err("Failed to get a valid switch domain id");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ esw_da->repr_hw_info[idx].port_id = rep.port_id;
+ /* Representor port name: net_<bdf>_hw_<hw_func>_representor_<id> */
+ snprintf(name, sizeof(name), "net_%s_hw_%x_representor_%d", pci_dev->device.name,
+ hw_func, rep.port_id);
+
+ rep.hw_func = hw_func;
+ rep.rep_id = esw_da->repr_hw_info[idx].rep_id;
+
+ } else {
+ snprintf(name, sizeof(name), "net_%s_representor_%d", pci_dev->device.name,
+ rep.port_id);
+ rep.switch_domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+ }
+
+ rc = rte_eth_dev_create(&pci_dev->device, name, sizeof(struct cnxk_rep_dev), NULL, NULL,
+ cnxk_rep_dev_init, &rep);
+ if (rc) {
+ plt_err("Failed to create cnxk vf representor %s", name);
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ rep_eth_dev = rte_eth_dev_allocated(name);
+ if (!rep_eth_dev) {
+ plt_err("Failed to find the eth_dev for VF-Rep: %s.", name);
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ plt_rep_dbg("Representor portid %d (%s) type %d probe done", rep_eth_dev->data->port_id,
+ name, esw_da->da.type);
+ eswitch_dev->rep_info[rep.port_id].rep_eth_dev = rep_eth_dev;
+ eswitch_dev->repr_cnt.nb_repr_probed++;
+
+ return 0;
+fail:
+ return rc;
+}
+
+int
+cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev)
+{
+ struct cnxk_eswitch_devargs *esw_da;
+ uint16_t num_rep;
+ int i, j, rc;
+
+ if (eswitch_dev->repr_cnt.nb_repr_created > RTE_MAX_ETHPORTS) {
+ plt_err("nb_representor_ports %d > %d MAX ETHPORTS\n",
+ eswitch_dev->repr_cnt.nb_repr_created, RTE_MAX_ETHPORTS);
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ /* Initialize the internals of representor ports */
+ rc = cnxk_rep_parent_setup(eswitch_dev);
+ if (rc) {
+ plt_err("Failed to setup the parent device, err %d", rc);
+ goto fail;
+ }
+
+ for (i = eswitch_dev->last_probed; i < eswitch_dev->nb_esw_da; i++) {
+ esw_da = &eswitch_dev->esw_da[i];
+ /* Check the representor devargs */
+ num_rep = esw_da->nb_repr_ports;
+ for (j = 0; j < num_rep; j++) {
+ rc = create_representor_ethdev(pci_dev, eswitch_dev, esw_da, j);
+ if (rc)
+ goto fail;
+ }
+ }
+ eswitch_dev->last_probed = i;
+
+ return 0;
+fail:
+ return rc;
+}
new file mode 100644
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#ifndef __CNXK_REP_H__
+#define __CNXK_REP_H__
+
+#include <cnxk_eswitch.h>
+#include <cnxk_ethdev.h>
+
+/* Common ethdev ops */
+extern struct eth_dev_ops cnxk_rep_dev_ops;
+
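+/* Per-representor private data; parent_dev is the eswitch device whose
+ * RQ/SQ pair backs this representor port.
+ */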
+struct cnxk_rep_dev {
+ uint16_t port_id;
+ uint16_t rep_id;
+ uint16_t switch_domain_id;
+ struct cnxk_eswitch_dev *parent_dev;
+ uint16_t hw_func;
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
+};
+
+static inline struct cnxk_rep_dev *
+cnxk_rep_pmd_priv(const struct rte_eth_dev *eth_dev)
+{
+ return eth_dev->data->dev_private;
+}
+
+int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev);
+int cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev);
+int cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev);
+int cnxk_rep_dev_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info);
+int cnxk_rep_representor_info_get(struct rte_eth_dev *dev, struct rte_eth_representor_info *info);
+int cnxk_rep_dev_configure(struct rte_eth_dev *eth_dev);
+
+int cnxk_rep_link_update(struct rte_eth_dev *eth_dev, int wait_to_compl);
+int cnxk_rep_dev_start(struct rte_eth_dev *eth_dev);
+int cnxk_rep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+int cnxk_rep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id, const struct rte_eth_txconf *tx_conf);
+void cnxk_rep_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
+void cnxk_rep_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
+int cnxk_rep_dev_stop(struct rte_eth_dev *eth_dev);
+int cnxk_rep_dev_close(struct rte_eth_dev *eth_dev);
+int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats);
+int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev);
+int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops);
+
+#endif /* __CNXK_REP_H__ */
new file mode 100644
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#include <cnxk_rep.h>
+
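+/* Placeholder ethdev ops for representor ports; these stubs keep the
+ * ethdev API safe to call until the actual implementations are added.
+ */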
+int
+cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
+{
+ PLT_SET_USED(ethdev);
+ PLT_SET_USED(wait_to_complete);
+ return 0;
+}
+
+int
+cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *devinfo)
+{
+ PLT_SET_USED(ethdev);
+ PLT_SET_USED(devinfo);
+ return 0;
+}
+
+int
+cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
+{
+ PLT_SET_USED(ethdev);
+ return 0;
+}
+
+int
+cnxk_rep_dev_start(struct rte_eth_dev *ethdev)
+{
+ PLT_SET_USED(ethdev);
+ return 0;
+}
+
+int
+cnxk_rep_dev_close(struct rte_eth_dev *ethdev)
+{
+ PLT_SET_USED(ethdev);
+ return 0;
+}
+
+int
+cnxk_rep_dev_stop(struct rte_eth_dev *ethdev)
+{
+ PLT_SET_USED(ethdev);
+ return 0;
+}
+
+int
+cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16_t nb_rx_desc,
+ unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ PLT_SET_USED(ethdev);
+ PLT_SET_USED(rx_queue_id);
+ PLT_SET_USED(nb_rx_desc);
+ PLT_SET_USED(socket_id);
+ PLT_SET_USED(rx_conf);
+ PLT_SET_USED(mb_pool);
+ return 0;
+}
+
+void
+cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+ PLT_SET_USED(ethdev);
+ PLT_SET_USED(queue_id);
+}
+
+int
+cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc,
+ unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
+{
+ PLT_SET_USED(ethdev);
+ PLT_SET_USED(tx_queue_id);
+ PLT_SET_USED(nb_tx_desc);
+ PLT_SET_USED(socket_id);
+ PLT_SET_USED(tx_conf);
+ return 0;
+}
+
+void
+cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+ PLT_SET_USED(ethdev);
+ PLT_SET_USED(queue_id);
+}
+
+int
+cnxk_rep_stats_get(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats)
+{
+ PLT_SET_USED(ethdev);
+ PLT_SET_USED(stats);
+ return 0;
+}
+
+int
+cnxk_rep_stats_reset(struct rte_eth_dev *ethdev)
+{
+ PLT_SET_USED(ethdev);
+ return 0;
+}
+
+int
+cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops)
+{
+ PLT_SET_USED(ethdev);
+ PLT_SET_USED(ops);
+ return 0;
+}
+
+/* CNXK platform representor dev ops */
+struct eth_dev_ops cnxk_rep_dev_ops = {
+ .dev_infos_get = cnxk_rep_dev_info_get,
+ .dev_configure = cnxk_rep_dev_configure,
+ .dev_start = cnxk_rep_dev_start,
+ .rx_queue_setup = cnxk_rep_rx_queue_setup,
+ .rx_queue_release = cnxk_rep_rx_queue_release,
+ .tx_queue_setup = cnxk_rep_tx_queue_setup,
+ .tx_queue_release = cnxk_rep_tx_queue_release,
+ .link_update = cnxk_rep_link_update,
+ .dev_close = cnxk_rep_dev_close,
+ .dev_stop = cnxk_rep_dev_stop,
+ .stats_get = cnxk_rep_stats_get,
+ .stats_reset = cnxk_rep_stats_reset,
+ .flow_ops_get = cnxk_rep_flow_ops_get
+};
@@ -34,6 +34,8 @@ sources = files(
'cnxk_lookup.c',
'cnxk_ptp.c',
'cnxk_flow.c',
+ 'cnxk_rep.c',
+ 'cnxk_rep_ops.c',
'cnxk_stats.c',
'cnxk_tm.c',
)