@@ -210,8 +210,38 @@ struct enic {
/* Flow manager API */
struct enic_flowman *fm;
+ /* switchdev */
+ uint8_t switchdev_mode;
+ uint16_t switch_domain_id;
+ uint16_t max_vf_id;
+ /*
+ * Lock to serialize devcmds from PF, VF representors as they all share
+ * the same PF devcmd instance in firmware.
+ */
+ rte_spinlock_t devcmd_lock;
+};
+
+/*
+ * Per-port private data for a VF representor. The embedded struct enic
+ * lets code shared with the PF (flowman, devcmd helpers) operate on a
+ * representor; VF_ENIC_TO_VF_REP maps that embedded enic back to the
+ * enclosing representor.
+ */
+struct enic_vf_representor {
+ struct enic enic;
+ struct vnic_enet_config config;
+ struct rte_eth_dev *eth_dev;
+ struct rte_ether_addr mac_addr;
+ struct rte_pci_addr bdf; /* computed VF BDF; see enic_vf_representor_init */
+ struct enic *pf; /* owning PF port */
+ uint16_t switch_domain_id;
+ uint16_t vf_id;
+ int allmulti;
+ int promisc;
};
+/* Map an embedded struct enic back to its enclosing VF representor */
+#define VF_ENIC_TO_VF_REP(vf_enic) \
+ container_of(vf_enic, struct enic_vf_representor, enic)
+
+/* True if this port is a VF representor (not the PF itself) */
+static inline int enic_is_vf_rep(struct enic *enic)
+{
+ return !!(enic->rte_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR);
+}
+
/* Compute ethdev's max packet size from MTU */
static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
{
@@ -364,6 +394,10 @@ void enic_pick_rx_handler(struct rte_eth_dev *eth_dev);
void enic_pick_tx_handler(struct rte_eth_dev *eth_dev);
void enic_fdir_info(struct enic *enic);
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
+int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params);
+int enic_vf_representor_uninit(struct rte_eth_dev *ethdev);
+int enic_fm_allocate_switch_domain(struct enic *pf);
extern const struct rte_flow_ops enic_flow_ops;
extern const struct rte_flow_ops enic_fm_flow_ops;
+
#endif /* _ENIC_H_ */
@@ -68,6 +68,7 @@ static const struct vic_speed_capa {
#define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
#define ENIC_DEVARG_GENEVE_OPT "geneve-opt"
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
+#define ENIC_DEVARG_REPRESENTOR "representor"
RTE_LOG_REGISTER(enic_pmd_logtype, pmd.net.enic, INFO);
@@ -1236,6 +1237,7 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
ENIC_DEVARG_ENABLE_AVX2_RX,
ENIC_DEVARG_GENEVE_OPT,
ENIC_DEVARG_IG_VLAN_REWRITE,
+ ENIC_DEVARG_REPRESENTOR,
NULL};
struct enic *enic = pmd_priv(dev);
struct rte_kvargs *kvlist;
@@ -1266,10 +1268,9 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
return 0;
}
-/* Initialize the driver
- * It returns 0 on success.
- */
-static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
+/* Initialize the driver for PF */
+static int eth_enic_dev_init(struct rte_eth_dev *eth_dev,
+ void *init_params __rte_unused)
{
struct rte_pci_device *pdev;
struct rte_pci_addr *addr;
@@ -1277,7 +1278,6 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
int err;
ENICPMD_FUNC_TRACE();
-
eth_dev->dev_ops = &enicpmd_eth_dev_ops;
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
@@ -1305,19 +1305,108 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
err = enic_check_devargs(eth_dev);
if (err)
return err;
- return enic_probe(enic);
+ err = enic_probe(enic);
+ if (!err && enic->fm) {
+ err = enic_fm_allocate_switch_domain(enic);
+ if (err)
+ ENICPMD_LOG(ERR, "failed to allocate switch domain id");
+ }
+ return err;
+}
+
+/* Tear down the PF ethdev; used as the rte_eth_dev_destroy callback */
+static int eth_enic_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int err;
+
+ ENICPMD_FUNC_TRACE();
+ eth_dev->device = NULL;
+ eth_dev->intr_handle = NULL;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+ /*
+ * NOTE(review): switch_domain_id may be shared with another PF port
+ * on the same VIC (see enic_fm_allocate_switch_domain) -- confirm
+ * freeing it here is safe while that other port is still active.
+ */
+ err = rte_eth_switch_domain_free(enic->switch_domain_id);
+ if (err)
+ ENICPMD_LOG(WARNING, "failed to free switch domain: %d", err);
+ return 0;
}
static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
- eth_enicpmd_dev_init);
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+ struct rte_eth_dev *pf_ethdev;
+ struct enic *pf_enic;
+ int i, retval;
+
+ ENICPMD_FUNC_TRACE();
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ &eth_da);
+ if (retval)
+ return retval;
+ }
+ /* Create the PF ethdev first; representors reference its private data */
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct enic),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_enic_dev_init, NULL);
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ /* Probe VF representor */
+ pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+ /* Representors require flowman */
+ pf_enic = pmd_priv(pf_ethdev);
+ if (pf_enic->fm == NULL) {
+ ENICPMD_LOG(ERR, "VF representors require flowman");
+ return -ENOTSUP;
+ }
+ /*
+ * For now representors imply switchdev, as firmware does not support
+ * legacy mode SR-IOV
+ */
+ pf_enic->switchdev_mode = 1;
+ /* Calculate max VF ID before initializing representor */
+ pf_enic->max_vf_id = 0;
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ pf_enic->max_vf_id = RTE_MAX(pf_enic->max_vf_id,
+ eth_da.representor_ports[i]);
+ }
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct enic_vf_representor representor;
+
+ representor.vf_id = eth_da.representor_ports[i];
+ representor.switch_domain_id = pf_enic->switch_domain_id;
+ representor.pf = pf_enic;
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name, eth_da.representor_ports[i]);
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct enic_vf_representor), NULL, NULL,
+ enic_vf_representor_init, &representor);
+ if (retval) {
+ ENICPMD_LOG(ERR, "failed to create enic vf representor %s",
+ name);
+ /* Destroy representors created before this failure */
+ while (i--) {
+ struct rte_eth_dev *rep_dev;
+
+ snprintf(name, sizeof(name),
+ "net_%s_representor_%d",
+ pci_dev->device.name,
+ eth_da.representor_ports[i]);
+ rep_dev = rte_eth_dev_allocated(name);
+ if (rep_dev != NULL)
+ rte_eth_dev_destroy(rep_dev,
+ enic_vf_representor_uninit);
+ }
+ return retval;
+ }
+ }
+ return 0;
}
static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+ struct rte_eth_dev *ethdev;
+ uint16_t port_id;
+
+ ENICPMD_FUNC_TRACE();
+ /*
+ * Destroy VF representors first: they are named
+ * "net_%s_representor_%d", not after the PCI device, so looking up
+ * pci_dev->device.name alone would never find (or free) them, and
+ * they hold pointers into the PF's private data.
+ */
+ RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+ ethdev = &rte_eth_devices[port_id];
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ rte_eth_dev_destroy(ethdev,
+ enic_vf_representor_uninit);
+ }
+ /* Then the PF itself */
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return -ENODEV;
+ return rte_eth_dev_destroy(ethdev, eth_enic_dev_uninit);
}
static struct rte_pci_driver rte_enic_pmd = {
@@ -1073,7 +1073,8 @@ enic_fm_find_vnic(struct enic *enic, const struct rte_pci_addr *addr,
args[1] = bdf;
rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
if (rc != 0) {
- ENICPMD_LOG(ERR, "allocating counters rc=%d", rc);
+ /* Expected to fail if BDF is not on the adapter */
+ ENICPMD_LOG(DEBUG, "cannot find vnic handle: rc=%d", rc);
return rc;
}
*handle = args[0];
@@ -2522,6 +2523,58 @@ enic_fm_destroy(struct enic *enic)
enic->fm = NULL;
}
+/*
+ * Assign a switch domain id to the PF. PF ports on the same VIC adapter
+ * share one domain id: probe each other enic PF port via flowman
+ * (enic_fm_find_vnic) and reuse its id if it is on this adapter;
+ * otherwise allocate a fresh id.
+ */
+int
+enic_fm_allocate_switch_domain(struct enic *pf)
+{
+ const struct rte_pci_addr *cur_a, *prev_a;
+ struct rte_eth_dev *dev;
+ struct enic *cur, *prev;
+ uint16_t domain_id;
+ uint64_t vnic_h;
+ uint16_t pid;
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ if (enic_is_vf_rep(pf))
+ return -EINVAL;
+ cur = pf;
+ cur_a = &RTE_ETH_DEV_TO_PCI(cur->rte_dev)->addr;
+ /* Go through ports and find another PF that is on the same adapter */
+ RTE_ETH_FOREACH_DEV(pid) {
+ dev = &rte_eth_devices[pid];
+ if (!dev_is_enic(dev))
+ continue;
+ if (dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ continue;
+ if (dev == cur->rte_dev)
+ continue;
+ /* dev is another PF. Is it on the same adapter? */
+ prev = pmd_priv(dev);
+ prev_a = &RTE_ETH_DEV_TO_PCI(dev)->addr;
+ if (!enic_fm_find_vnic(cur, prev_a, &vnic_h)) {
+ ENICPMD_LOG(DEBUG, "Port %u (PF BDF %x:%x:%x) and port %u (PF BDF %x:%x:%x domain %u) are on the same VIC",
+ cur->rte_dev->data->port_id,
+ cur_a->bus, cur_a->devid, cur_a->function,
+ dev->data->port_id,
+ prev_a->bus, prev_a->devid, prev_a->function,
+ prev->switch_domain_id);
+ cur->switch_domain_id = prev->switch_domain_id;
+ return 0;
+ }
+ }
+ /*
+ * First PF on this adapter: allocate a new domain id. Return on
+ * failure instead of assigning/logging the then-indeterminate
+ * domain_id.
+ */
+ ret = rte_eth_switch_domain_alloc(&domain_id);
+ if (ret) {
+ ENICPMD_LOG(WARNING, "failed to allocate switch domain: %d",
+ ret);
+ return ret;
+ }
+ cur->switch_domain_id = domain_id;
+ ENICPMD_LOG(DEBUG, "Port %u (PF BDF %x:%x:%x) is the 1st PF on the VIC. Allocated switch domain id %u",
+ cur->rte_dev->data->port_id,
+ cur_a->bus, cur_a->devid, cur_a->function,
+ domain_id);
+ return 0;
+}
+
const struct rte_flow_ops enic_fm_flow_ops = {
.validate = enic_fm_flow_validate,
.create = enic_fm_flow_create,
@@ -608,7 +608,8 @@ int enic_enable(struct enic *enic)
dev_warning(enic, "Init of hash table for clsf failed."\
"Flow director feature will not work\n");
- if (enic_fm_init(enic))
+ /* Initialize flowman if not already initialized during probe */
+ if (enic->fm == NULL && enic_fm_init(enic))
dev_warning(enic, "Init of flowman failed.\n");
for (index = 0; index < enic->rq_count; index++) {
@@ -1268,6 +1269,18 @@ int enic_setup_finish(struct enic *enic)
{
enic_init_soft_stats(enic);
+ /* switchdev: enable promisc mode on PF */
+ if (enic->switchdev_mode) {
+ vnic_dev_packet_filter(enic->vdev,
+ 0 /* directed */,
+ 0 /* multicast */,
+ 0 /* broadcast */,
+ 1 /* promisc */,
+ 0 /* allmulti */);
+ enic->promisc = 1;
+ enic->allmulti = 0;
+ return 0;
+ }
/* Default conf */
vnic_dev_packet_filter(enic->vdev,
1 /* directed */,
@@ -1393,6 +1406,11 @@ int enic_set_vlan_strip(struct enic *enic)
int enic_add_packet_filter(struct enic *enic)
{
+ /* switchdev ignores packet filters */
+ if (enic->switchdev_mode) {
+ ENICPMD_LOG(DEBUG, " switchdev: ignore packet filter");
+ return 0;
+ }
/* Args -> directed, multicast, broadcast, promisc, allmulti */
return vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
enic->promisc, enic->allmulti);
@@ -1785,10 +1803,26 @@ static int enic_dev_init(struct enic *enic)
}
}
+ if (enic_fm_init(enic))
+ dev_warning(enic, "Init of flowman failed.\n");
return 0;
}
+/* Take the PF's devcmd spinlock; registered via vnic_register_lock */
+static void lock_devcmd(void *priv)
+{
+ struct enic *enic = priv;
+
+ rte_spinlock_lock(&enic->devcmd_lock);
+}
+
+/* Release the PF's devcmd spinlock; counterpart of lock_devcmd */
+static void unlock_devcmd(void *priv)
+{
+ struct enic *enic = priv;
+
+ rte_spinlock_unlock(&enic->devcmd_lock);
+}
+
int enic_probe(struct enic *enic)
{
struct rte_pci_device *pdev = enic->pdev;
@@ -1864,6 +1898,11 @@ int enic_probe(struct enic *enic)
goto err_out_dev_close;
}
+ /* Use a PF spinlock to serialize devcmd from PF and VF representors */
+ if (enic->switchdev_mode) {
+ rte_spinlock_init(&enic->devcmd_lock);
+ vnic_register_lock(enic->vdev, lock_devcmd, unlock_devcmd);
+ }
return 0;
err_out_dev_close:
new file mode 100644
@@ -0,0 +1,425 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2019 Cisco Systems, Inc. All rights reserved.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_flow_driver.h>
+#include <rte_kvargs.h>
+#include <rte_pci.h>
+#include <rte_string_fns.h>
+
+#include "enic_compat.h"
+#include "enic.h"
+#include "vnic_dev.h"
+#include "vnic_enet.h"
+#include "vnic_intr.h"
+#include "vnic_cq.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+
+/* Stub Rx burst handler: no representor datapath yet, receives nothing */
+static uint16_t enic_vf_recv_pkts(void *rx_queue __rte_unused,
+ struct rte_mbuf **rx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
+/* Stub Tx burst handler: no representor datapath yet, sends nothing */
+static uint16_t enic_vf_xmit_pkts(void *tx_queue __rte_unused,
+ struct rte_mbuf **tx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
+/*
+ * Tx queue setup is a no-op (see the stub enic_vf_xmit_pkts); it only
+ * rejects secondary processes.
+ */
+static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev __rte_unused,
+ uint16_t queue_idx __rte_unused,
+ uint16_t nb_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+ return 0;
+}
+
+/* Nothing to release: queue setup allocated nothing */
+static void enic_vf_dev_tx_queue_release(void *txq __rte_unused)
+{
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+}
+
+/*
+ * Rx queue setup is a no-op (see the stub enic_vf_recv_pkts); it only
+ * rejects secondary processes.
+ */
+static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev __rte_unused,
+ uint16_t queue_idx __rte_unused,
+ uint16_t nb_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp __rte_unused)
+{
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+ return 0;
+}
+
+/* Nothing to release: queue setup allocated nothing */
+static void enic_vf_dev_rx_queue_release(void *rxq __rte_unused)
+{
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+}
+
+/* No configuration to apply; only reject secondary processes */
+static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
+{
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+ return 0;
+}
+
+/* Start the representor port: clear the VF's packet filters via devcmd */
+static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct enic_vf_representor *vf;
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
+ vf = eth_dev->data->dev_private;
+ /* Remove all packet filters so no ingress packets go to VF.
+ * When PF enables switchdev, it will ensure packet filters
+ * are removed. So, this is not technically needed.
+ */
+ ENICPMD_LOG(DEBUG, "Clear packet filters");
+ ret = vnic_dev_packet_filter(vf->enic.vdev, 0, 0, 0, 0, 0);
+ if (ret) {
+ ENICPMD_LOG(ERR, "Cannot clear packet filters");
+ return ret;
+ }
+ return 0;
+}
+
+/* Stop is a no-op: there are no real queues to quiesce yet */
+static void enic_vf_dev_stop(struct rte_eth_dev *eth_dev __rte_unused)
+{
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+}
+
+/*
+ * "close" is a no-op for now and solely exists so that rte_eth_dev_close()
+ * can finish its own cleanup without errors.
+ */
+static void enic_vf_dev_close(struct rte_eth_dev *eth_dev __rte_unused)
+{
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+}
+
+/* Report the PF's link status/speed as the representor's own */
+static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete __rte_unused)
+{
+ struct enic_vf_representor *vf;
+ struct rte_eth_link link;
+ struct enic *pf;
+
+ ENICPMD_FUNC_TRACE();
+ vf = eth_dev->data->dev_private;
+ pf = vf->pf;
+ /*
+ * Link status and speed are same as PF. Update PF status and then
+ * copy it to VF.
+ */
+ enic_link_update(pf->rte_dev);
+ rte_eth_linkstatus_get(pf->rte_dev, &link);
+ rte_eth_linkstatus_set(eth_dev, &link);
+ return 0;
+}
+
+/* Fetch the VF vNIC's hardware stats (via proxy devcmd) and translate them */
+static int enic_vf_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *stats)
+{
+ struct enic_vf_representor *vf;
+ struct vnic_stats *vs;
+ int err;
+
+ ENICPMD_FUNC_TRACE();
+ vf = eth_dev->data->dev_private;
+ /* Get VF stats via PF */
+ err = vnic_dev_stats_dump(vf->enic.vdev, &vs);
+ if (err) {
+ /* No trailing \n: consistent with other ENICPMD_LOG calls */
+ ENICPMD_LOG(ERR, "error in getting stats");
+ return err;
+ }
+ stats->ipackets = vs->rx.rx_frames_ok;
+ stats->opackets = vs->tx.tx_frames_ok;
+ stats->ibytes = vs->rx.rx_bytes_ok;
+ stats->obytes = vs->tx.tx_bytes_ok;
+ stats->ierrors = vs->rx.rx_errors + vs->rx.rx_drop;
+ stats->oerrors = vs->tx.tx_errors;
+ stats->imissed = vs->rx.rx_no_bufs;
+ return 0;
+}
+
+/* Clear the VF vNIC's hardware stats via proxy devcmd */
+static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct enic_vf_representor *vf;
+ int err;
+
+ ENICPMD_FUNC_TRACE();
+ vf = eth_dev->data->dev_private;
+ /* Ask PF to clear VF stats */
+ err = vnic_dev_stats_clear(vf->enic.vdev);
+ if (err)
+ /* No trailing \n: consistent with other ENICPMD_LOG calls */
+ ENICPMD_LOG(ERR, "error in clearing stats");
+ return err;
+}
+
+/* Device info for the representor; size limits mirror the PF where relevant */
+static int enic_vf_dev_infos_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info)
+{
+ struct enic_vf_representor *vf;
+ struct enic *pf;
+
+ ENICPMD_FUNC_TRACE();
+ vf = eth_dev->data->dev_private;
+ pf = vf->pf;
+ /* Queue counts were fixed at init (1 Rx, 1 Tx) */
+ device_info->max_rx_queues = eth_dev->data->nb_rx_queues;
+ device_info->max_tx_queues = eth_dev->data->nb_tx_queues;
+ device_info->min_rx_bufsize = ENIC_MIN_MTU;
+ /* Max packet size is same as PF */
+ device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(pf->max_mtu);
+ device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
+ /* No offload capa, RSS, etc. until Tx/Rx handlers are added */
+ device_info->rx_offload_capa = 0;
+ device_info->tx_offload_capa = 0;
+ /* Identify this port as vf_id within the PF's switch domain */
+ device_info->switch_info.name = pf->rte_dev->device->name;
+ device_info->switch_info.domain_id = vf->switch_domain_id;
+ device_info->switch_info.port_id = vf->vf_id;
+ return 0;
+}
+
+/* Push the representor's promisc/allmulti state to the VF via proxy devcmd */
+static void set_vf_packet_filter(struct enic_vf_representor *vf)
+{
+ /* switchdev: packet filters are ignored */
+ if (vf->enic.switchdev_mode)
+ return;
+ /* Ask PF to apply filters on VF */
+ vnic_dev_packet_filter(vf->enic.vdev, 1 /* unicast */, 1 /* mcast */,
+ 1 /* bcast */, vf->promisc, vf->allmulti);
+}
+
+/* Enable promiscuous mode on the VF (recorded, then applied if not switchdev) */
+static int enic_vf_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct enic_vf_representor *vf;
+
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+ vf = eth_dev->data->dev_private;
+ vf->promisc = 1;
+ set_vf_packet_filter(vf);
+ return 0;
+}
+
+/* Disable promiscuous mode on the VF (recorded, then applied if not switchdev) */
+static int enic_vf_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct enic_vf_representor *vf;
+
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+ vf = eth_dev->data->dev_private;
+ vf->promisc = 0;
+ set_vf_packet_filter(vf);
+ return 0;
+}
+
+/* Enable all-multicast on the VF (recorded, then applied if not switchdev) */
+static int enic_vf_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct enic_vf_representor *vf;
+
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+ vf = eth_dev->data->dev_private;
+ vf->allmulti = 1;
+ set_vf_packet_filter(vf);
+ return 0;
+}
+
+/* Disable all-multicast on the VF (recorded, then applied if not switchdev) */
+static int enic_vf_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct enic_vf_representor *vf;
+
+ ENICPMD_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+ vf = eth_dev->data->dev_private;
+ vf->allmulti = 0;
+ set_vf_packet_filter(vf);
+ return 0;
+}
+
+/*
+ * A minimal set of handlers.
+ * The representor can get/set a small set of VF settings via "proxy" devcmd.
+ * With proxy devcmd, the PF driver basically tells the VIC firmware to
+ * "perform this devcmd on that VF".
+ * Callbacks not listed here are left NULL.
+ */
+static const struct eth_dev_ops enic_vf_representor_dev_ops = {
+ .allmulticast_enable = enic_vf_allmulticast_enable,
+ .allmulticast_disable = enic_vf_allmulticast_disable,
+ .dev_configure = enic_vf_dev_configure,
+ .dev_infos_get = enic_vf_dev_infos_get,
+ .dev_start = enic_vf_dev_start,
+ .dev_stop = enic_vf_dev_stop,
+ .dev_close = enic_vf_dev_close,
+ .link_update = enic_vf_link_update,
+ .promiscuous_enable = enic_vf_promiscuous_enable,
+ .promiscuous_disable = enic_vf_promiscuous_disable,
+ .stats_get = enic_vf_stats_get,
+ .stats_reset = enic_vf_stats_reset,
+ .rx_queue_setup = enic_vf_dev_rx_queue_setup,
+ .rx_queue_release = enic_vf_dev_rx_queue_release,
+ .tx_queue_setup = enic_vf_dev_tx_queue_setup,
+ .tx_queue_release = enic_vf_dev_tx_queue_release,
+};
+
+/*
+ * Read the VF's MAC and MTU from its vNIC (via proxy devcmd) and copy
+ * them into the representor's ethdev data. Returns 0 or a negative
+ * devcmd error.
+ */
+static int get_vf_config(struct enic_vf_representor *vf)
+{
+ struct vnic_enet_config *c;
+ struct enic *pf;
+ int switch_mtu;
+ int err;
+
+ c = &vf->config;
+ pf = vf->pf;
+ /* VF MAC */
+ err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes);
+ if (err) {
+ /* No trailing \n: consistent with other ENICPMD_LOG calls */
+ ENICPMD_LOG(ERR, "error in getting MAC address");
+ return err;
+ }
+ rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs);
+
+ /* VF MTU per its vNIC setting */
+ err = vnic_dev_spec(vf->enic.vdev,
+ offsetof(struct vnic_enet_config, mtu),
+ sizeof(c->mtu), &c->mtu);
+ if (err) {
+ ENICPMD_LOG(ERR, "error in getting MTU");
+ return err;
+ }
+ /*
+ * Blade switch (fabric interconnect) port's MTU. Assume the kernel
+ * enic driver runs on VF. That driver automatically adjusts its MTU
+ * according to the switch MTU.
+ */
+ switch_mtu = vnic_dev_mtu(pf->vdev);
+ vf->eth_dev->data->mtu = c->mtu;
+ if (switch_mtu > c->mtu)
+ vf->eth_dev->data->mtu = RTE_MIN(ENIC_MAX_MTU, switch_mtu);
+ return 0;
+}
+
+/*
+ * Initialize an ethdev as a VF representor (rte_eth_dev_create callback).
+ * init_params is a template struct enic_vf_representor carrying the VF id,
+ * switch domain id, and PF pointer; the port's real private data is filled
+ * in here.
+ */
+int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
+{
+ struct enic_vf_representor *vf, *params;
+ struct rte_pci_device *pdev;
+ struct enic *pf, *vf_enic;
+ struct rte_pci_addr *addr;
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ params = init_params;
+ vf = eth_dev->data->dev_private;
+ vf->switch_domain_id = params->switch_domain_id;
+ vf->vf_id = params->vf_id;
+ vf->eth_dev = eth_dev;
+ vf->pf = params->pf;
+ vf->allmulti = 1;
+ vf->promisc = 0;
+ pf = vf->pf;
+ vf->enic.switchdev_mode = pf->switchdev_mode;
+ /* Only switchdev is supported now */
+ RTE_ASSERT(vf->enic.switchdev_mode);
+
+ /* Check for non-existent VFs */
+ pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);
+ if (vf->vf_id >= pdev->max_vfs) {
+ ENICPMD_LOG(ERR, "VF ID is invalid. vf_id %u max_vfs %u",
+ vf->vf_id, pdev->max_vfs);
+ return -ENODEV;
+ }
+
+ eth_dev->device->driver = pf->rte_dev->device->driver;
+ eth_dev->dev_ops = &enic_vf_representor_dev_ops;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR
+ | RTE_ETH_DEV_CLOSE_REMOVE;
+ eth_dev->data->representor_id = vf->vf_id;
+ /* mac_addrs is released by the ethdev layer when the port is freed */
+ eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
+ sizeof(struct rte_ether_addr) *
+ ENIC_UNICAST_PERFECT_FILTERS, 0);
+ if (eth_dev->data->mac_addrs == NULL)
+ return -ENOMEM;
+ /* Use 1 RX queue and 1 TX queue for representor path */
+ eth_dev->data->nb_rx_queues = 1;
+ eth_dev->data->nb_tx_queues = 1;
+ eth_dev->rx_pkt_burst = &enic_vf_recv_pkts;
+ eth_dev->tx_pkt_burst = &enic_vf_xmit_pkts;
+ /* Initial link state copied from PF */
+ eth_dev->data->dev_link = pf->rte_dev->data->dev_link;
+ /* Representor vdev to perform devcmd */
+ vf->enic.vdev = vnic_vf_rep_register(&vf->enic, pf->vdev, vf->vf_id);
+ if (vf->enic.vdev == NULL)
+ return -ENOMEM;
+ ret = vnic_dev_alloc_stats_mem(vf->enic.vdev);
+ if (ret)
+ goto err_unregister;
+ /* Get/copy VF vNIC MAC, MTU, etc. into eth_dev */
+ ret = get_vf_config(vf);
+ if (ret)
+ goto err_unregister;
+
+ /*
+ * Calculate VF BDF. The firmware ensures that PF BDF is always
+ * bus:dev.0, and VF BDFs are dev.1, dev.2, and so on.
+ */
+ vf->bdf = pdev->addr;
+ vf->bdf.function += vf->vf_id + 1;
+
+ /* Copy a few fields used by enic_fm_flow */
+ vf_enic = &vf->enic;
+ vf_enic->switch_domain_id = vf->switch_domain_id;
+ vf_enic->flow_filter_mode = pf->flow_filter_mode;
+ vf_enic->rte_dev = eth_dev;
+ vf_enic->dev_data = eth_dev->data;
+ LIST_INIT(&vf_enic->flows);
+ LIST_INIT(&vf_enic->memzone_list);
+ rte_spinlock_init(&vf_enic->memzone_list_lock);
+ addr = &vf->bdf;
+ snprintf(vf_enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
+ addr->domain, addr->bus, addr->devid, addr->function);
+ return 0;
+
+err_unregister:
+ /* Don't leak the representor vdev on failure (see _uninit) */
+ vnic_dev_unregister(vf->enic.vdev);
+ vf->enic.vdev = NULL;
+ return ret;
+}
+
+/* Tear down a VF representor; used as the rte_eth_dev_destroy callback */
+int enic_vf_representor_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct enic_vf_representor *vf;
+
+ ENICPMD_FUNC_TRACE();
+ vf = eth_dev->data->dev_private;
+ vnic_dev_unregister(vf->enic.vdev);
+ return 0;
+}
@@ -14,6 +14,7 @@ sources = files(
'enic_main.c',
'enic_res.c',
'enic_rxtx.c',
+ 'enic_vf_representor.c',
)
deps += ['hash']
includes += include_directories('base')