@@ -22,6 +22,7 @@
#include "nfp_flower.h"
#include "nfp_flower_ovs_compat.h"
#include "nfp_flower_ctrl.h"
+#include "nfp_flower_representor.h"
#define MAX_PKT_BURST 32
#define MEMPOOL_CACHE_SIZE 512
@@ -939,8 +940,13 @@
unsigned int numa_node;
struct nfp_net_hw *pf_hw;
struct nfp_net_hw *ctrl_hw;
+ struct rte_pci_device *pci_dev;
struct nfp_app_flower *app_flower;
+ struct rte_eth_devargs eth_da = {
+ .nb_representor_ports = 0
+ };
+ pci_dev = pf_dev->pci_dev;
numa_node = rte_socket_id();
/* Allocate memory for the Flower app */
@@ -1033,6 +1039,59 @@
goto ctrl_vnic_cleanup;
}
+ /* Allocate a switch domain for the flower app */
+ if (app_flower->switch_domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID &&
+ rte_eth_switch_domain_alloc(&app_flower->switch_domain_id)) {
+ PMD_INIT_LOG(WARNING,
+ "failed to allocate switch domain for device");
+ }
+
+ /* Now parse PCI device args passed for representor info */
+ if (pci_dev->device.devargs) {
+ ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ ð_da);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "devarg parse failed");
+ goto ctrl_vnic_cleanup;
+ }
+ }
+
+ if (eth_da.nb_representor_ports == 0) {
+ PMD_INIT_LOG(DEBUG, "No representor port needs to be created.");
+ ret = 0;
+ goto done;
+ }
+
+ /* There always exist phy repr */
+ if (eth_da.nb_representor_ports < app_flower->nfp_eth_table->count) {
+ PMD_INIT_LOG(DEBUG, "Should also create phy representor port.");
+ ret = -ERANGE;
+ goto ctrl_vnic_cleanup;
+ }
+
+ /* Only support VF representor creation via the command line */
+ if (eth_da.type != RTE_ETH_REPRESENTOR_VF) {
+ PMD_DRV_LOG(ERR, "unsupported representor type: %s\n",
+ pci_dev->device.devargs->args);
+ ret = -ENOTSUP;
+ goto ctrl_vnic_cleanup;
+ }
+
+ /* Fill in flower app with repr counts */
+ app_flower->num_phyport_reprs = (uint8_t)app_flower->nfp_eth_table->count;
+ app_flower->num_vf_reprs = eth_da.nb_representor_ports -
+ app_flower->nfp_eth_table->count;
+
+ PMD_INIT_LOG(INFO, "%d number of VF reprs", app_flower->num_vf_reprs);
+ PMD_INIT_LOG(INFO, "%d number of phyport reprs", app_flower->num_phyport_reprs);
+
+ ret = nfp_flower_repr_alloc(app_flower);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "representors allocation for NFP_REPR_TYPE_VF error");
+ goto ctrl_vnic_cleanup;
+ }
+
return 0;
ctrl_vnic_cleanup:
@@ -19,8 +19,20 @@ enum nfp_flower_service {
*/
#define FLOWER_PKT_DATA_OFFSET 8
+#define MAX_FLOWER_PHYPORTS 8
+#define MAX_FLOWER_VFS 64
+
/* The flower application's private structure */
struct nfp_app_flower {
+ /* switch domain for this app */
+ uint16_t switch_domain_id;
+
+ /* Number of VF representors */
+ uint8_t num_vf_reprs;
+
+ /* Number of phyport representors */
+ uint8_t num_phyport_reprs;
+
/* List of rte_service ID's for the flower app */
uint32_t flower_services_ids[NFP_FLOWER_SERVICE_MAX];
@@ -44,6 +56,12 @@ struct nfp_app_flower {
/* Ctrl vNIC Tx counter */
uint64_t ctrl_vnic_tx_count;
+
+ /* Array of phyport representors */
+ struct nfp_flower_representor *phy_reprs[MAX_FLOWER_PHYPORTS];
+
+ /* Array of VF representors */
+ struct nfp_flower_representor *vf_reprs[MAX_FLOWER_VFS];
};
int nfp_init_app_flower(struct nfp_pf_dev *pf_dev);
new file mode 100644
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#include "../nfpcore/nfp_nsp.h"
+#include "../nfp_logs.h"
+#include "../nfp_common.h"
+#include "nfp_flower.h"
+#include "nfp_flower_cmsg.h"
+#include "nfp_flower_ctrl.h"
+#include "nfp_flower_representor.h"
+
+static void *
+nfp_flower_cmsg_init(struct rte_mbuf *m,
+ enum nfp_flower_cmsg_type type,
+ uint32_t size)
+{
+ char *pkt;
+ uint32_t data;
+ uint32_t new_size = size;
+ struct nfp_flower_cmsg_hdr *hdr;
+
+ pkt = rte_pktmbuf_mtod(m, char *);
+ PMD_DRV_LOG(DEBUG, "flower_cmsg_init using pkt at %p", pkt);
+
+ data = rte_cpu_to_be_32(NFP_NET_META_PORTID);
+ rte_memcpy(pkt, &data, 4);
+ pkt += 4;
+ new_size += 4;
+
+ /* First the metadata as flower requires it */
+ data = rte_cpu_to_be_32(NFP_META_PORT_ID_CTRL);
+ rte_memcpy(pkt, &data, 4);
+ pkt += 4;
+ new_size += 4;
+
+ /* Now the ctrl header */
+ hdr = (struct nfp_flower_cmsg_hdr *)pkt;
+ hdr->pad = 0;
+ hdr->type = type;
+ hdr->version = NFP_FLOWER_CMSG_VER1;
+
+ pkt = (char *)hdr + NFP_FLOWER_CMSG_HLEN;
+ new_size += NFP_FLOWER_CMSG_HLEN;
+
+ m->pkt_len = new_size;
+ m->data_len = m->pkt_len;
+
+ return pkt;
+}
+
+static void
+nfp_flower_cmsg_mac_repr_init(struct rte_mbuf *m, int num_ports)
+{
+ uint32_t size;
+ struct nfp_flower_cmsg_mac_repr *msg;
+ enum nfp_flower_cmsg_type type = NFP_FLOWER_CMSG_TYPE_MAC_REPR;
+
+ size = sizeof(*msg) + (num_ports * sizeof(msg->ports[0]));
+ PMD_INIT_LOG(DEBUG, "mac repr cmsg init with size: %u", size);
+ msg = (struct nfp_flower_cmsg_mac_repr *)nfp_flower_cmsg_init(m,
+ type, size);
+
+ memset(msg->reserved, 0, sizeof(msg->reserved));
+ msg->num_ports = num_ports;
+}
+
+static void
+nfp_flower_cmsg_mac_repr_fill(struct rte_mbuf *m,
+ unsigned int idx,
+ unsigned int nbi,
+ unsigned int nbi_port,
+ unsigned int phys_port)
+{
+ struct nfp_flower_cmsg_mac_repr *msg;
+
+ msg = (struct nfp_flower_cmsg_mac_repr *)nfp_flower_cmsg_get_data(m);
+ msg->ports[idx].idx = idx;
+ msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI;
+ msg->ports[idx].nbi_port = nbi_port;
+ msg->ports[idx].phys_port = phys_port;
+}
+
+int
+nfp_flower_cmsg_mac_repr(struct nfp_app_flower *app_flower)
+{
+ int i;
+ unsigned int nbi;
+ unsigned int nbi_port;
+ unsigned int phys_port;
+ struct rte_mbuf *mac_repr_cmsg;
+ struct nfp_eth_table *nfp_eth_table;
+
+ nfp_eth_table = app_flower->nfp_eth_table;
+
+ mac_repr_cmsg = rte_pktmbuf_alloc(app_flower->ctrl_pktmbuf_pool);
+ if (mac_repr_cmsg == NULL) {
+ PMD_INIT_LOG(ERR, "Could not allocate mac repr cmsg");
+ return -ENOMEM;
+ }
+
+ nfp_flower_cmsg_mac_repr_init(mac_repr_cmsg,
+ app_flower->num_phyport_reprs);
+
+ /* Fill in the mac repr cmsg */
+ for (i = 0; i < app_flower->num_phyport_reprs; i++) {
+ nbi = nfp_eth_table->ports[i].nbi;
+ nbi_port = nfp_eth_table->ports[i].base;
+ phys_port = nfp_eth_table->ports[i].index;
+
+ nfp_flower_cmsg_mac_repr_fill(mac_repr_cmsg, i, nbi, nbi_port,
+ phys_port);
+ }
+
+ /* Send the cmsg via the ctrl vNIC */
+ return nfp_flower_ctrl_vnic_xmit(app_flower, mac_repr_cmsg);
+}
+
+int
+nfp_flower_cmsg_repr_reify(struct nfp_app_flower *app_flower,
+ struct nfp_flower_representor *repr)
+{
+ struct rte_mbuf *mbuf;
+ struct nfp_flower_cmsg_port_reify *msg;
+
+ mbuf = rte_pktmbuf_alloc(app_flower->ctrl_pktmbuf_pool);
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(DEBUG, "alloc mbuf for repr reify failed");
+ return -ENOMEM;
+ }
+
+ msg = (struct nfp_flower_cmsg_port_reify *)nfp_flower_cmsg_init(mbuf,
+ NFP_FLOWER_CMSG_TYPE_PORT_REIFY, sizeof(*msg));
+
+ msg->portnum = rte_cpu_to_be_32(repr->port_id);
+ msg->reserved = 0;
+ msg->info = rte_cpu_to_be_16(1);
+
+ return nfp_flower_ctrl_vnic_xmit(app_flower, mbuf);
+}
+
+int
+nfp_flower_cmsg_port_mod(struct nfp_app_flower *app_flower,
+ uint32_t port_id, bool carrier_ok)
+{
+ struct nfp_flower_cmsg_port_mod *msg;
+ struct rte_mbuf *mbuf;
+
+ mbuf = rte_pktmbuf_alloc(app_flower->ctrl_pktmbuf_pool);
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(DEBUG, "alloc mbuf for repr portmod failed");
+ return -ENOMEM;
+ }
+
+ msg = (struct nfp_flower_cmsg_port_mod *)nfp_flower_cmsg_init(mbuf,
+ NFP_FLOWER_CMSG_TYPE_PORT_MOD, sizeof(*msg));
+
+ msg->portnum = rte_cpu_to_be_32(port_id);
+ msg->reserved = 0;
+ msg->info = carrier_ok;
+ msg->mtu = 9000;
+
+ return nfp_flower_ctrl_vnic_xmit(app_flower, mbuf);
+}
new file mode 100644
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _NFP_CMSG_H_
+#define _NFP_CMSG_H_
+
+#include <rte_byteorder.h>
+#include <rte_ether.h>
+
+struct nfp_flower_cmsg_hdr {
+ rte_be16_t pad;
+ uint8_t type;
+ uint8_t version;
+};
+
+/* Types defined for control messages */
+enum nfp_flower_cmsg_type {
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD = 1,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
+ NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4,
+ NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
+ NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
+ NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_MERGE_HINT = 9,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
+ NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH = 13,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14,
+ NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
+ NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
+ NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
+ NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6 = 22,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6 = 23,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 = 24,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6 = 25,
+ NFP_FLOWER_CMSG_TYPE_MAX = 32,
+};
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_MAC_REPR
+ * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * Word +---------------+-----------+---+---------------+---------------+
+ * 0 | spare |Number of ports|
+ * +---------------+-----------+---+---------------+---------------+
+ * 1 | Index | spare |NBI| Port on NBI | Chip-wide port|
+ * +---------------+-----------+---+---------------+---------------+
+ * ....
+ * +---------------+-----------+---+---------------+---------------+
+ * N-1 | Index | spare |NBI| Port on NBI | Chip-wide port|
+ * +---------------+-----------+---+---------------+---------------+
+ * N | Index | spare |NBI| Port on NBI | Chip-wide port|
+ * +---------------+-----------+---+---------------+---------------+
+ *
+ * Index: index into the eth table
+ * NBI (bits 17-16): NBI number (0-3)
+ * Port on NBI (bits 15-8): "base" in the driver
+ * this forms NBIX.PortY notation as the NSP eth table.
+ * "Chip-wide" port (bits 7-0):
+ */
+struct nfp_flower_cmsg_mac_repr {
+ uint8_t reserved[3];
+ uint8_t num_ports;
+ struct {
+ uint8_t idx;
+ uint8_t info;
+ uint8_t nbi_port;
+ uint8_t phys_port;
+ } ports[0];
+};
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_PORT_REIFY
+ * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * Word +-------+-------+---+---+-------+---+---+-----------+-----------+
+ * 0 |Port Ty|Sys ID |NIC|Rsv| Spare |PCI|typ| vNIC | queue |
+ * +-------+-----+-+---+---+-------+---+---+-----------+---------+-+
+ * 1 | Spare |E|
+ * +-------------------------------------------------------------+-+
+ * E: 1 = Representor exists, 0 = Representor does not exist
+ */
+struct nfp_flower_cmsg_port_reify {
+ rte_be32_t portnum;
+ rte_be16_t reserved;
+ rte_be16_t info;
+};
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_PORT_MOD
+ * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * Word +-------+-------+---+---+-------+---+---+-------+---+-----------+
+ * 0 |Port Ty|Sys ID |NIC|Rsv| Reserved | Port |
+ * +-------+-------+---+---+-----+-+---+---+-------+---+-----------+
+ * 1 | Spare |L| MTU |
+ * +-----------------------------+-+-------------------------------+
+ * L: Link or Admin state bit. When message is generated by host, this
+ * bit indicates the admin state (0=down, 1=up). When generated by
+ * NFP, it indicates the link state (0=down, 1=up)
+ *
+ * Port Type (word 1, bits 31 to 28) = 1 (Physical Network)
+ * Port: "Chip-wide number" as assigned by BSP
+ *
+ * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * Word +-------+-------+---+---+-------+---+---+-------+---+-----------+
+ * 0 |Port Ty|Sys ID |NIC|Rsv| Spare |PCI|typ| vNIC | queue |
+ * +-------+-----+-+---+---+---+-+-+---+---+-------+---+-----------+
+ * 1 | Spare |L| MTU |
+ * +-----------------------------+-+-------------------------------+
+ * L: Link or Admin state bit. When message is generated by host, this
+ * bit indicates the admin state (0=down, 1=up). When generated by
+ * NFP, it indicates the link state (0=down, 1=up)
+ *
+ * Port Type (word 1, bits 31 to 28) = 2 (PCIE)
+ */
+struct nfp_flower_cmsg_port_mod {
+ rte_be32_t portnum;
+ uint8_t reserved;
+ uint8_t info;
+ rte_be16_t mtu;
+};
+
+enum nfp_flower_cmsg_port_type {
+ NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
+ NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT = 0x2,
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT = 0x3,
+};
+
+enum nfp_flower_cmsg_port_vnic_type {
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF = 0x0,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF = 0x1,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_CTRL = 0x2,
+};
+
+#define NFP_FLOWER_CMSG_MAC_REPR_NBI (0x3)
+
+#define NFP_FLOWER_CMSG_HLEN sizeof(struct nfp_flower_cmsg_hdr)
+#define NFP_FLOWER_CMSG_VER1 1
+#define NFP_NET_META_PORTID 5
+#define NFP_META_PORT_ID_CTRL ~0U
+
+#define NFP_FLOWER_CMSG_PORT_TYPE(x) (((x) >> 28) & 0xf) /* [28,31] */
+#define NFP_FLOWER_CMSG_PORT_SYS_ID(x) (((x) >> 24) & 0xf) /* [24,27] */
+#define NFP_FLOWER_CMSG_PORT_NFP_ID(x) (((x) >> 22) & 0x3) /* [22,23] */
+#define NFP_FLOWER_CMSG_PORT_PCI(x) (((x) >> 14) & 0x3) /* [14,15] */
+#define NFP_FLOWER_CMSG_PORT_VNIC_TYPE(x) (((x) >> 12) & 0x3) /* [12,13] */
+#define NFP_FLOWER_CMSG_PORT_VNIC(x) (((x) >> 6) & 0x3f) /* [6,11] */
+#define NFP_FLOWER_CMSG_PORT_PCIE_Q(x) ((x) & 0x3f) /* [0,5] */
+#define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(x) ((x) & 0xff) /* [0,7] */
+
+static inline char*
+nfp_flower_cmsg_get_data(struct rte_mbuf *m)
+{
+ return rte_pktmbuf_mtod(m, char *) + 4 + 4 + NFP_FLOWER_CMSG_HLEN;
+}
+
+int nfp_flower_cmsg_mac_repr(struct nfp_app_flower *app_flower);
+int nfp_flower_cmsg_repr_reify(struct nfp_app_flower *app_flower,
+ struct nfp_flower_representor *repr);
+int nfp_flower_cmsg_port_mod(struct nfp_app_flower *app_flower,
+ uint32_t port_id, bool carrier_ok);
+
+#endif /* _NFP_CMSG_H_ */
new file mode 100644
@@ -0,0 +1,508 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#include <rte_common.h>
+#include <ethdev_pci.h>
+
+#include "../nfp_common.h"
+#include "../nfp_logs.h"
+#include "../nfp_ctrl.h"
+#include "../nfp_rxtx.h"
+#include "../nfpcore/nfp_mip.h"
+#include "../nfpcore/nfp_rtsym.h"
+#include "../nfpcore/nfp_nsp.h"
+#include "nfp_flower.h"
+#include "nfp_flower_representor.h"
+#include "nfp_flower_ctrl.h"
+#include "nfp_flower_cmsg.h"
+
+static int
+nfp_flower_repr_link_update(__rte_unused struct rte_eth_dev *ethdev,
+ __rte_unused int wait_to_complete)
+{
+ return 0;
+}
+
+static int
+nfp_flower_repr_dev_infos_get(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ /* Hardcoded pktlen and queues for now */
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = 1;
+ dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
+ dev_info->max_rx_pktlen = 9000;
+
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+
+ dev_info->max_mac_addrs = 1;
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_dev_configure(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int
+nfp_flower_repr_dev_start(struct rte_eth_dev *dev)
+{
+ struct nfp_app_flower *app_flower;
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ app_flower = repr->app_flower;
+
+ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT)
+ nfp_eth_set_configured(app_flower->pf_hw->pf_dev->cpp,
+ repr->nfp_idx, 1);
+
+ nfp_flower_cmsg_port_mod(app_flower, repr->port_id, true);
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_dev_stop(struct rte_eth_dev *dev)
+{
+ struct nfp_app_flower *app_flower;
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ app_flower = repr->app_flower;
+
+ nfp_flower_cmsg_port_mod(app_flower, repr->port_id, false);
+
+ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT)
+ nfp_eth_set_configured(app_flower->pf_hw->pf_dev->cpp,
+ repr->nfp_idx, 0);
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ struct nfp_net_rxq *rxq;
+ struct nfp_net_hw *pf_hw;
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ pf_hw = repr->app_flower->pf_hw;
+
+ /* Allocating rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ return -ENOMEM;
+
+ rxq->hw = pf_hw;
+ rxq->qidx = rx_queue_id;
+ rxq->port_id = dev->data->port_id;
+ dev->data->rx_queues[rx_queue_id] = rxq;
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ __rte_unused uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ struct nfp_net_txq *txq;
+ struct nfp_net_hw *pf_hw;
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ pf_hw = repr->app_flower->pf_hw;
+
+ /* Allocating tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ return -ENOMEM;
+
+ txq->hw = pf_hw;
+ txq->qidx = tx_queue_id;
+ txq->port_id = dev->data->port_id;
+ dev->data->tx_queues[tx_queue_id] = txq;
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_stats_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_stats *stats)
+{
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)ethdev->data->dev_private;
+ rte_memcpy(stats, &repr->repr_stats, sizeof(struct rte_eth_stats));
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_stats_reset(__rte_unused struct rte_eth_dev *ethdev)
+{
+ return 0;
+}
+
+static int
+nfp_flower_repr_promiscuous_enable(__rte_unused struct rte_eth_dev *ethdev)
+{
+ return 0;
+}
+
+static int
+nfp_flower_repr_promiscuous_disable(__rte_unused struct rte_eth_dev *ethdev)
+{
+ return 0;
+}
+
+static void
+nfp_flower_repr_mac_addr_remove(__rte_unused struct rte_eth_dev *ethdev,
+ __rte_unused uint32_t index)
+{
+}
+
+static int
+nfp_flower_repr_mac_addr_set(__rte_unused struct rte_eth_dev *ethdev,
+ __rte_unused struct rte_ether_addr *mac_addr)
+{
+ return 0;
+}
+
+static uint16_t
+nfp_flower_repr_rx_burst(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ unsigned int available = 0;
+ unsigned int total_dequeue;
+ struct nfp_net_rxq *rxq;
+ struct rte_eth_dev *dev;
+ struct nfp_flower_representor *repr;
+
+ rxq = rx_queue;
+ if (unlikely(rxq == NULL)) {
+ PMD_RX_LOG(ERR, "RX Bad queue");
+ return 0;
+ }
+
+ dev = &rte_eth_devices[rxq->port_id];
+ repr = dev->data->dev_private;
+ if (unlikely(repr->ring == NULL)) {
+ PMD_RX_LOG(ERR, "representor %s has no ring configured!",
+ repr->name);
+ return 0;
+ }
+
+ total_dequeue = rte_ring_dequeue_burst(repr->ring, (void *)rx_pkts,
+ nb_pkts, &available);
+ if (total_dequeue) {
+ PMD_RX_LOG(DEBUG, "Representor Rx burst for %s, port_id: 0x%x, "
+ "received: %u, available: %u", repr->name,
+ repr->port_id, total_dequeue, available);
+
+ repr->repr_stats.ipackets += total_dequeue;
+ }
+
+ return total_dequeue;
+}
+
+static uint16_t
+nfp_flower_repr_tx_burst(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t i;
+ uint16_t sent;
+ char *meta_offset;
+ struct nfp_net_txq *txq;
+ struct nfp_net_hw *pf_hw;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev *repr_dev;
+ struct nfp_flower_representor *repr;
+
+ txq = tx_queue;
+ if (unlikely(txq == NULL)) {
+ PMD_TX_LOG(ERR, "TX Bad queue");
+ return 0;
+ }
+
+ /* This points to the PF vNIC that owns this representor */
+ pf_hw = txq->hw;
+ dev = pf_hw->eth_dev;
+
+ /* Grab a handle to the representor struct */
+ repr_dev = &rte_eth_devices[txq->port_id];
+ repr = repr_dev->data->dev_private;
+
+ for (i = 0; i < nb_pkts; i++) {
+ meta_offset = rte_pktmbuf_prepend(tx_pkts[i], FLOWER_PKT_DATA_OFFSET);
+ *(uint32_t *)meta_offset = rte_cpu_to_be_32(NFP_NET_META_PORTID);
+ meta_offset += 4;
+ *(uint32_t *)meta_offset = rte_cpu_to_be_32(repr->port_id);
+ }
+
+ /* Only using Tx queue 0 for now. */
+ sent = rte_eth_tx_burst(dev->data->port_id, 0, tx_pkts, nb_pkts);
+ if (sent) {
+ PMD_TX_LOG(DEBUG, "Representor Tx burst for %s, port_id: 0x%x "
+ "transmitted: %u\n", repr->name, repr->port_id, sent);
+ repr->repr_stats.opackets += sent;
+ }
+
+ return sent;
+}
+
+static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
+ .dev_infos_get = nfp_flower_repr_dev_infos_get,
+
+ .dev_start = nfp_flower_repr_dev_start,
+ .dev_configure = nfp_flower_repr_dev_configure,
+ .dev_stop = nfp_flower_repr_dev_stop,
+
+ .rx_queue_setup = nfp_flower_repr_rx_queue_setup,
+ .tx_queue_setup = nfp_flower_repr_tx_queue_setup,
+
+ .link_update = nfp_flower_repr_link_update,
+
+ .stats_get = nfp_flower_repr_stats_get,
+ .stats_reset = nfp_flower_repr_stats_reset,
+
+ .promiscuous_enable = nfp_flower_repr_promiscuous_enable,
+ .promiscuous_disable = nfp_flower_repr_promiscuous_disable,
+
+ .mac_addr_remove = nfp_flower_repr_mac_addr_remove,
+ .mac_addr_set = nfp_flower_repr_mac_addr_set,
+};
+
+static uint32_t
+nfp_flower_get_phys_port_id(uint8_t port)
+{
+ return (NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT << 28) | port;
+}
+
+static uint32_t
+nfp_get_pcie_port_id(struct nfp_cpp *cpp,
+ int type,
+ uint8_t vnic,
+ uint8_t queue)
+{
+ uint8_t nfp_pcie;
+ uint32_t port_id;
+
+ nfp_pcie = NFP_CPP_INTERFACE_UNIT_of(nfp_cpp_interface(cpp));
+ port_id = ((nfp_pcie & 0x3) << 14) |
+ ((type & 0x3) << 12) |
+ ((vnic & 0x3f) << 6) |
+ (queue & 0x3f) |
+ ((NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT & 0xf) << 28);
+
+ return port_id;
+}
+
+static int
+nfp_flower_repr_init(struct rte_eth_dev *eth_dev,
+ void *init_params)
+{
+ int ret;
+ unsigned int numa_node;
+ char ring_name[RTE_ETH_NAME_MAX_LEN];
+ struct nfp_app_flower *app_flower;
+ struct nfp_flower_representor *repr;
+ struct nfp_flower_representor *init_repr_data;
+
+ /* Cast the input representor data to the correct struct here */
+ init_repr_data = (struct nfp_flower_representor *)init_params;
+
+ app_flower = init_repr_data->app_flower;
+
+ /* Memory has been allocated in the eth_dev_create() function */
+ repr = eth_dev->data->dev_private;
+
+ /*
+ * We need multi-producer rings as we can have multiple PF ports.
+ * On the other hand, we need single consumer rings, as just one
+ * representor PMD will try to read from the ring.
+ */
+ snprintf(ring_name, sizeof(ring_name), "%s_%s",
+ init_repr_data->name, "ring");
+ numa_node = rte_socket_id();
+ repr->ring = rte_ring_create(ring_name, 256, numa_node, RING_F_SC_DEQ);
+ if (repr->ring == NULL) {
+ PMD_INIT_LOG(ERR, "rte_ring_create failed for %s\n", ring_name);
+ return -ENOMEM;
+ }
+
+ /* Copy data here from the input representor template */
+ repr->vf_id = init_repr_data->vf_id;
+ repr->switch_domain_id = init_repr_data->switch_domain_id;
+ repr->port_id = init_repr_data->port_id;
+ repr->nfp_idx = init_repr_data->nfp_idx;
+ repr->repr_type = init_repr_data->repr_type;
+ repr->app_flower = init_repr_data->app_flower;
+
+ snprintf(repr->name, sizeof(repr->name), "%s", init_repr_data->name);
+
+ eth_dev->dev_ops = &nfp_flower_repr_dev_ops;
+
+ eth_dev->rx_pkt_burst = nfp_flower_repr_rx_burst;
+ eth_dev->tx_pkt_burst = nfp_flower_repr_tx_burst;
+
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT)
+ eth_dev->data->representor_id = repr->vf_id;
+ else
+ eth_dev->data->representor_id = repr->vf_id +
+ app_flower->num_phyport_reprs;
+
+ /* This backer port is that of the eth_device created for the PF vNIC */
+ eth_dev->data->backer_port_id = app_flower->pf_hw->eth_dev->data->port_id;
+
+ /* Only single queues for representor devices */
+ eth_dev->data->nb_rx_queues = 1;
+ eth_dev->data->nb_tx_queues = 1;
+
+ /* Allocating memory for mac addr */
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+ RTE_ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for repr MAC");
+ ret = -ENOMEM;
+ goto ring_cleanup;
+ }
+
+ rte_ether_addr_copy(&init_repr_data->mac_addr, &repr->mac_addr);
+ rte_ether_addr_copy(&init_repr_data->mac_addr, eth_dev->data->mac_addrs);
+
+ /* Send reify message to hardware to inform it about the new repr */
+ ret = nfp_flower_cmsg_repr_reify(app_flower, repr);
+ if (ret) {
+ PMD_INIT_LOG(WARNING, "Failed to send repr reify message");
+ goto mac_cleanup;
+ }
+
+ /* Add repr to correct array */
+ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT)
+ app_flower->phy_reprs[repr->nfp_idx] = repr;
+ else
+ app_flower->vf_reprs[repr->vf_id] = repr;
+
+ return 0;
+
+mac_cleanup:
+ rte_free(eth_dev->data->mac_addrs);
+ring_cleanup:
+ rte_ring_free(repr->ring);
+
+ return ret;
+}
+
+int
+nfp_flower_repr_alloc(struct nfp_app_flower *app_flower)
+{
+ int i;
+ int ret;
+ struct rte_eth_dev *eth_dev;
+ struct nfp_eth_table *nfp_eth_table;
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_flower_representor flower_repr = {
+ .switch_domain_id = app_flower->switch_domain_id,
+ .app_flower = app_flower,
+ };
+
+ nfp_eth_table = app_flower->nfp_eth_table;
+ eth_dev = app_flower->pf_hw->eth_dev;
+
+ /* Send a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware */
+ ret = nfp_flower_cmsg_mac_repr(app_flower);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Could not send mac repr cmsgs");
+ return ret;
+ }
+
+ /* Create a rte_eth_dev for every phyport representor */
+ for (i = 0; i < app_flower->num_phyport_reprs; i++) {
+ eth_port = &nfp_eth_table->ports[i];
+ flower_repr.repr_type = NFP_REPR_TYPE_PHYS_PORT;
+ flower_repr.port_id = nfp_flower_get_phys_port_id(eth_port->index);
+ flower_repr.nfp_idx = eth_port->eth_index;
+ flower_repr.vf_id = i;
+
+ /* Copy the real mac of the interface to the representor struct */
+ rte_ether_addr_copy((struct rte_ether_addr *)eth_port->mac_addr,
+ &flower_repr.mac_addr);
+ snprintf(flower_repr.name, sizeof(flower_repr.name), "flower_repr_p%d", i);
+
+ /*
+ * Create a eth_dev for this representor
+ * This will also allocate private memory for the device
+ */
+ ret = rte_eth_dev_create(eth_dev->device, flower_repr.name,
+ sizeof(struct nfp_flower_representor),
+ NULL, NULL, nfp_flower_repr_init, &flower_repr);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Could not create eth_dev for repr");
+ break;
+ }
+ }
+
+ if (i < app_flower->num_phyport_reprs)
+ return ret;
+
+ /*
+ * Now allocate eth_dev's for VF representors.
+ * Also send reify messages
+ */
+ for (i = 0; i < app_flower->num_vf_reprs; i++) {
+ flower_repr.repr_type = NFP_REPR_TYPE_VF;
+ flower_repr.port_id = nfp_get_pcie_port_id(app_flower->pf_hw->cpp,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF, i, 0);
+ flower_repr.nfp_idx = 0;
+ flower_repr.vf_id = i;
+
+ /* VF reprs get a random MAC address */
+ rte_eth_random_addr(flower_repr.mac_addr.addr_bytes);
+
+ snprintf(flower_repr.name, sizeof(flower_repr.name), "flower_repr_vf%d", i);
+
+ /* This will also allocate private memory for the device*/
+ ret = rte_eth_dev_create(eth_dev->device, flower_repr.name,
+ sizeof(struct nfp_flower_representor),
+ NULL, NULL, nfp_flower_repr_init, &flower_repr);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Could not create eth_dev for repr");
+ break;
+ }
+ }
+
+ if (i < app_flower->num_vf_reprs)
+ return ret;
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _NFP_FLOWER_REPRESENTOR_H_
+#define _NFP_FLOWER_REPRESENTOR_H_
+
+/*
+ * enum nfp_repr_type - type of representor
+ * @NFP_REPR_TYPE_PHYS_PORT: external NIC port
+ * @NFP_REPR_TYPE_PF: physical function
+ * @NFP_REPR_TYPE_VF: virtual function
+ * @NFP_REPR_TYPE_MAX: number of representor types
+ */
+enum nfp_repr_type {
+ NFP_REPR_TYPE_PHYS_PORT = 0,
+ NFP_REPR_TYPE_PF,
+ NFP_REPR_TYPE_VF,
+ NFP_REPR_TYPE_MAX,
+};
+
+struct nfp_flower_representor {
+ uint16_t vf_id;
+ uint16_t switch_domain_id;
+ uint32_t repr_type;
+ uint32_t port_id;
+ uint32_t nfp_idx; /* only valid for the repr of physical port */
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_ether_addr mac_addr;
+ struct nfp_app_flower *app_flower;
+ struct rte_ring *ring;
+ struct rte_eth_link *link;
+ struct rte_eth_stats repr_stats;
+};
+
+int nfp_flower_repr_alloc(struct nfp_app_flower *app_flower);
+
+#endif /* _NFP_FLOWER_REPRESENTOR_H_ */
@@ -7,7 +7,9 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
endif
sources = files(
'flower/nfp_flower.c',
+ 'flower/nfp_flower_cmsg.c',
'flower/nfp_flower_ctrl.c',
+ 'flower/nfp_flower_representor.c',
'nfpcore/nfp_cpp_pcie_ops.c',
'nfpcore/nfp_nsp.c',
'nfpcore/nfp_cppcore.c',
@@ -91,7 +91,10 @@
* @refcnt: number of current users
* @iomem: mapped IO memory
*/
+#define NFP_BAR_MIN 1
+#define NFP_BAR_MID 5
#define NFP_BAR_MAX 7
+
struct nfp_bar {
struct nfp_pcie_user *nfp;
uint32_t barcfg;
@@ -292,6 +295,7 @@ struct nfp_pcie_user {
* BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
*
* Halving PCItoCPPBars for primary and secondary processes.
+ * For CoreNIC firmware:
* NFP PMD just requires two fixed slots, one for configuration BAR,
* and another for accessing the hw queues. Another slot is needed
* for setting the link up or down. Secondary processes do not need
@@ -301,6 +305,9 @@ struct nfp_pcie_user {
* supported. Due to this requirement and future extensions requiring
* new slots per process, only one secondary process is supported by
* now.
+ * For Flower firmware:
+ * NFP PMD needs another fixed slot, used as the configuration BAR
+ * for the ctrl vNIC.
*/
static int
nfp_enable_bars(struct nfp_pcie_user *nfp)
@@ -309,11 +316,11 @@ struct nfp_pcie_user {
int x, start, end;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- start = 4;
- end = 1;
+ start = NFP_BAR_MID;
+ end = NFP_BAR_MIN;
} else {
- start = 7;
- end = 4;
+ start = NFP_BAR_MAX;
+ end = NFP_BAR_MID;
}
for (x = start; x > end; x--) {
bar = &nfp->bar[x - 1];
@@ -341,11 +348,11 @@ struct nfp_pcie_user {
int x, start, end;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- start = 4;
- end = 1;
+ start = NFP_BAR_MID;
+ end = NFP_BAR_MIN;
} else {
- start = 7;
- end = 4;
+ start = NFP_BAR_MAX;
+ end = NFP_BAR_MID;
}
for (x = start; x > end; x--) {
bar = &nfp->bar[x - 1];
@@ -364,11 +371,11 @@ struct nfp_pcie_user {
int x, start, end;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- start = 4;
- end = 1;
+ start = NFP_BAR_MID;
+ end = NFP_BAR_MIN;
} else {
- start = 7;
- end = 4;
+ start = NFP_BAR_MAX;
+ end = NFP_BAR_MID;
}
for (x = start; x > end; x--) {