@@ -194,3 +194,9 @@ The flower firmware application requires the PMD running two services:
* PF vNIC service: handling the feedback traffic.
* ctrl vNIC service: communicate between PMD and firmware through
control message.
+
+To achieve flow offload, the representor ports are exposed to OVS.
+The flower firmware application supports representor ports for both VFs and
+physical ports. A representor port always exists for each physical port,
+and the number of VF representor ports is specified by the user through
+the devargs parameter.
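+
+For example, with a hypothetical NIC at PCI address ``0000:01:00.0`` that has
+two physical ports, a testpmd invocation that additionally requests two VF
+representor ports could look as follows (the PCI address and representor
+range are illustrative only):
+
+.. code-block:: console
+
+   dpdk-testpmd -a 0000:01:00.0,representor=[0-4] -- -i
+
+Here ``representor=[0-4]`` requests five representor ports: one for the PF
+vNIC, one per physical port and two for VFs. The requested range must always
+be large enough to cover the PF vNIC and every physical port in addition to
+the VF representor ports.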
@@ -62,6 +62,7 @@ New Features
* Added the support of flower firmware.
* Added the flower service infrastructure.
* Added the control message interactive channels between PMD and firmware.
+ * Added the support of representor ports.
* **Updated Wangxun ngbe driver.**
@@ -20,6 +20,7 @@
#include "../nfpcore/nfp_nsp.h"
#include "nfp_flower.h"
#include "nfp_flower_ctrl.h"
+#include "nfp_flower_representor.h"
#define CTRL_VNIC_NB_DESC 512
#define DEFAULT_FLBUF_SIZE 9216
@@ -569,6 +570,12 @@
goto ctrl_vnic_cleanup;
}
+ ret = nfp_flower_repr_create(app_fw_flower);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Could not create representor ports");
+ goto ctrl_vnic_cleanup;
+ }
+
return 0;
ctrl_vnic_cleanup:
@@ -14,8 +14,20 @@
*/
#define FLOWER_PKT_DATA_OFFSET 8
+#define MAX_FLOWER_PHYPORTS 8
+#define MAX_FLOWER_VFS 64
+
/* The flower application's private structure */
struct nfp_app_fw_flower {
+ /* switch domain for this app */
+ uint16_t switch_domain_id;
+
+ /* Number of VF representors */
+ uint8_t num_vf_reprs;
+
+ /* Number of phyport representors */
+ uint8_t num_phyport_reprs;
+
/* Pointer to the PF vNIC */
struct nfp_net_hw *pf_hw;
@@ -30,6 +42,15 @@ struct nfp_app_fw_flower {
/* Ctrl vNIC Tx counter */
uint64_t ctrl_vnic_tx_count;
+
+ /* Array of phyport representors */
+ struct nfp_flower_representor *phy_reprs[MAX_FLOWER_PHYPORTS];
+
+ /* Array of VF representors */
+ struct nfp_flower_representor *vf_reprs[MAX_FLOWER_VFS];
+
+ /* PF representor */
+ struct nfp_flower_representor *pf_repr;
};
int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev);
new file mode 100644
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#include "../nfpcore/nfp_nsp.h"
+#include "../nfp_logs.h"
+#include "../nfp_common.h"
+#include "nfp_flower.h"
+#include "nfp_flower_cmsg.h"
+#include "nfp_flower_ctrl.h"
+#include "nfp_flower_representor.h"
+
+static void *
+nfp_flower_cmsg_init(struct rte_mbuf *m,
+ enum nfp_flower_cmsg_type type,
+ uint32_t size)
+{
+ char *pkt;
+ uint32_t data;
+ uint32_t new_size = size;
+ struct nfp_flower_cmsg_hdr *hdr;
+
+ pkt = rte_pktmbuf_mtod(m, char *);
+ PMD_DRV_LOG(DEBUG, "flower_cmsg_init using pkt at %p", pkt);
+
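+ /* Prepend the metadata type word; the port id field follows it */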
+ data = rte_cpu_to_be_32(NFP_NET_META_PORTID);
+ rte_memcpy(pkt, &data, 4);
+ pkt += 4;
+ new_size += 4;
+
+ /* First the metadata as flower requires it */
+ data = rte_cpu_to_be_32(NFP_META_PORT_ID_CTRL);
+ rte_memcpy(pkt, &data, 4);
+ pkt += 4;
+ new_size += 4;
+
+ /* Now the ctrl header */
+ hdr = (struct nfp_flower_cmsg_hdr *)pkt;
+ hdr->pad = 0;
+ hdr->type = type;
+ hdr->version = NFP_FLOWER_CMSG_VER1;
+
+ pkt = (char *)hdr + NFP_FLOWER_CMSG_HLEN;
+ new_size += NFP_FLOWER_CMSG_HLEN;
+
+ m->pkt_len = new_size;
+ m->data_len = m->pkt_len;
+
+ return pkt;
+}
+
+static void
+nfp_flower_cmsg_mac_repr_init(struct rte_mbuf *mbuf, int num_ports)
+{
+ uint32_t size;
+ struct nfp_flower_cmsg_mac_repr *msg;
+ enum nfp_flower_cmsg_type type = NFP_FLOWER_CMSG_TYPE_MAC_REPR;
+
+ size = sizeof(*msg) + (num_ports * sizeof(msg->ports[0]));
+ msg = nfp_flower_cmsg_init(mbuf, type, size);
+ memset(msg->reserved, 0, sizeof(msg->reserved));
+ msg->num_ports = num_ports;
+}
+
+static void
+nfp_flower_cmsg_mac_repr_fill(struct rte_mbuf *m,
+ unsigned int idx,
+ unsigned int nbi,
+ unsigned int nbi_port,
+ unsigned int phys_port)
+{
+ struct nfp_flower_cmsg_mac_repr *msg;
+
+ msg = (struct nfp_flower_cmsg_mac_repr *)nfp_flower_cmsg_get_data(m);
+ msg->ports[idx].idx = idx;
+ msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI;
+ msg->ports[idx].nbi_port = nbi_port;
+ msg->ports[idx].phys_port = phys_port;
+}
+
+int
+nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower)
+{
+ int i;
+ uint16_t cnt;
+ unsigned int nbi;
+ unsigned int nbi_port;
+ unsigned int phys_port;
+ struct rte_mbuf *mbuf;
+ struct nfp_eth_table *nfp_eth_table;
+
+ mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(ERR, "Could not allocate mac repr cmsg");
+ return -ENOMEM;
+ }
+
+ nfp_flower_cmsg_mac_repr_init(mbuf, app_fw_flower->num_phyport_reprs);
+
+ /* Fill in the mac repr cmsg */
+ nfp_eth_table = app_fw_flower->pf_hw->pf_dev->nfp_eth_table;
+ for (i = 0; i < app_fw_flower->num_phyport_reprs; i++) {
+ nbi = nfp_eth_table->ports[i].nbi;
+ nbi_port = nfp_eth_table->ports[i].base;
+ phys_port = nfp_eth_table->ports[i].index;
+
+ nfp_flower_cmsg_mac_repr_fill(mbuf, i, nbi, nbi_port, phys_port);
+ }
+
+ /* Send the cmsg via the ctrl vNIC */
+ cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+ if (cnt == 0) {
+ PMD_DRV_LOG(ERR, "Failed to send cmsg through ctrl vNIC");
+ rte_pktmbuf_free(mbuf);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+nfp_flower_cmsg_repr_reify(struct nfp_app_fw_flower *app_fw_flower,
+ struct nfp_flower_representor *repr)
+{
+ uint16_t cnt;
+ struct rte_mbuf *mbuf;
+ struct nfp_flower_cmsg_port_reify *msg;
+
+ mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(DEBUG, "alloc mbuf for repr reify failed");
+ return -ENOMEM;
+ }
+
+ msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_PORT_REIFY, sizeof(*msg));
+ msg->portnum = rte_cpu_to_be_32(repr->port_id);
+ msg->reserved = 0;
+ msg->info = rte_cpu_to_be_16(1);
+
+ cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+ if (cnt == 0) {
+ PMD_DRV_LOG(ERR, "Failed to send cmsg through ctrl vNIC");
+ rte_pktmbuf_free(mbuf);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+nfp_flower_cmsg_port_mod(struct nfp_app_fw_flower *app_fw_flower,
+ uint32_t port_id, bool carrier_ok)
+{
+ uint16_t cnt;
+ struct rte_mbuf *mbuf;
+ struct nfp_flower_cmsg_port_mod *msg;
+
+ mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(DEBUG, "alloc mbuf for repr portmod failed");
+ return -ENOMEM;
+ }
+
+ msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_PORT_MOD, sizeof(*msg));
+ msg->portnum = rte_cpu_to_be_32(port_id);
+ msg->reserved = 0;
+ msg->info = carrier_ok;
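+ /* MTU is hardcoded for now, matching the 9000B max packet length the representors advertise */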
+ msg->mtu = 9000;
+
+ cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+ if (cnt == 0) {
+ PMD_DRV_LOG(ERR, "Failed to send cmsg through ctrl vNIC");
+ rte_pktmbuf_free(mbuf);
+ return -EIO;
+ }
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _NFP_CMSG_H_
+#define _NFP_CMSG_H_
+
+#include <rte_byteorder.h>
+#include <rte_ether.h>
+
+struct nfp_flower_cmsg_hdr {
+ rte_be16_t pad;
+ uint8_t type;
+ uint8_t version;
+};
+
+/* Types defined for control messages */
+enum nfp_flower_cmsg_type {
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD = 1,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
+ NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4,
+ NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
+ NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
+ NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_MERGE_HINT = 9,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
+ NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH = 13,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14,
+ NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
+ NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
+ NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
+ NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6 = 22,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6 = 23,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 = 24,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6 = 25,
+ NFP_FLOWER_CMSG_TYPE_MAX = 32,
+};
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_MAC_REPR
+ * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * Word +---------------+-----------+---+---------------+---------------+
+ * 0 | spare |Number of ports|
+ * +---------------+-----------+---+---------------+---------------+
+ * 1 | Index | spare |NBI| Port on NBI | Chip-wide port|
+ * +---------------+-----------+---+---------------+---------------+
+ * ....
+ * +---------------+-----------+---+---------------+---------------+
+ * N-1 | Index | spare |NBI| Port on NBI | Chip-wide port|
+ * +---------------+-----------+---+---------------+---------------+
+ * N | Index | spare |NBI| Port on NBI | Chip-wide port|
+ * +---------------+-----------+---+---------------+---------------+
+ *
+ * Index: index into the eth table
+ * NBI (bits 17-16): NBI number (0-3)
+ * Port on NBI (bits 15-8): “base” in the driver
+ * this forms NBIX.PortY notation as the NSP eth table.
+ * "Chip-wide" port (bits 7-0):
+ */
+struct nfp_flower_cmsg_mac_repr {
+ uint8_t reserved[3];
+ uint8_t num_ports;
+ struct {
+ uint8_t idx;
+ uint8_t info;
+ uint8_t nbi_port;
+ uint8_t phys_port;
+ } ports[0];
+};
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_PORT_REIFY
+ * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * Word +-------+-------+---+---+-------+---+---+-----------+-----------+
+ * 0 |Port Ty|Sys ID |NIC|Rsv| Spare |PCI|typ| vNIC | queue |
+ * +-------+-----+-+---+---+-------+---+---+-----------+---------+-+
+ * 1 | Spare |E|
+ * +-------------------------------------------------------------+-+
+ * E: 1 = Representor exists, 0 = Representor does not exist
+ */
+struct nfp_flower_cmsg_port_reify {
+ rte_be32_t portnum;
+ rte_be16_t reserved;
+ rte_be16_t info;
+};
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_PORT_MOD
+ * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * Word +-------+-------+---+---+-------+---+---+-------+---+-----------+
+ * 0 |Port Ty|Sys ID |NIC|Rsv| Reserved | Port |
+ * +-------+-------+---+---+-----+-+---+---+-------+---+-----------+
+ * 1 | Spare |L| MTU |
+ * +-----------------------------+-+-------------------------------+
+ * L: Link or Admin state bit. When message is generated by host, this
+ * bit indicates the admin state (0=down, 1=up). When generated by
+ * NFP, it indicates the link state (0=down, 1=up)
+ *
+ * Port Type (word 1, bits 31 to 28) = 1 (Physical Network)
+ * Port: “Chip-wide number” as assigned by BSP
+ *
+ * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * Word +-------+-------+---+---+-------+---+---+-------+---+-----------+
+ * 0 |Port Ty|Sys ID |NIC|Rsv| Spare |PCI|typ| vNIC | queue |
+ * +-------+-----+-+---+---+---+-+-+---+---+-------+---+-----------+
+ * 1 | Spare |L| MTU |
+ * +-----------------------------+-+-------------------------------+
+ * L: Link or Admin state bit. When message is generated by host, this
+ * bit indicates the admin state (0=down, 1=up). When generated by
+ * NFP, it indicates the link state (0=down, 1=up)
+ *
+ * Port Type (word 1, bits 31 to 28) = 2 (PCIE)
+ */
+struct nfp_flower_cmsg_port_mod {
+ rte_be32_t portnum;
+ uint8_t reserved;
+ uint8_t info;
+ rte_be16_t mtu;
+};
+
+enum nfp_flower_cmsg_port_type {
+ NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC,
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT,
+ NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT,
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT,
+};
+
+enum nfp_flower_cmsg_port_vnic_type {
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_CTRL,
+};
+
+#define NFP_FLOWER_CMSG_MAC_REPR_NBI (0x3)
+
+#define NFP_FLOWER_CMSG_HLEN sizeof(struct nfp_flower_cmsg_hdr)
+#define NFP_FLOWER_CMSG_VER1 1
+#define NFP_NET_META_PORTID 5
+#define NFP_META_PORT_ID_CTRL ~0U
+
+#define NFP_FLOWER_CMSG_PORT_TYPE(x) (((x) >> 28) & 0xf) /* [31,28] */
+#define NFP_FLOWER_CMSG_PORT_SYS_ID(x) (((x) >> 24) & 0xf) /* [24,27] */
+#define NFP_FLOWER_CMSG_PORT_NFP_ID(x) (((x) >> 22) & 0x3) /* [22,23] */
+#define NFP_FLOWER_CMSG_PORT_PCI(x) (((x) >> 14) & 0x3) /* [14,15] */
+#define NFP_FLOWER_CMSG_PORT_VNIC_TYPE(x) (((x) >> 12) & 0x3) /* [12,13] */
+#define NFP_FLOWER_CMSG_PORT_VNIC(x) (((x) >> 6) & 0x3f) /* [6,11] */
+#define NFP_FLOWER_CMSG_PORT_PCIE_Q(x) ((x) & 0x3f) /* [0,5] */
+#define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(x) ((x) & 0xff) /* [0,7] */
+
+static inline char*
+nfp_flower_cmsg_get_data(struct rte_mbuf *m)
+{
+ return rte_pktmbuf_mtod(m, char *) + 4 + 4 + NFP_FLOWER_CMSG_HLEN;
+}
+
+int nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower);
+int nfp_flower_cmsg_repr_reify(struct nfp_app_fw_flower *app_fw_flower,
+ struct nfp_flower_representor *repr);
+int nfp_flower_cmsg_port_mod(struct nfp_app_fw_flower *app_fw_flower,
+ uint32_t port_id, bool carrier_ok);
+
+#endif /* _NFP_CMSG_H_ */
new file mode 100644
@@ -0,0 +1,833 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#include <rte_common.h>
+#include <ethdev_pci.h>
+
+#include "../nfp_common.h"
+#include "../nfp_logs.h"
+#include "../nfp_ctrl.h"
+#include "../nfp_rxtx.h"
+#include "../nfpcore/nfp_mip.h"
+#include "../nfpcore/nfp_rtsym.h"
+#include "../nfpcore/nfp_nsp.h"
+#include "nfp_flower.h"
+#include "nfp_flower_representor.h"
+#include "nfp_flower_ctrl.h"
+#include "nfp_flower_cmsg.h"
+
+static int
+nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct nfp_net_hw *hw;
+ struct nfp_net_rxq *rxq;
+ const struct rte_memzone *tz;
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ hw = repr->app_fw_flower->pf_hw;
+
+ /* Allocating rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ return -ENOMEM;
+
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ /* Hw queues mapping based on firmware configuration */
+ rxq->qidx = queue_idx;
+ rxq->fl_qcidx = queue_idx * hw->stride_rx;
+ rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
+ rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
+ rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
+
+ /*
+ * Tracking mbuf size for detecting a potential mbuf overflow due to
+ * RX offset
+ */
+ rxq->mem_pool = mp;
+ rxq->mbuf_size = rxq->mem_pool->elt_size;
+ rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
+ hw->flbufsz = rxq->mbuf_size;
+
+ rxq->rx_count = nb_desc;
+ rxq->port_id = dev->data->port_id;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->drop_en = rx_conf->rx_drop_en;
+
+ /*
+ * Allocate RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ sizeof(struct nfp_net_rx_desc) * NFP_NET_MAX_RX_DESC,
+ NFP_MEMZONE_ALIGN, socket_id);
+ if (tz == NULL) {
+ PMD_DRV_LOG(ERR, "Error allocating rx dma");
+ nfp_net_rx_queue_release(dev, queue_idx);
+ dev->data->rx_queues[queue_idx] = NULL;
+ return -ENOMEM;
+ }
+
+ /* Saving physical and virtual addresses for the RX ring */
+ rxq->dma = (uint64_t)tz->iova;
+ rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
+
+ /* mbuf pointers array for referencing mbufs linked to RX descriptors */
+ rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
+ sizeof(*rxq->rxbufs) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->rxbufs == NULL) {
+ nfp_net_rx_queue_release(dev, queue_idx);
+ dev->data->rx_queues[queue_idx] = NULL;
+ return -ENOMEM;
+ }
+
+ nfp_net_reset_rx_queue(rxq);
+ rxq->hw = hw;
+
+ /*
+ * Telling the HW about the physical address of the RX ring and number
+ * of descriptors in log2 format
+ */
+ nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
+
+ return 0;
+}
+
+static int
+nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct nfp_net_hw *hw;
+ struct nfp_net_txq *txq;
+ uint16_t tx_free_thresh;
+ const struct rte_memzone *tz;
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ hw = repr->app_fw_flower->pf_hw;
+
+ tx_free_thresh = (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
+ DEFAULT_TX_FREE_THRESH;
+ if (tx_free_thresh > nb_desc)
+ return -EINVAL;
+
+ /* Allocating tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL) {
+ PMD_DRV_LOG(ERR, "Error allocating tx dma");
+ return -ENOMEM;
+ }
+
+ dev->data->tx_queues[queue_idx] = txq;
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ sizeof(struct nfp_net_nfd3_tx_desc) * NFP_NET_MAX_TX_DESC,
+ NFP_MEMZONE_ALIGN, socket_id);
+ if (tz == NULL) {
+ PMD_DRV_LOG(ERR, "Error allocating tx dma");
+ nfp_net_tx_queue_release(dev, queue_idx);
+ dev->data->tx_queues[queue_idx] = NULL;
+ return -ENOMEM;
+ }
+
+ txq->tx_count = nb_desc;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
+ txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
+ txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
+
+ /* queue mapping based on firmware configuration */
+ txq->qidx = queue_idx;
+ txq->tx_qcidx = queue_idx * hw->stride_tx;
+ txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
+
+ txq->port_id = dev->data->port_id;
+
+ /* Saving physical and virtual addresses for the TX ring */
+ txq->dma = (uint64_t)tz->iova;
+ txq->txds = (struct nfp_net_nfd3_tx_desc *)tz->addr;
+
+ /* mbuf pointers array for referencing mbufs linked to TX descriptors */
+ txq->txbufs = rte_zmalloc_socket("txq->txbufs",
+ sizeof(*txq->txbufs) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->txbufs == NULL) {
+ nfp_net_tx_queue_release(dev, queue_idx);
+ dev->data->tx_queues[queue_idx] = NULL;
+ return -ENOMEM;
+ }
+
+ nfp_net_reset_tx_queue(txq);
+ txq->hw = hw;
+
+ /*
+ * Telling the HW about the physical address of the TX ring and number
+ * of descriptors in log2 format
+ */
+ nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
+ nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ int ret;
+ uint32_t nn_link_status;
+ struct nfp_net_hw *pf_hw;
+ struct rte_eth_link *link;
+ struct nfp_flower_representor *repr;
+
+ static const uint32_t ls_to_ethtool[] = {
+ [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
+ [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = RTE_ETH_SPEED_NUM_NONE,
+ [NFP_NET_CFG_STS_LINK_RATE_1G] = RTE_ETH_SPEED_NUM_1G,
+ [NFP_NET_CFG_STS_LINK_RATE_10G] = RTE_ETH_SPEED_NUM_10G,
+ [NFP_NET_CFG_STS_LINK_RATE_25G] = RTE_ETH_SPEED_NUM_25G,
+ [NFP_NET_CFG_STS_LINK_RATE_40G] = RTE_ETH_SPEED_NUM_40G,
+ [NFP_NET_CFG_STS_LINK_RATE_50G] = RTE_ETH_SPEED_NUM_50G,
+ [NFP_NET_CFG_STS_LINK_RATE_100G] = RTE_ETH_SPEED_NUM_100G,
+ };
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ link = &repr->link;
+ pf_hw = repr->app_fw_flower->pf_hw;
+
+ memset(link, 0, sizeof(struct rte_eth_link));
+ nn_link_status = nn_cfg_readl(pf_hw, NFP_NET_CFG_STS);
+
+ if (nn_link_status & NFP_NET_CFG_STS_LINK)
+ link->link_status = RTE_ETH_LINK_UP;
+
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+
+ nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
+ NFP_NET_CFG_STS_LINK_RATE_MASK;
+
+ if (nn_link_status >= RTE_DIM(ls_to_ethtool))
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+ else
+ link->link_speed = ls_to_ethtool[nn_link_status];
+
+ ret = rte_eth_linkstatus_set(dev, link);
+ if (ret == 0) {
+ if (link->link_status)
+ PMD_DRV_LOG(INFO, "NIC Link is Up");
+ else
+ PMD_DRV_LOG(INFO, "NIC Link is Down");
+ }
+
+ return ret;
+}
+
+static int
+nfp_flower_repr_dev_infos_get(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ /* Hardcoded pktlen and queues for now */
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = 1;
+ dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
+ dev_info->max_rx_pktlen = 9000;
+
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+
+ dev_info->max_mac_addrs = 1;
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_dev_configure(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *pf_hw;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ pf_hw = repr->app_fw_flower->pf_hw;
+
+ dev_conf = &dev->data->dev_conf;
+ rxmode = &dev_conf->rxmode;
+
+ /* Checking MTU set */
+ if (rxmode->mtu > pf_hw->flbufsz) {
+ PMD_DRV_LOG(INFO, "MTU (%u) larger than current mbufsize (%u) not supported",
+ rxmode->mtu, pf_hw->flbufsz);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_dev_start(struct rte_eth_dev *dev)
+{
+ struct nfp_flower_representor *repr;
+ struct nfp_app_fw_flower *app_fw_flower;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ app_fw_flower = repr->app_fw_flower;
+
+ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) {
+ nfp_eth_set_configured(app_fw_flower->pf_hw->pf_dev->cpp,
+ repr->nfp_idx, 1);
+ }
+
+ nfp_flower_cmsg_port_mod(app_fw_flower, repr->port_id, true);
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_dev_stop(struct rte_eth_dev *dev)
+{
+ struct nfp_flower_representor *repr;
+ struct nfp_app_fw_flower *app_fw_flower;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ app_fw_flower = repr->app_fw_flower;
+
+ nfp_flower_cmsg_port_mod(app_fw_flower, repr->port_id, false);
+
+ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) {
+ nfp_eth_set_configured(app_fw_flower->pf_hw->pf_dev->cpp,
+ repr->nfp_idx, 0);
+ }
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ struct nfp_net_rxq *rxq;
+ struct nfp_net_hw *pf_hw;
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ pf_hw = repr->app_fw_flower->pf_hw;
+
+ /* Allocating rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ return -ENOMEM;
+
+ rxq->hw = pf_hw;
+ rxq->qidx = rx_queue_id;
+ rxq->port_id = dev->data->port_id;
+ dev->data->rx_queues[rx_queue_id] = rxq;
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ __rte_unused uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ struct nfp_net_txq *txq;
+ struct nfp_net_hw *pf_hw;
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ pf_hw = repr->app_fw_flower->pf_hw;
+
+ /* Allocating tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ return -ENOMEM;
+
+ txq->hw = pf_hw;
+ txq->qidx = tx_queue_id;
+ txq->port_id = dev->data->port_id;
+ dev->data->tx_queues[tx_queue_id] = txq;
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_stats_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_stats *stats)
+{
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)ethdev->data->dev_private;
+ rte_memcpy(stats, &repr->repr_stats, sizeof(struct rte_eth_stats));
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_stats_reset(struct rte_eth_dev *ethdev)
+{
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)ethdev->data->dev_private;
+ memset(&repr->repr_stats, 0, sizeof(struct rte_eth_stats));
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *pf_hw;
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ pf_hw = repr->app_fw_flower->pf_hw;
+
+ if (!(pf_hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
+ PMD_DRV_LOG(INFO, "Promiscuous mode not supported");
+ return -ENOTSUP;
+ }
+
+ if (pf_hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
+ PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
+ return 0;
+ }
+
+ return nfp_net_promisc_enable(pf_hw->eth_dev);
+}
+
+static int
+nfp_flower_repr_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *pf_hw;
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ pf_hw = repr->app_fw_flower->pf_hw;
+
+ if ((pf_hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
+ PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
+ return 0;
+ }
+
+ return nfp_net_promisc_disable(pf_hw->eth_dev);
+}
+
+static int
+nfp_flower_repr_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)ethdev->data->dev_private;
+ rte_ether_addr_copy(mac_addr, &repr->mac_addr);
+ rte_ether_addr_copy(mac_addr, ethdev->data->mac_addrs);
+
+ return 0;
+}
+
+static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = {
+ .dev_infos_get = nfp_flower_repr_dev_infos_get,
+
+ .dev_configure = nfp_flower_repr_dev_configure,
+
+ .rx_queue_setup = nfp_pf_repr_rx_queue_setup,
+ .tx_queue_setup = nfp_pf_repr_tx_queue_setup,
+
+ .link_update = nfp_flower_repr_link_update,
+
+ .stats_get = nfp_flower_repr_stats_get,
+ .stats_reset = nfp_flower_repr_stats_reset,
+
+ .promiscuous_enable = nfp_flower_repr_promiscuous_enable,
+ .promiscuous_disable = nfp_flower_repr_promiscuous_disable,
+
+ .mac_addr_set = nfp_flower_repr_mac_addr_set,
+};
+
+static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
+ .dev_infos_get = nfp_flower_repr_dev_infos_get,
+
+ .dev_start = nfp_flower_repr_dev_start,
+ .dev_configure = nfp_flower_repr_dev_configure,
+ .dev_stop = nfp_flower_repr_dev_stop,
+
+ .rx_queue_setup = nfp_flower_repr_rx_queue_setup,
+ .tx_queue_setup = nfp_flower_repr_tx_queue_setup,
+
+ .link_update = nfp_flower_repr_link_update,
+
+ .stats_get = nfp_flower_repr_stats_get,
+ .stats_reset = nfp_flower_repr_stats_reset,
+
+ .promiscuous_enable = nfp_flower_repr_promiscuous_enable,
+ .promiscuous_disable = nfp_flower_repr_promiscuous_disable,
+
+ .mac_addr_set = nfp_flower_repr_mac_addr_set,
+};
+
+static uint32_t
+nfp_flower_get_phys_port_id(uint8_t port)
+{
+ return (NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT << 28) | port;
+}
+
+static uint32_t
+nfp_get_pcie_port_id(struct nfp_cpp *cpp,
+ int type,
+ uint8_t vnic,
+ uint8_t queue)
+{
+ uint8_t nfp_pcie;
+ uint32_t port_id;
+
+ nfp_pcie = NFP_CPP_INTERFACE_UNIT_of(nfp_cpp_interface(cpp));
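+ /*
+  * Assemble the PCIe port id: port type in bits [31:28], PCIe unit in
+  * bits [15:14], vNIC type in bits [13:12], vNIC number in bits [11:6]
+  * and queue in bits [5:0], matching the NFP_FLOWER_CMSG_PORT_* masks.
+  */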
+ port_id = ((nfp_pcie & 0x3) << 14) |
+ ((type & 0x3) << 12) |
+ ((vnic & 0x3f) << 6) |
+ (queue & 0x3f) |
+ ((NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT & 0xf) << 28);
+
+ return port_id;
+}
+
+static int
+nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev,
+ void *init_params)
+{
+ struct nfp_flower_representor *repr;
+ struct nfp_flower_representor *init_repr_data;
+
+ /* Cast the input representor data to the correct struct here */
+ init_repr_data = (struct nfp_flower_representor *)init_params;
+
+ /* Memory has been allocated in the eth_dev_create() function */
+ repr = eth_dev->data->dev_private;
+
+ /* Copy data here from the input representor template */
+ repr->vf_id = init_repr_data->vf_id;
+ repr->switch_domain_id = init_repr_data->switch_domain_id;
+ repr->repr_type = init_repr_data->repr_type;
+ repr->app_fw_flower = init_repr_data->app_fw_flower;
+
+ snprintf(repr->name, sizeof(repr->name), "%s", init_repr_data->name);
+
+ eth_dev->dev_ops = &nfp_flower_pf_repr_dev_ops;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ eth_dev->data->representor_id = 0;
+
+ /* This backer port is that of the eth_device created for the PF vNIC */
+ eth_dev->data->backer_port_id = 0;
+
+ /* Only single queues for representor devices */
+ eth_dev->data->nb_rx_queues = 1;
+ eth_dev->data->nb_tx_queues = 1;
+
+ /* Allocating memory for mac addr */
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for repr MAC");
+ return -ENOMEM;
+ }
+
+ rte_ether_addr_copy(&init_repr_data->mac_addr, &repr->mac_addr);
+ rte_ether_addr_copy(&init_repr_data->mac_addr, eth_dev->data->mac_addrs);
+
+ repr->app_fw_flower->pf_repr = repr;
+ repr->app_fw_flower->pf_hw->eth_dev = eth_dev;
+
+ return 0;
+}
+
+static int
+nfp_flower_repr_init(struct rte_eth_dev *eth_dev,
+ void *init_params)
+{
+ int ret;
+ unsigned int numa_node;
+ char ring_name[RTE_ETH_NAME_MAX_LEN];
+ struct nfp_app_fw_flower *app_fw_flower;
+ struct nfp_flower_representor *repr;
+ struct nfp_flower_representor *init_repr_data;
+
+ /* Cast the input representor data to the correct struct here */
+ init_repr_data = (struct nfp_flower_representor *)init_params;
+ app_fw_flower = init_repr_data->app_fw_flower;
+
+ /* Memory has been allocated in the eth_dev_create() function */
+ repr = eth_dev->data->dev_private;
+
+ /*
+ * We need multi-producer rings as we can have multiple PF ports.
+ * On the other hand, we need single consumer rings, as just one
+ * representor PMD will try to read from the ring.
+ */
+ snprintf(ring_name, sizeof(ring_name), "%s_%s", init_repr_data->name, "ring");
+ numa_node = rte_socket_id();
+ repr->ring = rte_ring_create(ring_name, 256, numa_node, RING_F_SC_DEQ);
+ if (repr->ring == NULL) {
+ PMD_DRV_LOG(ERR, "rte_ring_create failed for %s", ring_name);
+ return -ENOMEM;
+ }
+
+ /* Copy data here from the input representor template */
+ repr->vf_id = init_repr_data->vf_id;
+ repr->switch_domain_id = init_repr_data->switch_domain_id;
+ repr->port_id = init_repr_data->port_id;
+ repr->nfp_idx = init_repr_data->nfp_idx;
+ repr->repr_type = init_repr_data->repr_type;
+ repr->app_fw_flower = init_repr_data->app_fw_flower;
+
+ snprintf(repr->name, sizeof(repr->name), "%s", init_repr_data->name);
+
+ eth_dev->dev_ops = &nfp_flower_repr_dev_ops;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
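+ /*
+  * Physical port representors reuse the id assigned in the template;
+  * VF representors are numbered after the PF and all physical port
+  * representors.
+  */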
+ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT)
+ eth_dev->data->representor_id = repr->vf_id;
+ else
+ eth_dev->data->representor_id = repr->vf_id +
+ app_fw_flower->num_phyport_reprs + 1;
+
+ /* This backer port is that of the eth_device created for the PF vNIC */
+ eth_dev->data->backer_port_id = 0;
+
+ /* Only single queues for representor devices */
+ eth_dev->data->nb_rx_queues = 1;
+ eth_dev->data->nb_tx_queues = 1;
+
+ /* Allocating memory for mac addr */
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for repr MAC");
+ ret = -ENOMEM;
+ goto ring_cleanup;
+ }
+
+ rte_ether_addr_copy(&init_repr_data->mac_addr, &repr->mac_addr);
+ rte_ether_addr_copy(&init_repr_data->mac_addr, eth_dev->data->mac_addrs);
+
+ /* Send reify message to hardware to inform it about the new repr */
+ ret = nfp_flower_cmsg_repr_reify(app_fw_flower, repr);
+ if (ret != 0) {
+ PMD_INIT_LOG(WARNING, "Failed to send repr reify message");
+ goto mac_cleanup;
+ }
+
+ /* Add repr to correct array */
+ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT)
+ app_fw_flower->phy_reprs[repr->nfp_idx] = repr;
+ else
+ app_fw_flower->vf_reprs[repr->vf_id] = repr;
+
+ return 0;
+
+mac_cleanup:
+ rte_free(eth_dev->data->mac_addrs);
+ring_cleanup:
+ rte_ring_free(repr->ring);
+
+ return ret;
+}
+
+static int
+nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
+{
+ int i;
+ int ret;
+ struct rte_eth_dev *eth_dev;
+ struct nfp_eth_table *nfp_eth_table;
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_flower_representor flower_repr = {
+ .switch_domain_id = app_fw_flower->switch_domain_id,
+ .app_fw_flower = app_fw_flower,
+ };
+
+ nfp_eth_table = app_fw_flower->pf_hw->pf_dev->nfp_eth_table;
+ eth_dev = app_fw_flower->ctrl_hw->eth_dev;
+
+ /* Send a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware */
+ ret = nfp_flower_cmsg_mac_repr(app_fw_flower);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Could not send mac repr cmsgs");
+ return ret;
+ }
+
+ /* Create a rte_eth_dev for PF vNIC representor */
+ flower_repr.repr_type = NFP_REPR_TYPE_PF;
+
+ /* PF vNIC reprs get a random MAC address */
+ rte_eth_random_addr(flower_repr.mac_addr.addr_bytes);
+ sprintf(flower_repr.name, "flower_repr_pf");
+
+ /* Create a eth_dev for this representor */
+ ret = rte_eth_dev_create(eth_dev->device, flower_repr.name,
+ sizeof(struct nfp_flower_representor),
+ NULL, NULL, nfp_flower_pf_repr_init, &flower_repr);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init the pf repr");
+ return -EINVAL;
+ }
+
+ /* Create a rte_eth_dev for every phyport representor */
+ for (i = 0; i < app_fw_flower->num_phyport_reprs; i++) {
+ eth_port = &nfp_eth_table->ports[i];
+ flower_repr.repr_type = NFP_REPR_TYPE_PHYS_PORT;
+ flower_repr.port_id = nfp_flower_get_phys_port_id(eth_port->index);
+ flower_repr.nfp_idx = eth_port->eth_index;
+ flower_repr.vf_id = i + 1;
+
+ /* Copy the real mac of the interface to the representor struct */
+ rte_ether_addr_copy((struct rte_ether_addr *)eth_port->mac_addr,
+ &flower_repr.mac_addr);
+ sprintf(flower_repr.name, "flower_repr_p%d", i);
+
+ /*
+ * Create a eth_dev for this representor
+ * This will also allocate private memory for the device
+ */
+ ret = rte_eth_dev_create(eth_dev->device, flower_repr.name,
+ sizeof(struct nfp_flower_representor),
+ NULL, NULL, nfp_flower_repr_init, &flower_repr);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Could not create eth_dev for repr");
+ break;
+ }
+ }
+
+ if (i < app_fw_flower->num_phyport_reprs)
+ return ret;
+
+ /*
+ * Now allocate eth_dev's for VF representors.
+ * Also send reify messages
+ */
+ for (i = 0; i < app_fw_flower->num_vf_reprs; i++) {
+ flower_repr.repr_type = NFP_REPR_TYPE_VF;
+ flower_repr.port_id = nfp_get_pcie_port_id(app_fw_flower->pf_hw->cpp,
+ NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF, i, 0);
+ flower_repr.nfp_idx = 0;
+ flower_repr.vf_id = i;
+
+ /* VF reprs get a random MAC address */
+ rte_eth_random_addr(flower_repr.mac_addr.addr_bytes);
+ sprintf(flower_repr.name, "flower_repr_vf%d", i);
+
+ /* This will also allocate private memory for the device*/
+ ret = rte_eth_dev_create(eth_dev->device, flower_repr.name,
+ sizeof(struct nfp_flower_representor),
+ NULL, NULL, nfp_flower_repr_init, &flower_repr);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Could not create eth_dev for repr");
+ break;
+ }
+ }
+
+ if (i < app_fw_flower->num_vf_reprs)
+ return ret;
+
+ return 0;
+}
+
+int
+nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower)
+{
+ int ret;
+ struct nfp_pf_dev *pf_dev;
+ struct rte_pci_device *pci_dev;
+ struct nfp_eth_table *nfp_eth_table;
+ struct rte_eth_devargs eth_da = {
+ .nb_representor_ports = 0
+ };
+
+ pf_dev = app_fw_flower->pf_hw->pf_dev;
+ pci_dev = pf_dev->pci_dev;
+
+ /* Allocate a switch domain for the flower app */
+ if (app_fw_flower->switch_domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID &&
+ rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id)) {
+ PMD_INIT_LOG(WARNING, "failed to allocate switch domain for device");
+ }
+
+ /* Now parse PCI device args passed for representor info */
+ if (pci_dev->device.devargs != NULL) {
+ ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "devarg parse failed");
+ return -EINVAL;
+ }
+ }
+
+ if (eth_da.nb_representor_ports == 0) {
+ PMD_INIT_LOG(DEBUG, "No representor port needs to be created.");
+ return 0;
+ }
+
+ /* There always exist phy repr */
+ nfp_eth_table = pf_dev->nfp_eth_table;
+ if (eth_da.nb_representor_ports < nfp_eth_table->count + 1) {
+ PMD_INIT_LOG(ERR, "Representor ports must also be created for the physical ports and PF vNIC.");
+ return -ERANGE;
+ }
+
+ /* Only support VF representor creation via the command line */
+ if (eth_da.type != RTE_ETH_REPRESENTOR_VF) {
+ PMD_INIT_LOG(ERR, "Unsupported representor type: %d", eth_da.type);
+ return -ENOTSUP;
+ }
+
+ /* Fill in flower app with repr counts */
+ app_fw_flower->num_phyport_reprs = (uint8_t)nfp_eth_table->count;
+ app_fw_flower->num_vf_reprs = eth_da.nb_representor_ports -
+ nfp_eth_table->count - 1;
+
+ PMD_INIT_LOG(INFO, "%d number of VF reprs", app_fw_flower->num_vf_reprs);
+ PMD_INIT_LOG(INFO, "%d number of phyport reprs", app_fw_flower->num_phyport_reprs);
+
+ ret = nfp_flower_repr_alloc(app_fw_flower);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "representors allocation failed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _NFP_FLOWER_REPRESENTOR_H_
+#define _NFP_FLOWER_REPRESENTOR_H_
+
+/*
+ * enum nfp_repr_type - type of representor
+ * @NFP_REPR_TYPE_PHYS_PORT: external NIC port
+ * @NFP_REPR_TYPE_PF: physical function
+ * @NFP_REPR_TYPE_VF: virtual function
+ * @NFP_REPR_TYPE_MAX: number of representor types
+ */
+enum nfp_repr_type {
+ NFP_REPR_TYPE_PHYS_PORT = 0,
+ NFP_REPR_TYPE_PF,
+ NFP_REPR_TYPE_VF,
+ NFP_REPR_TYPE_MAX,
+};
+
+struct nfp_flower_representor {
+ uint16_t vf_id;
+ uint16_t switch_domain_id;
+ uint32_t repr_type;
+ uint32_t port_id;
+ uint32_t nfp_idx; /* only valid for the repr of physical port */
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_ether_addr mac_addr;
+ struct nfp_app_fw_flower *app_fw_flower;
+ struct rte_ring *ring;
+ struct rte_eth_link link;
+ struct rte_eth_stats repr_stats;
+};
+
+int nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower);
+
+#endif /* _NFP_FLOWER_REPRESENTOR_H_ */
@@ -7,7 +7,9 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
endif
sources = files(
'flower/nfp_flower.c',
+ 'flower/nfp_flower_cmsg.c',
'flower/nfp_flower_ctrl.c',
+ 'flower/nfp_flower_representor.c',
'nfpcore/nfp_cpp_pcie_ops.c',
'nfpcore/nfp_nsp.c',
'nfpcore/nfp_cppcore.c',