@@ -21,6 +21,7 @@
#include "../nfpcore/nfp_nsp.h"
#include "nfp_flower.h"
#include "nfp_flower_ovs_compat.h"
+#include "nfp_flower_ctrl.h"
#define MAX_PKT_BURST 32
#define MEMPOOL_CACHE_SIZE 512
@@ -216,7 +217,21 @@
.link_update = nfp_flower_pf_link_update,
};
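+/*
+ * Service callback run on a service core: each invocation
+ * polls the ctrl vNIC Rx ring once.
+ */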
+static int
+nfp_flower_ctrl_vnic_service(void *arg)
+{
+ struct nfp_app_flower *app_flower = arg;
+
+ nfp_flower_ctrl_vnic_poll(app_flower);
+
+ return 0;
+}
+
static struct rte_service_spec flower_services[NFP_FLOWER_SERVICE_MAX] = {
+ [NFP_FLOWER_SERVICE_CTRL] = {
+ .name = "flower_ctrl_vnic_service",
+ .callback = nfp_flower_ctrl_vnic_service,
+ },
};
static int
@@ -7,9 +7,18 @@
#define _NFP_FLOWER_H_
enum nfp_flower_service {
+ NFP_FLOWER_SERVICE_CTRL,
NFP_FLOWER_SERVICE_MAX
};
+/*
+ * The flower fallback and ctrl paths always add and remove
+ * 8 bytes of prepended data. Tx descriptors must point
+ * to the correct packet data offset after the metadata
+ * has been added.
+ */
+#define FLOWER_PKT_DATA_OFFSET 8
+
/* The flower application's private structure */
struct nfp_app_flower {
/* List of rte_service ID's for the flower app */
@@ -29,6 +38,12 @@ struct nfp_app_flower {
/* the eth table as reported by firmware */
struct nfp_eth_table *nfp_eth_table;
+
+ /* Ctrl vNIC Rx counter */
+ uint64_t ctrl_vnic_rx_count;
+
+ /* Ctrl vNIC Tx counter */
+ uint64_t ctrl_vnic_tx_count;
};
int nfp_init_app_flower(struct nfp_pf_dev *pf_dev);
new file mode 100644
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#include <rte_common.h>
+#include <ethdev_pci.h>
+
+#include "../nfp_common.h"
+#include "../nfp_logs.h"
+#include "../nfp_ctrl.h"
+#include "../nfp_rxtx.h"
+#include "nfp_flower.h"
+#include "nfp_flower_ctrl.h"
+
+#define MAX_PKT_BURST 32
+
+static uint16_t
+nfp_flower_ctrl_vnic_recv(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct nfp_net_rxq *rxq;
+ struct nfp_net_rx_desc *rxds;
+ struct nfp_net_rx_buff *rxb;
+ struct nfp_net_hw *hw;
+ struct rte_mbuf *mb;
+ struct rte_mbuf *new_mb;
+ uint64_t dma_addr;
+ uint16_t avail = 0;
+ uint16_t nb_hold = 0;
+
+ rxq = rx_queue;
+ if (unlikely(rxq == NULL)) {
+ /*
+ * DPDK only checks that the queue index is below the number
+ * of enabled queues; the queue itself still needs to be configured.
+ */
+ PMD_RX_LOG(ERR, "RX Bad queue");
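+ /*
+ * Note: the negative errno wraps into a large uint16_t
+ * value, which the caller detects by comparing the return
+ * against MAX_PKT_BURST.
+ */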
+ return -EINVAL;
+ }
+
+ hw = rxq->hw;
+ while (avail < nb_pkts) {
+ rxb = &rxq->rxbufs[rxq->rd_p];
+ if (unlikely(rxb == NULL)) {
+ PMD_RX_LOG(ERR, "rxb does not exist!");
+ break;
+ }
+
+ rxds = &rxq->rxds[rxq->rd_p];
+ if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+ break;
+
+ /*
+ * Memory barrier to ensure that no other reads are
+ * reordered before the DD bit check.
+ */
+ rte_rmb();
+
+ /*
+ * We got a packet. Let's alloc a new mbuf for refilling the
+ * free descriptor ring as soon as possible
+ */
+ new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
+ if (unlikely(new_mb == NULL)) {
+ PMD_RX_LOG(ERR,
+ "RX mbuf alloc failed port_id=%u queue_id=%u",
+ rxq->port_id, (unsigned int)rxq->qidx);
+ nfp_net_mbuf_alloc_failed(rxq);
+ break;
+ }
+
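+ /* Count refills; the FL write pointer is advanced in bulk below */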
+ nb_hold++;
+
+ /*
+ * Grab the mbuf and refill the descriptor with the
+ * previously allocated mbuf
+ */
+ mb = rxb->mbuf;
+ rxb->mbuf = new_mb;
+
+ /* Size of this segment */
+ mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+ /* Size of the whole packet; only a single segment is supported */
+ mb->pkt_len = mb->data_len;
+
+ if (unlikely((mb->data_len + hw->rx_offset) > rxq->mbuf_size)) {
+ /*
+ * This should not happen; avoiding it is the user's
+ * responsibility, but log some details to help
+ * diagnose the error.
+ */
+ RTE_LOG_DP(ERR, PMD,
+ "mbuf overflow likely due to the RX offset.\n"
+ "\t\tYour mbuf size should have extra space for"
+ " RX offset=%u bytes.\n"
+ "\t\tCurrently you just have %u bytes available"
+ " but the received packet is %u bytes long",
+ hw->rx_offset,
+ rxq->mbuf_size - hw->rx_offset,
+ mb->data_len);
+ return -EINVAL;
+ }
+
+ /* Filling the received mbuf with packet info */
+ if (hw->rx_offset)
+ mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
+ else
+ mb->data_off = RTE_PKTMBUF_HEADROOM + NFP_DESC_META_LEN(rxds);
+
+ /* No scatter mode supported */
+ mb->nb_segs = 1;
+ mb->next = NULL;
+ mb->port = rxq->port_id;
+
+ rx_pkts[avail++] = mb;
+
+ /* Now resetting and updating the descriptor */
+ rxds->vals[0] = 0;
+ rxds->vals[1] = 0;
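+ /*
+ * The descriptor carries a split DMA address: bits 39:32
+ * in dma_addr_hi and bits 31:0 in dma_addr_lo
+ */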
+ dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
+ rxds->fld.dd = 0;
+ rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
+ rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
+
+ rxq->rd_p++;
+ if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping? */
+ rxq->rd_p = 0;
+ }
+
+ if (nb_hold == 0)
+ return 0;
+
+ nb_hold += rxq->nb_rx_hold;
+
+ /*
+ * FL descriptors need to be written before incrementing
+ * the FL queue WR pointer.
+ */
+ rte_wmb();
+ if (nb_hold >= rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port=%hu queue=%d nb_hold=%hu avail=%hu",
+ rxq->port_id, rxq->qidx, nb_hold, avail);
+ nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
+ nb_hold = 0;
+ }
+
+ rxq->nb_rx_hold = nb_hold;
+
+ return avail;
+}
+
+uint16_t
+nfp_flower_ctrl_vnic_xmit(struct nfp_app_flower *app_flower,
+ struct rte_mbuf *mbuf)
+{
+ uint64_t dma_addr;
+ uint32_t free_descs;
+ struct rte_mbuf **lmbuf;
+ struct nfp_net_txq *txq;
+ struct nfp_net_hw *ctrl_hw;
+ struct rte_eth_dev *ctrl_dev;
+ struct nfp_net_nfd3_tx_desc *txds;
+
+ ctrl_hw = app_flower->ctrl_hw;
+ ctrl_dev = ctrl_hw->eth_dev;
+
+ /* Flower ctrl vNIC only has a single Tx queue */
+ txq = ctrl_dev->data->tx_queues[0];
+ if (unlikely(txq == NULL)) {
+ /*
+ * DPDK only checks that the queue index is below the number
+ * of enabled queues; the queue itself still needs to be configured.
+ */
+ PMD_TX_LOG(ERR, "ctrl dev TX Bad queue");
+ return -EINVAL;
+ }
+
+ txds = &txq->txds[txq->wr_p];
+ txds->vals[0] = 0;
+ txds->vals[1] = 0;
+ txds->vals[2] = 0;
+ txds->vals[3] = 0;
+
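+ /* Reclaim completed Tx buffers when the ring has filled up */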
+ if (nfp_net_nfd3_txq_full(txq))
+ nfp_net_tx_free_bufs(txq);
+
+ free_descs = nfp_net_nfd3_free_tx_desc(txq);
+ if (unlikely(free_descs == 0)) {
+ PMD_TX_LOG(ERR, "ctrl dev no free descs");
+ return -EINVAL;
+ }
+
+ lmbuf = &txq->txbufs[txq->wr_p].mbuf;
+ RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
+ if (*lmbuf)
+ rte_pktmbuf_free_seg(*lmbuf);
+
+ *lmbuf = mbuf;
+ dma_addr = rte_mbuf_data_iova(mbuf);
+
+ txds->data_len = mbuf->pkt_len;
+ txds->dma_len = txds->data_len;
+ txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
+ txds->dma_addr_lo = (dma_addr & 0xffffffff);
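+ /* Encode the 8-byte metadata offset together with the EOP flag */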
+ txds->offset_eop = FLOWER_PKT_DATA_OFFSET | PCIE_DESC_TX_EOP;
+
+ txq->wr_p++;
+ if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping? */
+ txq->wr_p = 0;
+
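+ /* Ensure the descriptor is written before bumping the QCP write pointer */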
+ rte_wmb();
+ nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, 1);
+
+ app_flower->ctrl_vnic_tx_count++;
+
+ return 0;
+}
+
+void
+nfp_flower_ctrl_vnic_poll(struct nfp_app_flower *app_flower)
+{
+ uint16_t i;
+ uint16_t count;
+ struct nfp_net_rxq *rxq;
+ struct nfp_net_hw *ctrl_hw;
+ struct rte_eth_dev *ctrl_eth_dev;
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+
+ ctrl_hw = app_flower->ctrl_hw;
+ ctrl_eth_dev = ctrl_hw->eth_dev;
+
+ /* ctrl vNIC only has a single Rx queue */
+ rxq = ctrl_eth_dev->data->rx_queues[0];
+ count = nfp_flower_ctrl_vnic_recv(rxq, pkts_burst, MAX_PKT_BURST);
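+ /* A return above the burst size signals the wrapped negative errno from recv */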
+ if (count > MAX_PKT_BURST) {
+ PMD_RX_LOG(ERR, "nfp_flower_ctrl_vnic_recv failed!");
+ return;
+ }
+
+ if (count) {
+ app_flower->ctrl_vnic_rx_count += count;
+ /* Process cmsgs here, only free for now */
+ for (i = 0; i < count; i++)
+ rte_pktmbuf_free(pkts_burst[i]);
+ }
+}
new file mode 100644
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _NFP_FLOWER_CTRL_H_
+#define _NFP_FLOWER_CTRL_H_
+
+void nfp_flower_ctrl_vnic_poll(struct nfp_app_flower *app_flower);
+uint16_t nfp_flower_ctrl_vnic_xmit(struct nfp_app_flower *app_flower,
+ struct rte_mbuf *mbuf);
+
+#endif /* _NFP_FLOWER_CTRL_H_ */
@@ -7,6 +7,7 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
endif
sources = files(
'flower/nfp_flower.c',
+ 'flower/nfp_flower_ctrl.c',
'nfpcore/nfp_cpp_pcie_ops.c',
'nfpcore/nfp_nsp.c',
'nfpcore/nfp_cppcore.c',