@@ -244,41 +244,6 @@ static const struct eth_dev_ops nfp_flower_pf_vnic_ops = {
.dev_close = nfp_flower_pf_close,
};
-static inline void
-nfp_flower_parse_metadata(struct nfp_net_rxq *rxq,
- struct nfp_net_rx_desc *rxd,
- struct rte_mbuf *mbuf,
- uint32_t *portid)
-{
- uint32_t meta_info;
- uint8_t *meta_offset;
- struct nfp_net_hw *hw;
-
- hw = rxq->hw;
- if (!((hw->ctrl & NFP_NET_CFG_CTRL_RSS) ||
- (hw->ctrl & NFP_NET_CFG_CTRL_RSS2)))
- return;
-
- meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
- meta_offset -= NFP_DESC_META_LEN(rxd);
- meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
- meta_offset += 4;
-
- while (meta_info != 0) {
- switch (meta_info & NFP_NET_META_FIELD_MASK) {
- /* Expect flower firmware to only send packets with META_PORTID */
- case NFP_NET_META_PORTID:
- *portid = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
- meta_offset += 4;
- meta_info >>= NFP_NET_META_FIELD_SIZE;
- break;
- default:
- /* Unsupported metadata can be a performance issue */
- return;
- }
- }
-}
-
static inline struct nfp_flower_representor *
nfp_flower_get_repr(struct nfp_net_hw *hw,
uint32_t port_id)
@@ -303,186 +268,26 @@ nfp_flower_get_repr(struct nfp_net_hw *hw,
return NULL;
}
-uint16_t
-nfp_flower_pf_recv_pkts(void *rx_queue,
- struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+bool
+nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw,
+ struct rte_mbuf *mbuf,
+ uint32_t port_id)
{
- /*
- * We need different counters for packets given to the caller
- * and packets sent to representors
- */
- uint16_t avail = 0;
- uint16_t avail_multiplexed = 0;
- uint64_t dma_addr;
- uint32_t meta_portid;
- uint16_t nb_hold = 0;
- struct rte_mbuf *mb;
- struct nfp_net_hw *hw;
- struct rte_mbuf *new_mb;
- struct nfp_net_rxq *rxq;
- struct nfp_net_dp_buf *rxb;
- struct nfp_net_rx_desc *rxds;
struct nfp_flower_representor *repr;
- rxq = rx_queue;
- if (unlikely(rxq == NULL)) {
- /*
- * DPDK just checks the queue is lower than max queues
- * enabled. But the queue needs to be configured
- */
- PMD_RX_LOG(ERR, "RX Bad queue");
- return 0;
+ repr = nfp_flower_get_repr(hw, port_id);
+ if (repr == NULL) {
+ PMD_RX_LOG(ERR, "Can not get repr for port %u", port_id);
+ return false;
}
- hw = rxq->hw;
-
- /*
- * This is tunable as we could allow to receive more packets than
- * requested if most are multiplexed.
- */
- while (avail + avail_multiplexed < nb_pkts) {
- rxb = &rxq->rxbufs[rxq->rd_p];
- if (unlikely(rxb == NULL)) {
- PMD_RX_LOG(ERR, "rxb does not exist!");
- break;
- }
-
- rxds = &rxq->rxds[rxq->rd_p];
- if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
- break;
-
- /*
- * Memory barrier to ensure that we won't do other
- * reads before the DD bit.
- */
- rte_rmb();
-
- /*
- * We got a packet. Let's alloc a new mbuf for refilling the
- * free descriptor ring as soon as possible
- */
- new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
- if (unlikely(new_mb == NULL)) {
- PMD_RX_LOG(DEBUG,
- "RX mbuf alloc failed port_id=%hu queue_id=%hu",
- rxq->port_id, rxq->qidx);
- nfp_net_mbuf_alloc_failed(rxq);
- break;
- }
-
- /*
- * Grab the mbuf and refill the descriptor with the
- * previously allocated mbuf
- */
- mb = rxb->mbuf;
- rxb->mbuf = new_mb;
-
- PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
- rxds->rxd.data_len, rxq->mbuf_size);
-
- /* Size of this segment */
- mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
- /* Size of the whole packet. We just support 1 segment */
- mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
-
- if (unlikely((mb->data_len + hw->rx_offset) > rxq->mbuf_size)) {
- /*
- * This should not happen and the user has the
- * responsibility of avoiding it. But we have
- * to give some info about the error
- */
- PMD_RX_LOG(ERR,
- "mbuf overflow likely due to the RX offset.\n"
- "\t\tYour mbuf size should have extra space for"
- " RX offset=%u bytes.\n"
- "\t\tCurrently you just have %u bytes available"
- " but the received packet is %u bytes long",
- hw->rx_offset,
- rxq->mbuf_size - hw->rx_offset,
- mb->data_len);
- rte_pktmbuf_free(mb);
- break;
- }
-
- /* Filling the received mbuf with packet info */
- if (hw->rx_offset != 0)
- mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
- else
- mb->data_off = RTE_PKTMBUF_HEADROOM + NFP_DESC_META_LEN(rxds);
-
- /* No scatter mode supported */
- mb->nb_segs = 1;
- mb->next = NULL;
-
- mb->port = rxq->port_id;
- meta_portid = 0;
-
- /* Checking the RSS flag */
- nfp_flower_parse_metadata(rxq, rxds, mb, &meta_portid);
- PMD_RX_LOG(DEBUG, "Received from port %u type %u",
- NFP_FLOWER_CMSG_PORT_VNIC(meta_portid),
- NFP_FLOWER_CMSG_PORT_VNIC_TYPE(meta_portid));
-
- /* Checking the checksum flag */
- nfp_net_rx_cksum(rxq, rxds, mb);
-
- if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
- (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
- mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.offload_info);
- mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
- }
-
- repr = nfp_flower_get_repr(hw, meta_portid);
- if (repr != NULL && repr->ring != NULL) {
- PMD_RX_LOG(DEBUG, "Using representor %s", repr->name);
- rte_ring_enqueue(repr->ring, (void *)mb);
- avail_multiplexed++;
- } else if (repr != NULL) {
- PMD_RX_LOG(ERR, "No ring available for repr_port %s", repr->name);
- rx_pkts[avail++] = mb;
- } else {
- PMD_RX_LOG(DEBUG, "Adding the mbuf to the mbuf array passed by the app");
- rx_pkts[avail++] = mb;
- }
-
- /* Now resetting and updating the descriptor */
- rxds->vals[0] = 0;
- rxds->vals[1] = 0;
- dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
- rxds->fld.dd = 0;
- rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xffff;
- rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
- nb_hold++;
-
- rxq->rd_p++;
- if (unlikely(rxq->rd_p == rxq->rx_count))
- rxq->rd_p = 0;
- }
-
- if (nb_hold == 0)
- return nb_hold;
-
- PMD_RX_LOG(DEBUG, "RX port_id=%hu queue_id=%hu, %hu packets received",
- rxq->port_id, rxq->qidx, nb_hold);
-
- nb_hold += rxq->nb_rx_hold;
-
- /*
- * FL descriptors needs to be written before incrementing the
- * FL queue WR pointer
- */
- rte_wmb();
- if (nb_hold > rxq->rx_free_thresh) {
- PMD_RX_LOG(DEBUG, "port=%hu queue=%hu nb_hold=%hu avail=%hu",
- rxq->port_id, rxq->qidx, nb_hold, avail);
- nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
- nb_hold = 0;
+ if (repr->ring == NULL) {
+ PMD_RX_LOG(ERR, "No ring available for repr_port %s", repr->name);
+ return false;
}
- rxq->nb_rx_hold = nb_hold;
-
- return avail;
+ /* Ring may be full (-ENOBUFS); on failure caller frees the mbuf */
+ return rte_ring_enqueue(repr->ring, (void *)mbuf) == 0;
}
static uint16_t
@@ -569,6 +374,8 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type)
return -EINVAL;
hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
+ nfp_net_init_metadata_format(hw);
+
hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
/* Set the current MTU to the maximum supported */
hw->mtu = hw->max_mtu;
@@ -1164,7 +971,7 @@ nfp_secondary_init_app_fw_flower(struct nfp_cpp *cpp)
eth_dev->process_private = cpp;
eth_dev->dev_ops = &nfp_flower_pf_vnic_ops;
- eth_dev->rx_pkt_burst = nfp_flower_pf_recv_pkts;
+ eth_dev->rx_pkt_burst = nfp_net_recv_pkts;
eth_dev->tx_pkt_burst = nfp_flower_pf_xmit_pkts;
rte_eth_dev_probing_finish(eth_dev);
@@ -105,8 +105,9 @@ nfp_flower_support_decap_v2(const struct nfp_app_fw_flower *app_fw_flower)
int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev);
int nfp_secondary_init_app_fw_flower(struct nfp_cpp *cpp);
-uint16_t nfp_flower_pf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+bool nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw,
+ struct rte_mbuf *mbuf,
+ uint32_t port_id);
uint16_t nfp_flower_pf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int nfp_flower_pf_start(struct rte_eth_dev *dev);
@@ -365,8 +365,6 @@ enum nfp_flower_cmsg_port_vnic_type {
#define NFP_FLOWER_CMSG_HLEN sizeof(struct nfp_flower_cmsg_hdr)
#define NFP_FLOWER_CMSG_VER1 1
-#define NFP_NET_META_PORTID 5
-#define NFP_META_PORT_ID_CTRL ~0U
#define NFP_FLOWER_CMSG_PORT_TYPE(x) (((x) >> 28) & 0xf) /* [31,28] */
#define NFP_FLOWER_CMSG_PORT_SYS_ID(x) (((x) >> 24) & 0xf) /* [24,27] */
@@ -596,7 +596,7 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev,
snprintf(repr->name, sizeof(repr->name), "%s", init_repr_data->name);
eth_dev->dev_ops = &nfp_flower_pf_repr_dev_ops;
- eth_dev->rx_pkt_burst = nfp_flower_pf_recv_pkts;
+ eth_dev->rx_pkt_burst = nfp_net_recv_pkts;
eth_dev->tx_pkt_burst = nfp_flower_pf_xmit_pkts;
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
@@ -34,6 +34,9 @@
/* Prepend field types */
#define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_VLAN 4
+#define NFP_NET_META_PORTID 5
+
+#define NFP_META_PORT_ID_CTRL ~0U
/* Hash type pre-pended when a RSS hash was computed */
#define NFP_NET_RSS_NONE 0
@@ -16,6 +16,7 @@
#include "nfdk/nfp_nfdk.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
+#include "flower/nfp_flower.h"
static int
nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
@@ -124,6 +125,9 @@ nfp_net_parse_chained_meta(uint8_t *meta_base,
for (; meta_info != 0; meta_info >>= NFP_NET_META_FIELD_SIZE, meta_offset += 4) {
switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_PORTID:
+ meta->port_id = rte_be_to_cpu_32(*(rte_be32_t *)meta_offset);
+ break;
case NFP_NET_META_HASH:
/* Next field type is about the hash type */
meta_info >>= NFP_NET_META_FIELD_SIZE;
@@ -270,11 +274,11 @@ static void
nfp_net_parse_meta(struct nfp_net_rx_desc *rxds,
struct nfp_net_rxq *rxq,
struct nfp_net_hw *hw,
- struct rte_mbuf *mb)
+ struct rte_mbuf *mb,
+ struct nfp_meta_parsed *meta)
{
uint8_t *meta_base;
rte_be32_t meta_header;
- struct nfp_meta_parsed meta = {};
if (unlikely(NFP_DESC_META_LEN(rxds) == 0))
return;
@@ -285,18 +289,18 @@ nfp_net_parse_meta(struct nfp_net_rx_desc *rxds,
switch (hw->meta_format) {
case NFP_NET_METAFORMAT_CHAINED:
- if (nfp_net_parse_chained_meta(meta_base, meta_header, &meta)) {
- nfp_net_parse_meta_hash(&meta, rxq, mb);
- nfp_net_parse_meta_vlan(&meta, rxds, rxq, mb);
- nfp_net_parse_meta_qinq(&meta, rxq, mb);
+ if (nfp_net_parse_chained_meta(meta_base, meta_header, meta)) {
+ nfp_net_parse_meta_hash(meta, rxq, mb);
+ nfp_net_parse_meta_vlan(meta, rxds, rxq, mb);
+ nfp_net_parse_meta_qinq(meta, rxq, mb);
} else {
PMD_RX_LOG(DEBUG, "RX chained metadata format is wrong!");
}
break;
case NFP_NET_METAFORMAT_SINGLE:
if ((rxds->rxd.flags & PCIE_DESC_RX_RSS) != 0) {
- nfp_net_parse_single_meta(meta_base, meta_header, &meta);
- nfp_net_parse_meta_hash(&meta, rxq, mb);
+ nfp_net_parse_single_meta(meta_base, meta_header, meta);
+ nfp_net_parse_meta_hash(meta, rxq, mb);
}
break;
default:
@@ -493,6 +497,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint16_t nb_hold;
uint64_t dma_addr;
uint16_t avail;
+ uint16_t avail_multiplexed = 0;
rxq = rx_queue;
if (unlikely(rxq == NULL)) {
@@ -508,7 +513,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
avail = 0;
nb_hold = 0;
- while (avail < nb_pkts) {
+ while (avail + avail_multiplexed < nb_pkts) {
rxb = &rxq->rxbufs[rxq->rd_p];
if (unlikely(rxb == NULL)) {
PMD_RX_LOG(ERR, "rxb does not exist!");
@@ -585,15 +590,22 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
mb->next = NULL;
mb->port = rxq->port_id;
- nfp_net_parse_meta(rxds, rxq, hw, mb);
+ struct nfp_meta_parsed meta = {};
+ nfp_net_parse_meta(rxds, rxq, hw, mb, &meta);
nfp_net_parse_ptype(rxds, hw, mb);
/* Checking the checksum flag */
nfp_net_rx_cksum(rxq, rxds, mb);
- /* Adding the mbuf to the mbuf array passed by the app */
- rx_pkts[avail++] = mb;
+ if (meta.port_id == 0) {
+ rx_pkts[avail++] = mb;
+ } else if (nfp_flower_pf_dispatch_pkts(hw, mb, meta.port_id)) {
+ avail_multiplexed++;
+ } else {
+ rte_pktmbuf_free(mb);
+ break;
+ }
/* Now resetting and updating the descriptor */
rxds->vals[0] = 0;
@@ -44,6 +44,7 @@ struct nfp_net_meta_raw {
* Parsed NFP packet metadata are recorded in this struct. The content is
* read-only after it have been recorded during parsing by nfp_net_parse_meta().
*
+ * @port_id: Port id value
* @hash: RSS hash value
* @hash_type: RSS hash type
* @vlan_layer: The layers of VLAN info which are passed from nic.
@@ -62,6 +63,7 @@ struct nfp_net_meta_raw {
* @vlan.tci: Vlan TCI including PCP + Priority + VID
*/
struct nfp_meta_parsed {
+ uint32_t port_id;
uint32_t hash;
uint8_t hash_type;
uint8_t vlan_layer;