[dpdk-dev,v5,12/17] fm10k: Add scatter receive function

Message ID 1423815597-17819-13-git-send-email-jing.d.chen@intel.com (mailing list archive)
State Superseded, archived

Commit Message

Chen, Jing D Feb. 13, 2015, 8:19 a.m. UTC
From: Jeff Shaw <jeffrey.b.shaw@intel.com>

1. Add the fm10k_recv_scattered_pkts function to receive jumbo frames
   and multi-segment packets.
2. Configure the correct receive function in rx_init and dev_init.

Signed-off-by: Jeff Shaw <jeffrey.b.shaw@intel.com>
Signed-off-by: Chen Jing D(Mark) <jing.d.chen@intel.com>
---
 lib/librte_pmd_fm10k/fm10k.h        |    3 +
 lib/librte_pmd_fm10k/fm10k_ethdev.c |   15 ++++
 lib/librte_pmd_fm10k/fm10k_rxtx.c   |  143 +++++++++++++++++++++++++++++++++++
 3 files changed, 161 insertions(+), 0 deletions(-)
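For context on how this receive path gets selected: fm10k_dev_rx_init() switches to fm10k_recv_scattered_pkts when max_rx_pkt_len plus two VLAN tag lengths exceeds the per-descriptor buffer size, or when the application sets rxmode.enable_scatter. Below is a minimal application-side sketch that would exercise it; the 9000-byte length, the jumbo_frame flag and the single-queue configure call are illustrative assumptions, not part of this patch.

#include <rte_ethdev.h>

/*
 * Sketch only: either rxmode setting below causes fm10k_dev_rx_init() to
 * install fm10k_recv_scattered_pkts as the rx_pkt_burst handler.
 * The values are examples.
 */
static int
configure_scattered_rx(uint8_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.jumbo_frame    = 1,    /* accept frames above the default limit */
			.max_rx_pkt_len = 9000, /* larger than one RX mbuf data buffer */
			.enable_scatter = 1,    /* explicitly request multi-segment RX */
		},
	};

	/* one RX queue and one TX queue, purely as an example */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}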
  

Comments

David Marchand Feb. 13, 2015, 11:55 a.m. UTC | #1
Hello,

On Fri, Feb 13, 2015 at 9:19 AM, Chen Jing D(Mark) <jing.d.chen@intel.com>
wrote:

[snip]

> +               if (unlikely(ret != 0)) {
> +                       PMD_RX_LOG(ERR, "Failed to alloc mbuf");

Idem mono segment.
rx_mbuf_alloc_failed++ ?
  
Chen, Jing D Feb. 17, 2015, 1:11 p.m. UTC | #2
Hi,


On Friday, February 13, 2015 7:55 PM, David Marchand <david.marchand@6wind.com> wrote:

> Hello,
>
> On Fri, Feb 13, 2015 at 9:19 AM, Chen Jing D(Mark) <jing.d.chen@intel.com> wrote:
>
> [snip]
>
> > +               if (unlikely(ret != 0)) {
> > +                       PMD_RX_LOG(ERR, "Failed to alloc mbuf");
>
> Idem mono segment.
> rx_mbuf_alloc_failed++ ?

[Mark] Thanks, I’ll change that.
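
For reference, a sketch of the change being agreed on above, shown as it could look in the allocation-failure branch of fm10k_recv_scattered_pkts(). Routing the increment through rte_eth_devices[q->port_id].data->rx_mbuf_alloc_failed (which ethdev reports as rx_nombuf) mirrors other PMDs and is an assumption about the eventual fix, not the actual next revision of the patch:

 		if (unlikely(ret != 0)) {
+			/* account the failed replenish in the port statistics */
+			rte_eth_devices[q->port_id].data->rx_mbuf_alloc_failed++;
 			PMD_RX_LOG(ERR, "Failed to alloc mbuf");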


  

Patch

diff --git a/lib/librte_pmd_fm10k/fm10k.h b/lib/librte_pmd_fm10k/fm10k.h
index a9b19cd..8741936 100644
--- a/lib/librte_pmd_fm10k/fm10k.h
+++ b/lib/librte_pmd_fm10k/fm10k.h
@@ -285,6 +285,9 @@  fm10k_addr_alignment_valid(struct rte_mbuf *mb)
 uint16_t fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t nb_pkts);
 
+uint16_t fm10k_recv_scattered_pkts(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
 uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts);
 #endif
diff --git a/lib/librte_pmd_fm10k/fm10k_ethdev.c b/lib/librte_pmd_fm10k/fm10k_ethdev.c
index 5858504..ead1585 100644
--- a/lib/librte_pmd_fm10k/fm10k_ethdev.c
+++ b/lib/librte_pmd_fm10k/fm10k_ethdev.c
@@ -423,6 +423,13 @@  fm10k_dev_rx_init(struct rte_eth_dev *dev)
 		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
 				buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
 
+		/* Add dual VLAN tag length so the check covers double-tagged frames */
+		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+				2 * FM10K_VLAN_TAG_SIZE) > buf_size) {
+			dev->data->scattered_rx = 1;
+			dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
+		}
+
 		/* Enable drop on empty, it's RO for VF */
 		if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
 			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
@@ -431,6 +438,11 @@  fm10k_dev_rx_init(struct rte_eth_dev *dev)
 		FM10K_WRITE_FLUSH(hw);
 	}
 
+	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
+		dev->data->scattered_rx = 1;
+	}
+
 	/* Configure RSS if applicable */
 	fm10k_dev_mq_rx_configure(dev);
 	return 0;
@@ -1404,6 +1416,9 @@  eth_fm10k_dev_init(__rte_unused struct eth_driver *eth_drv,
 	dev->rx_pkt_burst = &fm10k_recv_pkts;
 	dev->tx_pkt_burst = &fm10k_xmit_pkts;
 
+	if (dev->data->scattered_rx)
+		dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;
+
 	/* only initialize in the primary process */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
diff --git a/lib/librte_pmd_fm10k/fm10k_rxtx.c b/lib/librte_pmd_fm10k/fm10k_rxtx.c
index 9cead3a..2a2fa84 100644
--- a/lib/librte_pmd_fm10k/fm10k_rxtx.c
+++ b/lib/librte_pmd_fm10k/fm10k_rxtx.c
@@ -194,6 +194,149 @@  fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return count;
 }
 
+uint16_t
+fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+				uint16_t nb_pkts)
+{
+	struct rte_mbuf *mbuf;
+	union fm10k_rx_desc desc;
+	struct fm10k_rx_queue *q = rx_queue;
+	uint16_t count = 0;
+	uint16_t nb_rcv, nb_seg;
+	int alloc = 0;
+	uint16_t next_dd;
+	struct rte_mbuf *first_seg = q->pkt_first_seg;
+	struct rte_mbuf *last_seg = q->pkt_last_seg;
+	int ret;
+
+	next_dd = q->next_dd;
+	nb_rcv = 0;
+
+	nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
+	for (count = 0; count < nb_seg; count++) {
+		mbuf = q->sw_ring[next_dd];
+		desc = q->hw_ring[next_dd];
+		if (!(desc.d.staterr & FM10K_RXD_STATUS_DD))
+			break;
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+		dump_rxd(&desc);
+#endif
+
+		if (++next_dd == q->nb_desc) {
+			next_dd = 0;
+			alloc = 1;
+		}
+
+		/* Prefetch next mbuf while processing current one. */
+		rte_prefetch0(q->sw_ring[next_dd]);
+
+		/*
+		 * When next RX descriptor is on a cache-line boundary,
+		 * prefetch the next 4 RX descriptors and the next 8 pointers
+		 * to mbufs.
+		 */
+		if ((next_dd & 0x3) == 0) {
+			rte_prefetch0(&q->hw_ring[next_dd]);
+			rte_prefetch0(&q->sw_ring[next_dd]);
+		}
+
+		/* Fill data length */
+		rte_pktmbuf_data_len(mbuf) = desc.w.length;
+
+		/*
+		 * If this is the first buffer of the received packet,
+		 * set the pointer to the first mbuf of the packet and
+		 * initialize its context.
+		 * Otherwise, update the total length and the number of segments
+		 * of the current scattered packet, and update the pointer to
+		 * the last mbuf of the current packet.
+		 */
+		if (!first_seg) {
+			first_seg = mbuf;
+			first_seg->pkt_len = desc.w.length;
+		} else {
+			first_seg->pkt_len =
+					(uint16_t)(first_seg->pkt_len +
+					rte_pktmbuf_data_len(mbuf));
+			first_seg->nb_segs++;
+			last_seg->next = mbuf;
+		}
+
+		/*
+		 * If this is not the last buffer of the received packet,
+		 * update the pointer to the last mbuf of the current scattered
+		 * packet and continue to parse the RX ring.
+		 */
+		if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
+			last_seg = mbuf;
+			continue;
+		}
+
+		first_seg->ol_flags = 0;
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+		rx_desc_to_ol_flags(first_seg, &desc);
+#endif
+		first_seg->hash.rss = desc.d.rss;
+
+		/* Prefetch data of first segment, if configured to do so. */
+		rte_packet_prefetch((char *)first_seg->buf_addr +
+			first_seg->data_off);
+
+		/*
+		 * Store the mbuf address into the next entry of the array
+		 * of returned packets.
+		 */
+		rx_pkts[nb_rcv++] = first_seg;
+
+		/*
+		 * Setup receipt context for a new packet.
+		 */
+		first_seg = NULL;
+	}
+
+	q->next_dd = next_dd;
+
+	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
+		ret = rte_mempool_get_bulk(q->mp,
+					(void **)&q->sw_ring[q->next_alloc],
+					q->alloc_thresh);
+
+		if (unlikely(ret != 0)) {
+			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
+			/*
+			 * Need to restore next_dd if we cannot allocate new
+			 * buffers to replenish the old ones.
+			 */
+			q->next_dd = (q->next_dd + q->nb_desc - count) %
+								q->nb_desc;
+			return 0;
+		}
+
+		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
+			mbuf = q->sw_ring[q->next_alloc];
+
+			/* setup static mbuf fields */
+			fm10k_pktmbuf_reset(mbuf, q->port_id);
+
+			/* write descriptor */
+			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+			q->hw_ring[q->next_alloc] = desc;
+		}
+		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
+		q->next_trigger += q->alloc_thresh;
+		if (q->next_trigger >= q->nb_desc) {
+			q->next_trigger = q->alloc_thresh - 1;
+			q->next_alloc = 0;
+		}
+	}
+
+	q->pkt_first_seg = first_seg;
+	q->pkt_last_seg = last_seg;
+
+	return nb_rcv;
+}
+
 static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
 {
 	uint16_t next_rs, count = 0;