[dpdk-dev,08/17] net/virtio: implement receive path for packed queues

Message ID 20180316152120.13199-9-jfreimann@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin

Checks

Context | Check | Description
ci/checkpatch | success | coding style OK
ci/Intel-compilation | success | Compilation OK

Commit Message

Jens Freimann March 16, 2018, 3:21 p.m. UTC
  From: Yuanhan Liu <yuanhan.liu@linux.intel.com>

Implement the receive part here. No support for mergeable buffers yet.

Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
---
 drivers/net/virtio/virtio_ethdev.c |   5 +-
 drivers/net/virtio/virtio_ethdev.h |   2 +
 drivers/net/virtio/virtio_rxtx.c   | 134 +++++++++++++++++++++++++++++++++++++
 3 files changed, 140 insertions(+), 1 deletion(-)
  

Comments

Tiwei Bie March 19, 2018, 10:15 a.m. UTC | #1
On Fri, Mar 16, 2018 at 04:21:11PM +0100, Jens Freimann wrote:
[...]
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index 722a2cd..888cc49 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -1352,6 +1352,8 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
>  		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
>  			eth_dev->data->port_id);
>  		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
> +	} else if (vtpci_packed_queue(hw)) {
> +		eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;

If MRG_RXBUF isn't supported at this point, then we will
need to make sure that RING_PACKED and MRG_RXBUF won't be
negotiated at the same time. Otherwise this commit will
break the driver.
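
As a rough illustration of the point above, the driver could drop MRG_RXBUF
from its requested feature set whenever it asks for the packed ring, so the
two are never negotiated together. The snippet below is a minimal standalone
model of that check, not the PMD's negotiation code: the feature bit numbers
come from the virtio specification, while fixup_req_features() and everything
around it is hypothetical.

/*
 * Minimal standalone model: until the packed-ring Rx path supports
 * mergeable buffers, do not offer MRG_RXBUF together with RING_PACKED.
 * Only the feature bit numbers are real (virtio spec); the rest is
 * illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_NET_F_MRG_RXBUF  15   /* host can merge receive buffers */
#define VIRTIO_F_RING_PACKED    34   /* packed virtqueue layout */

/* Clear MRG_RXBUF from the offered features when RING_PACKED is set. */
static uint64_t
fixup_req_features(uint64_t req_features)
{
	if (req_features & (1ULL << VIRTIO_F_RING_PACKED))
		req_features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF);
	return req_features;
}

int
main(void)
{
	uint64_t req = (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
		       (1ULL << VIRTIO_F_RING_PACKED);

	req = fixup_req_features(req);
	printf("MRG_RXBUF still offered: %s\n",
	       (req & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "yes" : "no");
	return 0;
}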

>  	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>  		PMD_INIT_LOG(INFO,
>  			"virtio: using mergeable buffer Rx path on port %u",
> @@ -1507,7 +1509,8 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
>  
>  	/* Setting up rx_header size for the device */
>  	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
> -	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
> +	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
> +	    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
>  		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
>  	else
>  		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
[...]
>  #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
>  #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
> @@ -428,6 +429,34 @@
>  
>  	PMD_INIT_FUNC_TRACE();
>  
> +	if (vtpci_packed_queue(hw)) {
> +		struct vring_desc_packed *desc;
> +		struct vq_desc_extra *dxp;
> +
> +		for (desc_idx = 0; desc_idx < vq->vq_nentries;
> +				desc_idx++) {
> +			m = rte_mbuf_raw_alloc(rxvq->mpool);
> +			if (unlikely(m == NULL))
> +				return -ENOMEM;
> +
> +			dxp = &vq->vq_descx[desc_idx];
> +			dxp->cookie = m;
> +			dxp->ndescs = 1;
> +
> +			desc = &vq->vq_ring.desc_packed[desc_idx];
> +			desc->addr = VIRTIO_MBUF_ADDR(m, vq) +
> +				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> +			desc->len = m->buf_len - RTE_PKTMBUF_HEADROOM +
> +				hw->vtnet_hdr_size;
> +			desc->flags |= VRING_DESC_F_WRITE;
> +			rte_smp_wmb();
> +			set_desc_avail(&vq->vq_ring, desc);
> +		}
> +		toggle_wrap_counter(&vq->vq_ring);
> +
> +		return 0;

Maybe it's better to give the debug code (which is at the
end of this function) a chance to run.

Thanks
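
Regarding the second comment, the early "return 0" inside the packed-queue
branch is what skips the debug code at the end of the setup function. The
sketch below shows the suggested shape of the control flow, with both branches
falling through to a common tail; all names are illustrative stand-ins, not
the driver's own functions.

/*
 * Control-flow sketch: refill either ring layout, then let both paths
 * reach the shared tail where the driver's debug/trace code lives.
 */
#include <stdio.h>

static int
fill_packed_ring(int nentries)
{
	/* stand-in for the packed-ring descriptor refill loop */
	printf("filled %d packed descriptors\n", nentries);
	return 0;
}

static int
fill_split_ring(int nentries)
{
	/* stand-in for the existing split-ring refill loop */
	printf("filled %d split descriptors\n", nentries);
	return 0;
}

static int
rx_queue_setup_finish(int packed, int nentries)
{
	int ret = packed ? fill_packed_ring(nentries)
			 : fill_split_ring(nentries);

	if (ret != 0)
		return ret;

	/* common tail: this is where the debug dump would still run */
	printf("rx queue ready, %d entries\n", nentries);
	return 0;
}

int
main(void)
{
	return rx_queue_setup_finish(1, 256);
}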
  
Jason Wang March 26, 2018, 2:15 a.m. UTC | #2
On 2018-03-16 23:21, Jens Freimann wrote:
> From: Yuanhan Liu <yuanhan.liu@linux.intel.com>
>
> Implement the receive part here. No support for mergeable buffers yet.
>
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
> ---
>   drivers/net/virtio/virtio_ethdev.c |   5 +-
>   drivers/net/virtio/virtio_ethdev.h |   2 +
>   drivers/net/virtio/virtio_rxtx.c   | 134 +++++++++++++++++++++++++++++++++++++
>   3 files changed, 140 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index 722a2cd..888cc49 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -1352,6 +1352,8 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
>   		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
>   			eth_dev->data->port_id);
>   		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
> +	} else if (vtpci_packed_queue(hw)) {
> +		eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
>   	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>   		PMD_INIT_LOG(INFO,
>   			"virtio: using mergeable buffer Rx path on port %u",
> @@ -1507,7 +1509,8 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
>   
>   	/* Setting up rx_header size for the device */
>   	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
> -	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
> +	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
> +	    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
>   		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
>   	else
>   		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
> index cfefe4d..92c1c4f 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -72,6 +72,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
>   
>   uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>   		uint16_t nb_pkts);
> +uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
> +		uint16_t nb_pkts);
>   
>   uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>   		uint16_t nb_pkts);
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index f1df004..7834747 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -31,6 +31,7 @@
>   #include "virtqueue.h"
>   #include "virtio_rxtx.h"
>   #include "virtio_rxtx_simple.h"
> +#include "virtio_ring.h"
>   
>   #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
>   #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
> @@ -428,6 +429,34 @@
>   
>   	PMD_INIT_FUNC_TRACE();
>   
> +	if (vtpci_packed_queue(hw)) {
> +		struct vring_desc_packed *desc;
> +		struct vq_desc_extra *dxp;
> +
> +		for (desc_idx = 0; desc_idx < vq->vq_nentries;
> +				desc_idx++) {
> +			m = rte_mbuf_raw_alloc(rxvq->mpool);
> +			if (unlikely(m == NULL))
> +				return -ENOMEM;
> +
> +			dxp = &vq->vq_descx[desc_idx];
> +			dxp->cookie = m;
> +			dxp->ndescs = 1;
> +
> +			desc = &vq->vq_ring.desc_packed[desc_idx];
> +			desc->addr = VIRTIO_MBUF_ADDR(m, vq) +
> +				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> +			desc->len = m->buf_len - RTE_PKTMBUF_HEADROOM +
> +				hw->vtnet_hdr_size;
> +			desc->flags |= VRING_DESC_F_WRITE;
> +			rte_smp_wmb();
> +			set_desc_avail(&vq->vq_ring, desc);
> +		}
> +		toggle_wrap_counter(&vq->vq_ring);
> +
> +		return 0;
> +	}
> +
>   	/* Allocate blank mbufs for the each rx descriptor */
>   	nbufs = 0;
>   
> @@ -702,6 +731,111 @@
>   		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
>   }
>   
> +uint16_t
> +virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
> +		     uint16_t nb_pkts)
> +{
> +	struct virtnet_rx *rxvq = rx_queue;
> +	struct virtqueue *vq = rxvq->vq;
> +	struct virtio_hw *hw = vq->hw;
> +	struct rte_mbuf *rxm, *nmb;
> +	uint16_t nb_rx;
> +	uint32_t len;
> +	uint32_t i;
> +	uint32_t hdr_size;
> +	int offload;
> +	struct virtio_net_hdr *hdr;
> +	struct vring_desc_packed *descs = vq->vq_ring.desc_packed;
> +	struct vring_desc_packed *desc;
> +	uint16_t used_idx = vq->vq_used_cons_idx;
> +	struct vq_desc_extra *dxp;
> +
> +	nb_rx = 0;
> +	if (unlikely(hw->started == 0))
> +		return nb_rx;
> +
> +	hdr_size = hw->vtnet_hdr_size;
> +	offload = rx_offload_enabled(hw);
> +
> +	for (i = 0; i < nb_pkts; i++) {
> +		desc = &descs[used_idx & (vq->vq_nentries - 1)];
> +		if (!desc_is_used(desc))
> +			break;
> +
> +		rte_smp_rmb();
> +
> +		nmb = rte_mbuf_raw_alloc(rxvq->mpool);
> +		if (unlikely(nmb == NULL)) {
> +			struct rte_eth_dev *dev
> +				= &rte_eth_devices[rxvq->port_id];
> +			dev->data->rx_mbuf_alloc_failed++;
> +			break;
> +		}
> +
> +		dxp = &vq->vq_descx[used_idx & (vq->vq_nentries - 1)];
> +
> +		len = desc->len;
> +		rxm = dxp->cookie;
> +		dxp->cookie = nmb;
> +		dxp->ndescs = 1;
> +
> +		desc->addr = VIRTIO_MBUF_ADDR(nmb, vq) +
> +			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> +		desc->len = nmb->buf_len - RTE_PKTMBUF_HEADROOM +
> +			hw->vtnet_hdr_size;
> +		desc->flags |= VRING_DESC_F_WRITE;
> +
> +		PMD_RX_LOG(DEBUG, "packet len:%d", len);
> +
> +		if (unlikely(len < hdr_size + ETHER_HDR_LEN)) {
> +			PMD_RX_LOG(ERR, "Packet drop");
> +			rte_pktmbuf_free(rxm);
> +			rxvq->stats.errors++;
> +			continue;
> +		}
> +
> +		rxm->port = rxvq->port_id;
> +		rxm->data_off = RTE_PKTMBUF_HEADROOM;
> +		rxm->ol_flags = 0;
> +		rxm->vlan_tci = 0;
> +
> +		rxm->pkt_len = (uint32_t)(len - hdr_size);
> +		rxm->data_len = (uint16_t)(len - hdr_size);
> +
> +		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
> +			RTE_PKTMBUF_HEADROOM - hdr_size);
> +
> +		if (hw->vlan_strip)
> +			rte_vlan_strip(rxm);
> +
> +		if (offload && virtio_rx_offload(rxm, hdr) < 0) {
> +			rte_pktmbuf_free(rxm);
> +			rxvq->stats.errors++;
> +			continue;
> +		}
> +
> +		VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
> +
> +		rxvq->stats.bytes += rxm->pkt_len;
> +		virtio_update_packet_stats(&rxvq->stats, rxm);
> +
> +		rte_smp_wmb();
> +		set_desc_avail(&vq->vq_ring, desc);
> +
> +		rx_pkts[nb_rx++] = rxm;
> +
> +		used_idx++;
> +		if ((used_idx & (vq->vq_nentries - 1)) == 0)
> +			toggle_wrap_counter(&vq->vq_ring);
> +	}
> +
> +	rxvq->stats.packets += nb_rx;

I believe we need to kick the virtqueue here, since we cannot assume the
backend (vhost-net) is always polling the virtqueue.

Thanks

> +
> +	vq->vq_used_cons_idx = used_idx;
> +
> +	return nb_rx;
> +}
> +
>   #define VIRTIO_MBUF_BURST_SZ 64
>   #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
>   uint16_t
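
A standalone model of the notification suggested above is sketched below. It
only illustrates kicking the backend at the end of the Rx burst when
descriptors were handed back and notifications are not suppressed; in the
real driver this would presumably map onto the virtqueue_kick_prepare() and
virtqueue_notify() pair that the split-ring Rx path already uses. All names
in the snippet are hypothetical.

/*
 * Model of the missing kick: after the burst loop returns descriptors
 * to the device, notify (kick) the backend unless it asked for
 * notifications to be suppressed, since vhost is not guaranteed to be
 * polling the ring.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_vq {
	uint16_t nb_refilled;   /* descriptors handed back this burst */
	bool notify_suppressed; /* device asked us not to kick */
};

static void
model_notify(struct model_vq *vq)
{
	/* stands in for the doorbell write to the device */
	printf("kicked backend after refilling %u descriptors\n",
	       (unsigned int)vq->nb_refilled);
}

/* Call at the end of the Rx burst, after vq_used_cons_idx is updated. */
static void
model_kick_if_needed(struct model_vq *vq)
{
	if (vq->nb_refilled > 0 && !vq->notify_suppressed)
		model_notify(vq);
}

int
main(void)
{
	struct model_vq vq = { .nb_refilled = 32, .notify_suppressed = false };

	model_kick_if_needed(&vq);
	return 0;
}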
  

Patch

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 722a2cd..888cc49 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1352,6 +1352,8 @@  static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
 		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
 			eth_dev->data->port_id);
 		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+	} else if (vtpci_packed_queue(hw)) {
+		eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
 	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
 		PMD_INIT_LOG(INFO,
 			"virtio: using mergeable buffer Rx path on port %u",
@@ -1507,7 +1509,8 @@  static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
 
 	/* Setting up rx_header size for the device */
 	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
-	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+	    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	else
 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index cfefe4d..92c1c4f 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -72,6 +72,8 @@  int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
 
 uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
+uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
 
 uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index f1df004..7834747 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -31,6 +31,7 @@ 
 #include "virtqueue.h"
 #include "virtio_rxtx.h"
 #include "virtio_rxtx_simple.h"
+#include "virtio_ring.h"
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -428,6 +429,34 @@ 
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (vtpci_packed_queue(hw)) {
+		struct vring_desc_packed *desc;
+		struct vq_desc_extra *dxp;
+
+		for (desc_idx = 0; desc_idx < vq->vq_nentries;
+				desc_idx++) {
+			m = rte_mbuf_raw_alloc(rxvq->mpool);
+			if (unlikely(m == NULL))
+				return -ENOMEM;
+
+			dxp = &vq->vq_descx[desc_idx];
+			dxp->cookie = m;
+			dxp->ndescs = 1;
+
+			desc = &vq->vq_ring.desc_packed[desc_idx];
+			desc->addr = VIRTIO_MBUF_ADDR(m, vq) +
+				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+			desc->len = m->buf_len - RTE_PKTMBUF_HEADROOM +
+				hw->vtnet_hdr_size;
+			desc->flags |= VRING_DESC_F_WRITE;
+			rte_smp_wmb();
+			set_desc_avail(&vq->vq_ring, desc);
+		}
+		toggle_wrap_counter(&vq->vq_ring);
+
+		return 0;
+	}
+
 	/* Allocate blank mbufs for the each rx descriptor */
 	nbufs = 0;
 
@@ -702,6 +731,111 @@ 
 		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
 }
 
+uint16_t
+virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+		     uint16_t nb_pkts)
+{
+	struct virtnet_rx *rxvq = rx_queue;
+	struct virtqueue *vq = rxvq->vq;
+	struct virtio_hw *hw = vq->hw;
+	struct rte_mbuf *rxm, *nmb;
+	uint16_t nb_rx;
+	uint32_t len;
+	uint32_t i;
+	uint32_t hdr_size;
+	int offload;
+	struct virtio_net_hdr *hdr;
+	struct vring_desc_packed *descs = vq->vq_ring.desc_packed;
+	struct vring_desc_packed *desc;
+	uint16_t used_idx = vq->vq_used_cons_idx;
+	struct vq_desc_extra *dxp;
+
+	nb_rx = 0;
+	if (unlikely(hw->started == 0))
+		return nb_rx;
+
+	hdr_size = hw->vtnet_hdr_size;
+	offload = rx_offload_enabled(hw);
+
+	for (i = 0; i < nb_pkts; i++) {
+		desc = &descs[used_idx & (vq->vq_nentries - 1)];
+		if (!desc_is_used(desc))
+			break;
+
+		rte_smp_rmb();
+
+		nmb = rte_mbuf_raw_alloc(rxvq->mpool);
+		if (unlikely(nmb == NULL)) {
+			struct rte_eth_dev *dev
+				= &rte_eth_devices[rxvq->port_id];
+			dev->data->rx_mbuf_alloc_failed++;
+			break;
+		}
+
+		dxp = &vq->vq_descx[used_idx & (vq->vq_nentries - 1)];
+
+		len = desc->len;
+		rxm = dxp->cookie;
+		dxp->cookie = nmb;
+		dxp->ndescs = 1;
+
+		desc->addr = VIRTIO_MBUF_ADDR(nmb, vq) +
+			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+		desc->len = nmb->buf_len - RTE_PKTMBUF_HEADROOM +
+			hw->vtnet_hdr_size;
+		desc->flags |= VRING_DESC_F_WRITE;
+
+		PMD_RX_LOG(DEBUG, "packet len:%d", len);
+
+		if (unlikely(len < hdr_size + ETHER_HDR_LEN)) {
+			PMD_RX_LOG(ERR, "Packet drop");
+			rte_pktmbuf_free(rxm);
+			rxvq->stats.errors++;
+			continue;
+		}
+
+		rxm->port = rxvq->port_id;
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+		rxm->ol_flags = 0;
+		rxm->vlan_tci = 0;
+
+		rxm->pkt_len = (uint32_t)(len - hdr_size);
+		rxm->data_len = (uint16_t)(len - hdr_size);
+
+		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
+			RTE_PKTMBUF_HEADROOM - hdr_size);
+
+		if (hw->vlan_strip)
+			rte_vlan_strip(rxm);
+
+		if (offload && virtio_rx_offload(rxm, hdr) < 0) {
+			rte_pktmbuf_free(rxm);
+			rxvq->stats.errors++;
+			continue;
+		}
+
+		VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
+
+		rxvq->stats.bytes += rxm->pkt_len;
+		virtio_update_packet_stats(&rxvq->stats, rxm);
+
+		rte_smp_wmb();
+		set_desc_avail(&vq->vq_ring, desc);
+
+		rx_pkts[nb_rx++] = rxm;
+
+		used_idx++;
+		if ((used_idx & (vq->vq_nentries - 1)) == 0)
+			toggle_wrap_counter(&vq->vq_ring);
+	}
+
+	rxvq->stats.packets += nb_rx;
+
+	vq->vq_used_cons_idx = used_idx;
+
+	return nb_rx;
+}
+
 #define VIRTIO_MBUF_BURST_SZ 64
 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
 uint16_t