[v2,1/2] net/virtio: update stats when in order xmit done

Message ID 20190910161446.36361-1-yong.liu@intel.com
State New
Delegated to: Maxime Coquelin
Headers show
Series
  • [v2,1/2] net/virtio: update stats when in order xmit done
Related show

Checks

Context Check Description
ci/mellanox-Performance success Performance Testing PASS
ci/intel-Performance success Performance Testing PASS
ci/iol-dpdk_compile_ovs success Compile Testing PASS
ci/iol-dpdk_compile success Compile Testing PASS
ci/iol-dpdk_compile_spdk success Compile Testing PASS
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Liu, Yong Sept. 10, 2019, 4:14 p.m.
When doing xmit in-order enqueue, packets are buffered and then flushed
into the avail ring. Buffered packets can be dropped due to insufficient
space. Moving the stats update to just after a successful avail ring
update guarantees correctness.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
---
 drivers/net/virtio/virtio_rxtx.c | 87 ++++++++++++++++----------------
 1 file changed, 44 insertions(+), 43 deletions(-)

Comments

Tiwei Bie Sept. 18, 2019, 2:34 a.m. | #1
On Wed, Sep 11, 2019 at 12:14:45AM +0800, Marvin Liu wrote:
> When doing xmit in-order enqueue, packets are buffered and then flushed
> into avail ring. Buffered packets can be dropped due to insufficient
> space. Moving stats update action just after successful avail ring
> updates can guarantee correctness.
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> ---
>  drivers/net/virtio/virtio_rxtx.c | 87 ++++++++++++++++----------------
>  1 file changed, 44 insertions(+), 43 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 27ead19fb..d3ca36831 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -106,6 +106,48 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
>  	dxp->next = VQ_RING_DESC_CHAIN_END;
>  }
>  
> +static inline void
> +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
> +{
> +	uint32_t s = mbuf->pkt_len;
> +	struct rte_ether_addr *ea;
> +
> +	stats->bytes += s;
> +
> +	if (s == 64) {
> +		stats->size_bins[1]++;
> +	} else if (s > 64 && s < 1024) {
> +		uint32_t bin;
> +
> +		/* count zeros, and offset into correct bin */
> +		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> +		stats->size_bins[bin]++;
> +	} else {
> +		if (s < 64)
> +			stats->size_bins[0]++;
> +		else if (s < 1519)
> +			stats->size_bins[6]++;
> +		else
> +			stats->size_bins[7]++;
> +	}
> +
> +	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> +	if (rte_is_multicast_ether_addr(ea)) {
> +		if (rte_is_broadcast_ether_addr(ea))
> +			stats->broadcast++;
> +		else
> +			stats->multicast++;
> +	}
> +}
> +
> +static inline void
> +virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> +{
> +	VIRTIO_DUMP_PACKET(m, m->data_len);
> +
> +	virtio_update_packet_stats(&rxvq->stats, m);
> +}
> +
>  static uint16_t
>  virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
>  				  struct rte_mbuf **rx_pkts,
> @@ -317,7 +359,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
>  }
>  
>  /* Cleanup from completed inorder transmits. */
> -static void
> +static __rte_always_inline void
>  virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
>  {
>  	uint16_t i, idx = vq->vq_used_cons_idx;
> @@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
>  		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
>  		dxp->cookie = (void *)cookies[i];
>  		dxp->ndescs = 1;
> +		virtio_update_packet_stats(&txvq->stats, cookies[i]);

The virtio_update_packet_stats() call in virtio_xmit_pkts_inorder()
should be removed.


>  
>  		hdr = (struct virtio_net_hdr *)
>  			rte_pktmbuf_prepend(cookies[i], head_size);
> @@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
>  	}
>  }
>  
> -static inline void
> -virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
> -{
> -	uint32_t s = mbuf->pkt_len;
> -	struct rte_ether_addr *ea;
> -
> -	stats->bytes += s;
> -
> -	if (s == 64) {
> -		stats->size_bins[1]++;
> -	} else if (s > 64 && s < 1024) {
> -		uint32_t bin;
> -
> -		/* count zeros, and offset into correct bin */
> -		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> -		stats->size_bins[bin]++;
> -	} else {
> -		if (s < 64)
> -			stats->size_bins[0]++;
> -		else if (s < 1519)
> -			stats->size_bins[6]++;
> -		else
> -			stats->size_bins[7]++;
> -	}
> -
> -	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> -	if (rte_is_multicast_ether_addr(ea)) {
> -		if (rte_is_broadcast_ether_addr(ea))
> -			stats->broadcast++;
> -		else
> -			stats->multicast++;
> -	}
> -}
> -
> -static inline void
> -virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> -{
> -	VIRTIO_DUMP_PACKET(m, m->data_len);
> -
> -	virtio_update_packet_stats(&rxvq->stats, m);
> -}
> -
>  /* Optionally fill offload information in structure */
>  static inline int
>  virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
> -- 
> 2.17.1
>
Liu, Yong Sept. 18, 2019, 3:19 a.m. | #2
> -----Original Message-----
> From: Bie, Tiwei
> Sent: Wednesday, September 18, 2019 10:35 AM
> To: Liu, Yong <yong.liu@intel.com>
> Cc: maxime.coquelin@redhat.com; dev@dpdk.org
> Subject: Re: [PATCH v2 1/2] net/virtio: update stats when in order xmit
> done
> 
> On Wed, Sep 11, 2019 at 12:14:45AM +0800, Marvin Liu wrote:
> > When doing xmit in-order enqueue, packets are buffered and then flushed
> > into avail ring. Buffered packets can be dropped due to insufficient
> > space. Moving stats update action just after successful avail ring
> > updates can guarantee correctness.
> >
> > Signed-off-by: Marvin Liu <yong.liu@intel.com>
> > ---
> >  drivers/net/virtio/virtio_rxtx.c | 87 ++++++++++++++++----------------
> >  1 file changed, 44 insertions(+), 43 deletions(-)
> >
> > diff --git a/drivers/net/virtio/virtio_rxtx.c
> b/drivers/net/virtio/virtio_rxtx.c
> > index 27ead19fb..d3ca36831 100644
> > --- a/drivers/net/virtio/virtio_rxtx.c
> > +++ b/drivers/net/virtio/virtio_rxtx.c
> > @@ -106,6 +106,48 @@ vq_ring_free_id_packed(struct virtqueue *vq,
> uint16_t id)
> >  	dxp->next = VQ_RING_DESC_CHAIN_END;
> >  }
> >
> > +static inline void
> > +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf
> *mbuf)
> > +{
> > +	uint32_t s = mbuf->pkt_len;
> > +	struct rte_ether_addr *ea;
> > +
> > +	stats->bytes += s;
> > +
> > +	if (s == 64) {
> > +		stats->size_bins[1]++;
> > +	} else if (s > 64 && s < 1024) {
> > +		uint32_t bin;
> > +
> > +		/* count zeros, and offset into correct bin */
> > +		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> > +		stats->size_bins[bin]++;
> > +	} else {
> > +		if (s < 64)
> > +			stats->size_bins[0]++;
> > +		else if (s < 1519)
> > +			stats->size_bins[6]++;
> > +		else
> > +			stats->size_bins[7]++;
> > +	}
> > +
> > +	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> > +	if (rte_is_multicast_ether_addr(ea)) {
> > +		if (rte_is_broadcast_ether_addr(ea))
> > +			stats->broadcast++;
> > +		else
> > +			stats->multicast++;
> > +	}
> > +}
> > +
> > +static inline void
> > +virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> > +{
> > +	VIRTIO_DUMP_PACKET(m, m->data_len);
> > +
> > +	virtio_update_packet_stats(&rxvq->stats, m);
> > +}
> > +
> >  static uint16_t
> >  virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
> >  				  struct rte_mbuf **rx_pkts,
> > @@ -317,7 +359,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t
> num)
> >  }
> >
> >  /* Cleanup from completed inorder transmits. */
> > -static void
> > +static __rte_always_inline void
> >  virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
> >  {
> >  	uint16_t i, idx = vq->vq_used_cons_idx;
> > @@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx
> *txvq,
> >  		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
> >  		dxp->cookie = (void *)cookies[i];
> >  		dxp->ndescs = 1;
> > +		virtio_update_packet_stats(&txvq->stats, cookies[i]);
> 
> The virtio_update_packet_stats() call in virtio_xmit_pkts_inorder()
> should be removed.
> 

Hi Tiwei,
The call remaining in virtio_xmit_pkts_inorder is for those packets not handled by the burst enqueue function.
Statistics for packets handled by the burst in-order enqueue function are updated in the inner loop.

Thanks,
Marvin

> 
> >
> >  		hdr = (struct virtio_net_hdr *)
> >  			rte_pktmbuf_prepend(cookies[i], head_size);
> > @@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq,
> struct rte_mbuf *m)
> >  	}
> >  }
> >
> > -static inline void
> > -virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf
> *mbuf)
> > -{
> > -	uint32_t s = mbuf->pkt_len;
> > -	struct rte_ether_addr *ea;
> > -
> > -	stats->bytes += s;
> > -
> > -	if (s == 64) {
> > -		stats->size_bins[1]++;
> > -	} else if (s > 64 && s < 1024) {
> > -		uint32_t bin;
> > -
> > -		/* count zeros, and offset into correct bin */
> > -		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> > -		stats->size_bins[bin]++;
> > -	} else {
> > -		if (s < 64)
> > -			stats->size_bins[0]++;
> > -		else if (s < 1519)
> > -			stats->size_bins[6]++;
> > -		else
> > -			stats->size_bins[7]++;
> > -	}
> > -
> > -	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> > -	if (rte_is_multicast_ether_addr(ea)) {
> > -		if (rte_is_broadcast_ether_addr(ea))
> > -			stats->broadcast++;
> > -		else
> > -			stats->multicast++;
> > -	}
> > -}
> > -
> > -static inline void
> > -virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> > -{
> > -	VIRTIO_DUMP_PACKET(m, m->data_len);
> > -
> > -	virtio_update_packet_stats(&rxvq->stats, m);
> > -}
> > -
> >  /* Optionally fill offload information in structure */
> >  static inline int
> >  virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
> > --
> > 2.17.1
> >
Tiwei Bie Sept. 18, 2019, 4:18 a.m. | #3
On Wed, Sep 18, 2019 at 11:19:03AM +0800, Liu, Yong wrote:
> > -----Original Message-----
> > From: Bie, Tiwei
> > Sent: Wednesday, September 18, 2019 10:35 AM
> > To: Liu, Yong <yong.liu@intel.com>
> > Cc: maxime.coquelin@redhat.com; dev@dpdk.org
> > Subject: Re: [PATCH v2 1/2] net/virtio: update stats when in order xmit
> > done
> > 
> > On Wed, Sep 11, 2019 at 12:14:45AM +0800, Marvin Liu wrote:
> > > When doing xmit in-order enqueue, packets are buffered and then flushed
> > > into avail ring. Buffered packets can be dropped due to insufficient
> > > space. Moving stats update action just after successful avail ring
> > > updates can guarantee correctness.
> > >
> > > Signed-off-by: Marvin Liu <yong.liu@intel.com>
> > > ---
> > >  drivers/net/virtio/virtio_rxtx.c | 87 ++++++++++++++++----------------
> > >  1 file changed, 44 insertions(+), 43 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio/virtio_rxtx.c
> > b/drivers/net/virtio/virtio_rxtx.c
> > > index 27ead19fb..d3ca36831 100644
> > > --- a/drivers/net/virtio/virtio_rxtx.c
> > > +++ b/drivers/net/virtio/virtio_rxtx.c
> > > @@ -106,6 +106,48 @@ vq_ring_free_id_packed(struct virtqueue *vq,
> > uint16_t id)
> > >  	dxp->next = VQ_RING_DESC_CHAIN_END;
> > >  }
> > >
> > > +static inline void
> > > +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf
> > *mbuf)
> > > +{
> > > +	uint32_t s = mbuf->pkt_len;
> > > +	struct rte_ether_addr *ea;
> > > +
> > > +	stats->bytes += s;
> > > +
> > > +	if (s == 64) {
> > > +		stats->size_bins[1]++;
> > > +	} else if (s > 64 && s < 1024) {
> > > +		uint32_t bin;
> > > +
> > > +		/* count zeros, and offset into correct bin */
> > > +		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> > > +		stats->size_bins[bin]++;
> > > +	} else {
> > > +		if (s < 64)
> > > +			stats->size_bins[0]++;
> > > +		else if (s < 1519)
> > > +			stats->size_bins[6]++;
> > > +		else
> > > +			stats->size_bins[7]++;
> > > +	}
> > > +
> > > +	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> > > +	if (rte_is_multicast_ether_addr(ea)) {
> > > +		if (rte_is_broadcast_ether_addr(ea))
> > > +			stats->broadcast++;
> > > +		else
> > > +			stats->multicast++;
> > > +	}
> > > +}
> > > +
> > > +static inline void
> > > +virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> > > +{
> > > +	VIRTIO_DUMP_PACKET(m, m->data_len);
> > > +
> > > +	virtio_update_packet_stats(&rxvq->stats, m);
> > > +}
> > > +
> > >  static uint16_t
> > >  virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
> > >  				  struct rte_mbuf **rx_pkts,
> > > @@ -317,7 +359,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t
> > num)
> > >  }
> > >
> > >  /* Cleanup from completed inorder transmits. */
> > > -static void
> > > +static __rte_always_inline void
> > >  virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
> > >  {
> > >  	uint16_t i, idx = vq->vq_used_cons_idx;
> > > @@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx
> > *txvq,
> > >  		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
> > >  		dxp->cookie = (void *)cookies[i];
> > >  		dxp->ndescs = 1;
> > > +		virtio_update_packet_stats(&txvq->stats, cookies[i]);
> > 
> > The virtio_update_packet_stats() call in virtio_xmit_pkts_inorder()
> > should be removed.
> > 
> 
> Hi Tiwei,
> Function remained in virtio_xmit_pkts_inorder is for those packets not handled by burst enqueue function.
> Statistic of packets which handled in burst in_order enqueue function is updated in inner loop.

I mean the virtio_update_packet_stats() call below in
virtio_xmit_pkts_inorder() should be removed while
making the above change:

https://github.com/DPDK/dpdk/blob/master/drivers/net/virtio/virtio_rxtx.c#L2201

I saw that the above line is removed by PATCH v2 2/2, but it
should be done in this patch.


> 
> Thanks,
> Marvin
> 
> > 
> > >
> > >  		hdr = (struct virtio_net_hdr *)
> > >  			rte_pktmbuf_prepend(cookies[i], head_size);
> > > @@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq,
> > struct rte_mbuf *m)
> > >  	}
> > >  }
> > >
> > > -static inline void
> > > -virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf
> > *mbuf)
> > > -{
> > > -	uint32_t s = mbuf->pkt_len;
> > > -	struct rte_ether_addr *ea;
> > > -
> > > -	stats->bytes += s;
> > > -
> > > -	if (s == 64) {
> > > -		stats->size_bins[1]++;
> > > -	} else if (s > 64 && s < 1024) {
> > > -		uint32_t bin;
> > > -
> > > -		/* count zeros, and offset into correct bin */
> > > -		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> > > -		stats->size_bins[bin]++;
> > > -	} else {
> > > -		if (s < 64)
> > > -			stats->size_bins[0]++;
> > > -		else if (s < 1519)
> > > -			stats->size_bins[6]++;
> > > -		else
> > > -			stats->size_bins[7]++;
> > > -	}
> > > -
> > > -	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> > > -	if (rte_is_multicast_ether_addr(ea)) {
> > > -		if (rte_is_broadcast_ether_addr(ea))
> > > -			stats->broadcast++;
> > > -		else
> > > -			stats->multicast++;
> > > -	}
> > > -}
> > > -
> > > -static inline void
> > > -virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> > > -{
> > > -	VIRTIO_DUMP_PACKET(m, m->data_len);
> > > -
> > > -	virtio_update_packet_stats(&rxvq->stats, m);
> > > -}
> > > -
> > >  /* Optionally fill offload information in structure */
> > >  static inline int
> > >  virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
> > > --
> > > 2.17.1
> > >

Patch

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 27ead19fb..d3ca36831 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -106,6 +106,48 @@  vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
 	dxp->next = VQ_RING_DESC_CHAIN_END;
 }
 
+static inline void
+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+	uint32_t s = mbuf->pkt_len;
+	struct rte_ether_addr *ea;
+
+	stats->bytes += s;
+
+	if (s == 64) {
+		stats->size_bins[1]++;
+	} else if (s > 64 && s < 1024) {
+		uint32_t bin;
+
+		/* count zeros, and offset into correct bin */
+		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+		stats->size_bins[bin]++;
+	} else {
+		if (s < 64)
+			stats->size_bins[0]++;
+		else if (s < 1519)
+			stats->size_bins[6]++;
+		else
+			stats->size_bins[7]++;
+	}
+
+	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+	if (rte_is_multicast_ether_addr(ea)) {
+		if (rte_is_broadcast_ether_addr(ea))
+			stats->broadcast++;
+		else
+			stats->multicast++;
+	}
+}
+
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+	VIRTIO_DUMP_PACKET(m, m->data_len);
+
+	virtio_update_packet_stats(&rxvq->stats, m);
+}
+
 static uint16_t
 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
 				  struct rte_mbuf **rx_pkts,
@@ -317,7 +359,7 @@  virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
 }
 
 /* Cleanup from completed inorder transmits. */
-static void
+static __rte_always_inline void
 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
 {
 	uint16_t i, idx = vq->vq_used_cons_idx;
@@ -596,6 +638,7 @@  virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
 		dxp->cookie = (void *)cookies[i];
 		dxp->ndescs = 1;
+		virtio_update_packet_stats(&txvq->stats, cookies[i]);
 
 		hdr = (struct virtio_net_hdr *)
 			rte_pktmbuf_prepend(cookies[i], head_size);
@@ -1083,48 +1126,6 @@  virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
 	}
 }
 
-static inline void
-virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
-{
-	uint32_t s = mbuf->pkt_len;
-	struct rte_ether_addr *ea;
-
-	stats->bytes += s;
-
-	if (s == 64) {
-		stats->size_bins[1]++;
-	} else if (s > 64 && s < 1024) {
-		uint32_t bin;
-
-		/* count zeros, and offset into correct bin */
-		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
-		stats->size_bins[bin]++;
-	} else {
-		if (s < 64)
-			stats->size_bins[0]++;
-		else if (s < 1519)
-			stats->size_bins[6]++;
-		else
-			stats->size_bins[7]++;
-	}
-
-	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
-	if (rte_is_multicast_ether_addr(ea)) {
-		if (rte_is_broadcast_ether_addr(ea))
-			stats->broadcast++;
-		else
-			stats->multicast++;
-	}
-}
-
-static inline void
-virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
-{
-	VIRTIO_DUMP_PACKET(m, m->data_len);
-
-	virtio_update_packet_stats(&rxvq->stats, m);
-}
-
 /* Optionally fill offload information in structure */
 static inline int
 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)