[v5,2/2] example/vhost: support to clear in-flight packets for async dequeue

Message ID 20220609173404.1769210-3-yuanx.wang@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Maxime Coquelin
Series: support to clear in-flight packets for async

Checks

Context              | Check   | Description
ci/checkpatch        | success | coding style OK
ci/Intel-compilation | success | Compilation OK
ci/intel-Testing     | success | Testing PASS

Commit Message

Wang, YuanX June 9, 2022, 5:34 p.m. UTC
  This patch allows vring_state_changed() to clear in-flight
dequeue packets. It also clears the in-flight packets in
a thread-safe way in destroy_device().

Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
---
 examples/vhost/main.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)
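
The heart of the change is a blocking drain loop: ask how many async packets are still in flight on the queue, clear them, and repeat until the count reaches zero. A minimal sketch of that pattern follows; it mirrors the vhost_clear_queue() helper added by the diff, but the function name drain_inflight_pkts() and the dma_id parameter are illustrative only (in the patch, dma_id comes from the application's dma_bind[] table and packets are freed with the app's free_pkts() helper rather than rte_pktmbuf_free_bulk()).

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_vhost_async.h>

/* Drain every in-flight async packet on one virtqueue, then return.
 * dma_id is the DMA device bound to the queue; vchan_id 0 as in the
 * example application. */
static void
drain_inflight_pkts(int vid, uint16_t queue_id, int16_t dma_id)
{
	int pkts_inflight = rte_vhost_async_get_inflight(vid, queue_id);

	while (pkts_inflight > 0) {
		struct rte_mbuf *m_cpl[pkts_inflight];
		uint16_t n_pkt;

		/* The locking variant of the clear API, usable outside the
		 * vhost callback context, e.g. from destroy_device(). */
		n_pkt = rte_vhost_clear_queue(vid, queue_id, m_cpl,
						pkts_inflight, dma_id, 0);
		rte_pktmbuf_free_bulk(m_cpl, n_pkt);
		pkts_inflight = rte_vhost_async_get_inflight(vid, queue_id);
	}
}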
  

Comments

Maxime Coquelin June 14, 2022, 1:28 p.m. UTC | #1
On 6/9/22 19:34, Yuan Wang wrote:
> This patch allows vring_state_changed() to clear in-flight
> dequeue packets. It also clears the in-flight packets in
> a thread-safe way in destroy_device().
> 
> Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
> ---
>   examples/vhost/main.c | 26 +++++++++++++++++++++-----
>   1 file changed, 21 insertions(+), 5 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime
  
Hu, Jiayu June 14, 2022, 11:56 p.m. UTC | #2
Reviewed-by: Jiayu Hu <jiayu.hu@intel.com>

Thanks,
Jiayu

> -----Original Message-----
> From: Wang, YuanX <yuanx.wang@intel.com>
> Sent: Friday, June 10, 2022 1:34 AM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>; dev@dpdk.org
> Cc: Hu, Jiayu <jiayu.hu@intel.com>; Ding, Xuan <xuan.ding@intel.com>; Pai G, Sunil <sunil.pai.g@intel.com>; Wang, YuanX <yuanx.wang@intel.com>
> Subject: [PATCH v5 2/2] example/vhost: support to clear in-flight packets for async dequeue
> 
> This patch allows vring_state_changed() to clear in-flight dequeue packets. It
> also clears the in-flight packets in a thread-safe way in destroy_device().
> 
> Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
> ---
>  examples/vhost/main.c | 26 +++++++++++++++++++++-----
>  1 file changed, 21 insertions(+), 5 deletions(-)
> 
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index e7fee5aa1b..a679ef738c 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -1543,6 +1543,25 @@ vhost_clear_queue_thread_unsafe(struct vhost_dev *vdev, uint16_t queue_id)
>  	}
>  }
> 
> +static void
> +vhost_clear_queue(struct vhost_dev *vdev, uint16_t queue_id)
> +{
> +	uint16_t n_pkt = 0;
> +	int pkts_inflight;
> +
> +	int16_t dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
> +	pkts_inflight = rte_vhost_async_get_inflight(vdev->vid, queue_id);
> +
> +	struct rte_mbuf *m_cpl[pkts_inflight];
> +
> +	while (pkts_inflight) {
> +		n_pkt = rte_vhost_clear_queue(vdev->vid, queue_id, m_cpl,
> +						pkts_inflight, dma_id, 0);
> +		free_pkts(m_cpl, n_pkt);
> +		pkts_inflight = rte_vhost_async_get_inflight(vdev->vid, queue_id);
> +	}
> +}
> +
>  /*
>   * Remove a device from the specific data core linked list and from the
>   * main linked list. Synchronization  occurs through the use of the
> @@ -1600,13 +1619,13 @@ destroy_device(int vid)
>  		vdev->vid);
> 
>  	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
> -		vhost_clear_queue_thread_unsafe(vdev, VIRTIO_RXQ);
> +		vhost_clear_queue(vdev, VIRTIO_RXQ);
>  		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
>  		dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
>  	}
> 
>  	if (dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled) {
> -		vhost_clear_queue_thread_unsafe(vdev, VIRTIO_TXQ);
> +		vhost_clear_queue(vdev, VIRTIO_TXQ);
>  		rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
>  		dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled = false;
>  	}
> @@ -1765,9 +1784,6 @@ vring_state_changed(int vid, uint16_t queue_id, int enable)
>  	if (!vdev)
>  		return -1;
> 
> -	if (queue_id != VIRTIO_RXQ)
> -		return 0;
> -
>  	if (dma_bind[vid2socketid[vid]].dmas[queue_id].async_enabled) {
>  		if (!enable)
>  			vhost_clear_queue_thread_unsafe(vdev, queue_id);
> --
> 2.25.1
  

Patch

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index e7fee5aa1b..a679ef738c 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1543,6 +1543,25 @@  vhost_clear_queue_thread_unsafe(struct vhost_dev *vdev, uint16_t queue_id)
 	}
 }
 
+static void
+vhost_clear_queue(struct vhost_dev *vdev, uint16_t queue_id)
+{
+	uint16_t n_pkt = 0;
+	int pkts_inflight;
+
+	int16_t dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
+	pkts_inflight = rte_vhost_async_get_inflight(vdev->vid, queue_id);
+
+	struct rte_mbuf *m_cpl[pkts_inflight];
+
+	while (pkts_inflight) {
+		n_pkt = rte_vhost_clear_queue(vdev->vid, queue_id, m_cpl,
+						pkts_inflight, dma_id, 0);
+		free_pkts(m_cpl, n_pkt);
+		pkts_inflight = rte_vhost_async_get_inflight(vdev->vid, queue_id);
+	}
+}
+
 /*
  * Remove a device from the specific data core linked list and from the
  * main linked list. Synchronization  occurs through the use of the
@@ -1600,13 +1619,13 @@  destroy_device(int vid)
 		vdev->vid);
 
 	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
-		vhost_clear_queue_thread_unsafe(vdev, VIRTIO_RXQ);
+		vhost_clear_queue(vdev, VIRTIO_RXQ);
 		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
 		dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
 	}
 
 	if (dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled) {
-		vhost_clear_queue_thread_unsafe(vdev, VIRTIO_TXQ);
+		vhost_clear_queue(vdev, VIRTIO_TXQ);
 		rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
 		dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled = false;
 	}
@@ -1765,9 +1784,6 @@  vring_state_changed(int vid, uint16_t queue_id, int enable)
 	if (!vdev)
 		return -1;
 
-	if (queue_id != VIRTIO_RXQ)
-		return 0;
-
 	if (dma_bind[vid2socketid[vid]].dmas[queue_id].async_enabled) {
 		if (!enable)
 			vhost_clear_queue_thread_unsafe(vdev, queue_id);
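
Note the split between the two call sites above: destroy_device() now goes through vhost_clear_queue(), whose rte_vhost_clear_queue() call is the "thread-safe way" mentioned in the commit message, while vring_state_changed() keeps vhost_clear_queue_thread_unsafe(), whose underlying clear call does no locking of its own. Dropping the queue_id != VIRTIO_RXQ early return is what extends this clearing to the dequeue (TX) queue, matching the patch title. A small sketch of picking between the two library calls (assuming rte_vhost_clear_queue_thread_unsafe() takes the same arguments as rte_vhost_clear_queue(); the wrapper name is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_vhost_async.h>

/* One clearing step; returns how many in-flight packets were handed back.
 * in_vhost_callback selects the thread-unsafe variant, which does no
 * locking and should only be used where the surrounding context already
 * guarantees exclusive vring access (as in vring_state_changed() here).
 * The caller must free the returned mbufs, as the example app does with
 * free_pkts(). */
static uint16_t
clear_queue_once(int vid, uint16_t queue_id, int16_t dma_id,
		struct rte_mbuf **pkts, uint16_t count, bool in_vhost_callback)
{
	if (in_vhost_callback)
		return rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
				pkts, count, dma_id, 0);
	return rte_vhost_clear_queue(vid, queue_id, pkts, count, dma_id, 0);
}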