diff mbox series

[v2,2/2] example/vhost: support to clear in-flight packets for async dequeue

Message ID 20220513163524.1205551-3-yuanx.wang@intel.com (mailing list archive)
State New
Delegated to: Maxime Coquelin
Headers show
Series [v2,1/2] vhost: support clear in-flight packets for async dequeue | expand

Checks

Context Check Description
ci/Intel-compilation warning apply issues
ci/checkpatch success coding style OK

Commit Message

Yuan Wang May 13, 2022, 4:35 p.m. UTC
This patch allows vring_state_changed() to clear in-flight
dequeue packets. It also clears the in-flight packets in
a thread-safe way in destroy_device().

Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
---
 examples/vhost/main.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)
diff mbox series

Patch

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index d070391727..a97ac23061 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1537,6 +1537,25 @@  vhost_clear_queue_thread_unsafe(struct vhost_dev *vdev, uint16_t queue_id)
 	}
 }
 
+/* Clear all in-flight async packets on a queue via the thread-safe API
+ * (cf. vhost_clear_queue_thread_unsafe); frees every drained mbuf.
+ */
+static void
+vhost_clear_queue(struct vhost_dev *vdev, uint16_t queue_id)
+{
+	uint16_t n_pkt = 0;
+	int pkts_inflight;
+	uint16_t dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
+	pkts_inflight = rte_vhost_async_get_inflight(vdev->vid, queue_id);
+	/* Bail out on error (-1) or empty queue (0): a VLA sized <= 0 is
+	 * undefined behavior, and -1 would make the loop below spin forever.
+	 */
+	if (pkts_inflight <= 0)
+		return;
+	struct rte_mbuf *m_cpl[pkts_inflight];
+	while (pkts_inflight > 0) {
+		n_pkt = rte_vhost_clear_queue(vdev->vid, queue_id, m_cpl,
+						pkts_inflight, dma_id, 0);
+		free_pkts(m_cpl, n_pkt);
+		pkts_inflight = rte_vhost_async_get_inflight(vdev->vid, queue_id);
+	}
+}
+
 /*
  * Remove a device from the specific data core linked list and from the
  * main linked list. Synchronization  occurs through the use of the
@@ -1594,13 +1613,13 @@  destroy_device(int vid)
 		vdev->vid);
 
 	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
-		vhost_clear_queue_thread_unsafe(vdev, VIRTIO_RXQ);
+		vhost_clear_queue(vdev, VIRTIO_RXQ);
 		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
 		dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
 	}
 
 	if (dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled) {
-		vhost_clear_queue_thread_unsafe(vdev, VIRTIO_TXQ);
+		vhost_clear_queue(vdev, VIRTIO_TXQ);
 		rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
 		dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled = false;
 	}
@@ -1759,9 +1778,6 @@  vring_state_changed(int vid, uint16_t queue_id, int enable)
 	if (!vdev)
 		return -1;
 
-	if (queue_id != VIRTIO_RXQ)
-		return 0;
-
 	if (dma_bind[vid2socketid[vid]].dmas[queue_id].async_enabled) {
 		if (!enable)
 			vhost_clear_queue_thread_unsafe(vdev, queue_id);