[v4] vhost: support inflight share memory protocol feature

Message ID 1557046942-1151-1-git-send-email-chuckylinchuckylin@gmail.com
State New
Delegated to: Maxime Coquelin
Series
  • [v4] vhost: support inflight share memory protocol feature

Checks

Context                          Check    Description
ci/mellanox-Performance-Testing  success  Performance Testing PASS
ci/intel-Performance-Testing     success  Performance Testing PASS
ci/Intel-compilation             success  Compilation OK
ci/checkpatch                    success  coding style OK

Commit Message

Li Lin May 5, 2019, 9:02 a.m.
From: Li Lin <lilin24@baidu.com>

This patch introduces two new messages, VHOST_USER_GET_INFLIGHT_FD
and VHOST_USER_SET_INFLIGHT_FD, to support transferring a shared
buffer between QEMU and the backend.

First, QEMU uses VHOST_USER_GET_INFLIGHT_FD to get the shared
buffer from the backend. QEMU then sends it back through
VHOST_USER_SET_INFLIGHT_FD each time vhost-user is started.

This shared buffer is used to process inflight I/O when the backend
reconnects.

Signed-off-by: Li Lin <lilin24@baidu.com>
Signed-off-by: Ni Xun <nixun@baidu.com>
Signed-off-by: Zhang Yu <zhangyu31@baidu.com>
---
v2 - added set & clr inflight desc functions
v3 - simplified the implementation of some functions
v4 - reworked some data structures according to the QEMU doc

 lib/librte_vhost/rte_vhost.h           |  83 ++++++++++
 lib/librte_vhost/rte_vhost_version.map |   3 +
 lib/librte_vhost/vhost.c               | 105 ++++++++++++
 lib/librte_vhost/vhost.h               |  12 ++
 lib/librte_vhost/vhost_user.c          | 292 +++++++++++++++++++++++++++++++++
 lib/librte_vhost/vhost_user.h          |  13 +-
 6 files changed, 507 insertions(+), 1 deletion(-)
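
A minimal sketch of how a backend datapath might drive the new experimental
APIs around a single split-ring request (illustrative only, not part of the
patch: do_backend_io() is a hypothetical helper, barriers and most error
handling are omitted, and the call ordering is inferred from the recovery
logic in vhost_check_queue_inflights() below):

#include <stdint.h>
#include <linux/virtio_ring.h>
#include <rte_vhost.h>

/* Hypothetical backend I/O hook; returns the number of bytes written. */
extern uint32_t do_backend_io(int vid, uint16_t qid, uint16_t head);

static void
process_one_request(int vid, uint16_t qid, uint16_t head)
{
	struct rte_vhost_vring vring;
	uint32_t len;
	uint16_t slot;

	if (rte_vhost_get_vhost_vring(vid, qid, &vring) < 0)
		return;

	/* 1. Flag the descriptor as inflight before starting the I/O,
	 *    so it can be found and resubmitted after a reconnect. */
	rte_vhost_set_inflight_desc(vid, qid, head);

	len = do_backend_io(vid, qid, head);

	/* 2. Fill in the used ring entry for the completed request. */
	slot = vring.used->idx % vring.size;
	vring.used->ring[slot].id = head;
	vring.used->ring[slot].len = len;

	/* 3. Record it as the last inflight I/O before publishing, so a
	 *    crash between the used index update and step 4 remains
	 *    recoverable. */
	rte_vhost_set_last_inflight_io(vid, qid, head);

	vring.used->idx++;

	/* 4. Clear the inflight flag and record the new used index. */
	rte_vhost_clr_inflight_desc(vid, qid, vring.used->idx, head);

	rte_vhost_vring_call(vid, qid);
}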

Comments

Maxime Coquelin May 17, 2019, 3:47 p.m. | #1
On 5/5/19 11:02 AM, Li Lin wrote:
> From: Li Lin <lilin24@baidu.com>
> 
> This patch introduces two new messages, VHOST_USER_GET_INFLIGHT_FD
> and VHOST_USER_SET_INFLIGHT_FD, to support transferring a shared
> buffer between QEMU and the backend.
> 
> First, QEMU uses VHOST_USER_GET_INFLIGHT_FD to get the shared
> buffer from the backend. QEMU then sends it back through
> VHOST_USER_SET_INFLIGHT_FD each time vhost-user is started.
> 
> This shared buffer is used to process inflight I/O when the backend
> reconnects.
> 
> Signed-off-by: Li Lin <lilin24@baidu.com>
> Signed-off-by: Ni Xun <nixun@baidu.com>
> Signed-off-by: Zhang Yu <zhangyu31@baidu.com>
> ---
> v2 - added set & clr inflight desc functions
> v3 - simplified the implementation of some functions
> v4 - reworked some data structures according to the QEMU doc
> 
>   lib/librte_vhost/rte_vhost.h           |  83 ++++++++++
>   lib/librte_vhost/rte_vhost_version.map |   3 +
>   lib/librte_vhost/vhost.c               | 105 ++++++++++++
>   lib/librte_vhost/vhost.h               |  12 ++
>   lib/librte_vhost/vhost_user.c          | 292 +++++++++++++++++++++++++++++++++
>   lib/librte_vhost/vhost_user.h          |  13 +-
>   6 files changed, 507 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
> index d2c0c93f4..d4d709717 100644
> --- a/lib/librte_vhost/rte_vhost.h
> +++ b/lib/librte_vhost/rte_vhost.h
> @@ -71,6 +71,10 @@ extern "C" {
>   #define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
>   #endif
>   
> +#ifndef VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD
> +#define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
> +#endif
> +
>   /** Indicate whether protocol features negotiation is supported. */
>   #ifndef VHOST_USER_F_PROTOCOL_FEATURES
>   #define VHOST_USER_F_PROTOCOL_FEATURES	30
> @@ -98,12 +102,41 @@ struct rte_vhost_memory {
>   	struct rte_vhost_mem_region regions[];
>   };
>   
> +struct inflight_desc {
> +	uint8_t inflight;
> +	uint8_t padding[5];
> +	uint16_t next;
> +	uint64_t counter;
> +};
> +
> +struct inflight_info {
> +	uint64_t features;
> +	uint16_t version;
> +	uint16_t desc_num;
> +	uint16_t last_inflight_io;
> +	uint16_t used_idx;
> +	struct inflight_desc desc[0];
> +};
> +
> +struct resubmit_desc {
> +	uint16_t index;
> +	uint64_t counter;
> +};
> +
> +struct resubmit_info {
> +	struct resubmit_desc *resubmit_list;
> +	uint16_t resubmit_num;
> +};
> +
>   struct rte_vhost_vring {
>   	struct vring_desc	*desc;
>   	struct vring_avail	*avail;
>   	struct vring_used	*used;
>   	uint64_t		log_guest_addr;
>   
> +	struct inflight_info	*inflight;
> +	struct resubmit_info	*resubmit;
> +
>   	/** Deprecated, use rte_vhost_vring_call() instead. */
>   	int			callfd;
>   
> @@ -632,6 +665,56 @@ int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
>   int rte_vhost_vring_call(int vid, uint16_t vring_idx);
>   
>   /**
> + * set inflight flag for a desc.
> + *
> + * @param vid
> + *  vhost device ID
> + * @param vring_idx
> + *  vring index
> + * @param idx
> + *  inflight entry index
> + * @return
> + *  0 on success, -1 on failure
> + */
> +int __rte_experimental
> +rte_vhost_set_inflight_desc(int vid, uint16_t vring_idx,
> +		uint16_t idx);
> +
> +/**
> + * clear inflight flag for a desc.
> + *
> + * @param vid
> + *  vhost device ID
> + * @param vring_idx
> + *  vring index
> + * @param last_used_idx
> + *  next free used_idx
> + * @param idx
> + *  inflight entry index
> + * @return
> + *  0 on success, -1 on failure
> + */
> +int __rte_experimental
> +rte_vhost_clr_inflight_desc(int vid, uint16_t vring_idx,
> +		uint16_t last_used_idx, uint16_t idx);
> +
> +/**
> + * set last inflight io index.
> + *
> + * @param vid
> + *  vhost device ID
> + * @param vring_idx
> + *  vring index
> + * @param idx
> + *  inflight entry index
> + * @return
> + *  0 on success, -1 on failure
> + */
> +int __rte_experimental
> +rte_vhost_set_last_inflight_io(int vid, uint16_t vring_idx,
> +		uint16_t idx);
> +
> +/**
>    * Get vhost RX queue avail count.
>    *
>    * @param vid
> diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
> index 5f1d4a75c..a992b3935 100644
> --- a/lib/librte_vhost/rte_vhost_version.map
> +++ b/lib/librte_vhost/rte_vhost_version.map
> @@ -87,4 +87,7 @@ EXPERIMENTAL {
>   	rte_vdpa_relay_vring_used;
>   	rte_vhost_extern_callback_register;
>   	rte_vhost_driver_set_protocol_features;
> +	rte_vhost_set_inflight_desc;
> +	rte_vhost_clr_inflight_desc;
> +	rte_vhost_set_last_inflight_io;
>   };
> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> index 163f4595e..03eab3551 100644
> --- a/lib/librte_vhost/vhost.c
> +++ b/lib/librte_vhost/vhost.c
> @@ -76,6 +76,16 @@ cleanup_vq(struct vhost_virtqueue *vq, int destroy)
>   		close(vq->callfd);
>   	if (vq->kickfd >= 0)
>   		close(vq->kickfd);
> +	if (vq->inflight)
> +		vq->inflight = NULL;
> +	if (vq->resubmit->resubmit_list) {
> +		free(vq->resubmit->resubmit_list);
> +		vq->resubmit->resubmit_list = NULL;
> +	}
> +	if (vq->resubmit) {
> +		free(vq->resubmit);
> +		vq->resubmit = NULL;
> +	}
>   }
>   
>   /*
> @@ -589,6 +599,9 @@ rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
>   	vring->kickfd  = vq->kickfd;
>   	vring->size    = vq->size;
>   
> +	vring->inflight = vq->inflight;
> +	vring->resubmit = vq->resubmit;
> +
>   	return 0;
>   }
>   
> @@ -617,6 +630,98 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx)
>   	return 0;
>   }
>   
> +int
> +rte_vhost_set_inflight_desc(int vid, uint16_t vring_idx, uint16_t idx)
> +{
> +	struct virtio_net *dev;
> +	struct vhost_virtqueue *vq;
> +
> +	dev = get_device(vid);
> +	if (!dev)
> +		return -1;
> +
> +	if (!(dev->features &
> +		(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
> +		return 0;

Shouldn't -1 also be returned here?
I think this function should only be called if the feature negotiation
succeeded.

Returning an error here would help the calling application know that
something went wrong.

The same comment applies to the clear function.
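
As an illustration of that suggestion (not the author's code), the check
would then report an error instead of silently succeeding:

	if (!(dev->features &
		(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
		return -1; /* feature not negotiated */
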
> +
> +	if (vring_idx >= VHOST_MAX_VRING)
> +		return -1;
> +
> +	vq = dev->virtqueue[vring_idx];
> +	if (!vq)
> +		return -1;
> +
> +	if (unlikely(!vq->inflight))
> +		return -1;
> +
> +	vq->inflight->desc[idx].counter = vq->counter++;
> +	vq->inflight->desc[idx].inflight = 1;
> +	return 0;
> +}
> +
> +int
> +rte_vhost_clr_inflight_desc(int vid, uint16_t vring_idx,
> +	uint16_t last_used_idx, uint16_t idx)
> +{
> +	struct virtio_net *dev;
> +	struct vhost_virtqueue *vq;
> +
> +	dev = get_device(vid);
> +	if (!dev)
> +		return -1;
> +
> +	if (!(dev->features &
> +		(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
> +		return 0;
> +
> +	if (vring_idx >= VHOST_MAX_VRING)
> +		return -1;
> +
> +	vq = dev->virtqueue[vring_idx];
> +	if (!vq)
> +		return -1;
> +
> +	if (unlikely(!vq->inflight))
> +		return -1;
> +
> +	rte_compiler_barrier();
> +
> +	vq->inflight->desc[idx].inflight = 0;
> +
> +	rte_compiler_barrier();
> +
> +	vq->inflight->used_idx = last_used_idx;
> +	return 0;
> +}
> +
> +int
> +rte_vhost_set_last_inflight_io(int vid, uint16_t vring_idx, uint16_t idx)
> +{
> +	struct virtio_net *dev;
> +	struct vhost_virtqueue *vq;
> +
> +	dev = get_device(vid);
> +	if (!dev)
> +		return -1;
> +
> +	if (!(dev->features &
> +		(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
> +		return 0;
> +
> +	if (vring_idx >= VHOST_MAX_VRING)
> +		return -1;
> +
> +	vq = dev->virtqueue[vring_idx];
> +	if (!vq)
> +		return -1;
> +
> +	if (unlikely(!vq->inflight))
> +		return -1;
> +
> +	vq->inflight->last_inflight_io = idx;
> +	return 0;
> +}

Are the above functions supposed to be called in the hot path?
If so, you might want to use unlikely() for the error checks
everywhere.
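
A sketch of what that might look like (illustrative only), using the
unlikely() hint from rte_branch_prediction.h on the validation path:

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (unlikely(!vq->inflight))
		return -1;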

> +
>   uint16_t
>   rte_vhost_avail_entries(int vid, uint16_t queue_id)
>   {
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index e9138dfab..3dace2ec3 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -128,6 +128,11 @@ struct vhost_virtqueue {
>   	/* Physical address of used ring, for logging */
>   	uint64_t		log_guest_addr;
>   
> +	/* Inflight share memory info */
> +	struct inflight_info	*inflight;
> +	struct resubmit_info	*resubmit;
> +	uint64_t counter;

counter and resubmit are too generic names.

> +
>   	uint16_t		nr_zmbuf;
>   	uint16_t		zmbuf_size;
>   	uint16_t		last_zmbuf_idx;
> @@ -286,6 +291,12 @@ struct guest_page {
>   	uint64_t size;
>   };
>   
> +struct inflight_mem_info {
> +	int fd;
> +	void *addr;
> +	uint64_t size;
> +};
> +
>   /**
>    * Device structure contains all configuration information relating
>    * to the device.
> @@ -303,6 +314,7 @@ struct virtio_net {
>   	uint32_t		nr_vring;
>   	int			dequeue_zero_copy;
>   	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
> +	struct inflight_mem_info inflight_info;
>   #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
>   	char			ifname[IF_NAME_SZ];
>   	uint64_t		log_size;

Do you have a code example using these new APIs?
It would help with reviewing the patch.

Thanks,
Maxime
Tiwei Bie May 20, 2019, 2:13 a.m. | #2
On Fri, May 17, 2019 at 05:47:07PM +0200, Maxime Coquelin wrote:
> On 5/5/19 11:02 AM, Li Lin wrote:
[...]
> >   /**
> >    * Device structure contains all configuration information relating
> >    * to the device.
> > @@ -303,6 +314,7 @@ struct virtio_net {
> >   	uint32_t		nr_vring;
> >   	int			dequeue_zero_copy;
> >   	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
> > +	struct inflight_mem_info inflight_info;
> >   #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
> >   	char			ifname[IF_NAME_SZ];
> >   	uint64_t		log_size;
> 
> Do you have a code example using these new APIs?
> It would help with reviewing the patch.

+1

Patch

diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index d2c0c93f4..d4d709717 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -71,6 +71,10 @@  extern "C" {
 #define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
 #endif
 
+#ifndef VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD
+#define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
+#endif
+
 /** Indicate whether protocol features negotiation is supported. */
 #ifndef VHOST_USER_F_PROTOCOL_FEATURES
 #define VHOST_USER_F_PROTOCOL_FEATURES	30
@@ -98,12 +102,41 @@  struct rte_vhost_memory {
 	struct rte_vhost_mem_region regions[];
 };
 
+struct inflight_desc {
+	uint8_t inflight;
+	uint8_t padding[5];
+	uint16_t next;
+	uint64_t counter;
+};
+
+struct inflight_info {
+	uint64_t features;
+	uint16_t version;
+	uint16_t desc_num;
+	uint16_t last_inflight_io;
+	uint16_t used_idx;
+	struct inflight_desc desc[0];
+};
+
+struct resubmit_desc {
+	uint16_t index;
+	uint64_t counter;
+};
+
+struct resubmit_info {
+	struct resubmit_desc *resubmit_list;
+	uint16_t resubmit_num;
+};
+
 struct rte_vhost_vring {
 	struct vring_desc	*desc;
 	struct vring_avail	*avail;
 	struct vring_used	*used;
 	uint64_t		log_guest_addr;
 
+	struct inflight_info	*inflight;
+	struct resubmit_info	*resubmit;
+
 	/** Deprecated, use rte_vhost_vring_call() instead. */
 	int			callfd;
 
@@ -632,6 +665,56 @@  int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
 int rte_vhost_vring_call(int vid, uint16_t vring_idx);
 
 /**
+ * set inflight flag for a desc.
+ *
+ * @param vid
+ *  vhost device ID
+ * @param vring_idx
+ *  vring index
+ * @param idx
+ *  inflight entry index
+ * @return
+ *  0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_set_inflight_desc(int vid, uint16_t vring_idx,
+		uint16_t idx);
+
+/**
+ * clear inflight flag for a desc.
+ *
+ * @param vid
+ *  vhost device ID
+ * @param vring_idx
+ *  vring index
+ * @param last_used_idx
+ *  next free used_idx
+ * @param idx
+ *  inflight entry index
+ * @return
+ *  0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_clr_inflight_desc(int vid, uint16_t vring_idx,
+		uint16_t last_used_idx, uint16_t idx);
+
+/**
+ * set last inflight io index.
+ *
+ * @param vid
+ *  vhost device ID
+ * @param vring_idx
+ *  vring index
+ * @param idx
+ *  inflight entry index
+ * @return
+ *  0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_set_last_inflight_io(int vid, uint16_t vring_idx,
+		uint16_t idx);
+
+/**
  * Get vhost RX queue avail count.
  *
  * @param vid
diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index 5f1d4a75c..a992b3935 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -87,4 +87,7 @@  EXPERIMENTAL {
 	rte_vdpa_relay_vring_used;
 	rte_vhost_extern_callback_register;
 	rte_vhost_driver_set_protocol_features;
+	rte_vhost_set_inflight_desc;
+	rte_vhost_clr_inflight_desc;
+	rte_vhost_set_last_inflight_io;
 };
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 163f4595e..03eab3551 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -76,6 +76,16 @@  cleanup_vq(struct vhost_virtqueue *vq, int destroy)
 		close(vq->callfd);
 	if (vq->kickfd >= 0)
 		close(vq->kickfd);
+	if (vq->inflight)
+		vq->inflight = NULL;
+	if (vq->resubmit->resubmit_list) {
+		free(vq->resubmit->resubmit_list);
+		vq->resubmit->resubmit_list = NULL;
+	}
+	if (vq->resubmit) {
+		free(vq->resubmit);
+		vq->resubmit = NULL;
+	}
 }
 
 /*
@@ -589,6 +599,9 @@  rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
 	vring->kickfd  = vq->kickfd;
 	vring->size    = vq->size;
 
+	vring->inflight = vq->inflight;
+	vring->resubmit = vq->resubmit;
+
 	return 0;
 }
 
@@ -617,6 +630,98 @@  rte_vhost_vring_call(int vid, uint16_t vring_idx)
 	return 0;
 }
 
+int
+rte_vhost_set_inflight_desc(int vid, uint16_t vring_idx, uint16_t idx)
+{
+	struct virtio_net *dev;
+	struct vhost_virtqueue *vq;
+
+	dev = get_device(vid);
+	if (!dev)
+		return -1;
+
+	if (!(dev->features &
+		(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+		return 0;
+
+	if (vring_idx >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[vring_idx];
+	if (!vq)
+		return -1;
+
+	if (unlikely(!vq->inflight))
+		return -1;
+
+	vq->inflight->desc[idx].counter = vq->counter++;
+	vq->inflight->desc[idx].inflight = 1;
+	return 0;
+}
+
+int
+rte_vhost_clr_inflight_desc(int vid, uint16_t vring_idx,
+	uint16_t last_used_idx, uint16_t idx)
+{
+	struct virtio_net *dev;
+	struct vhost_virtqueue *vq;
+
+	dev = get_device(vid);
+	if (!dev)
+		return -1;
+
+	if (!(dev->features &
+		(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+		return 0;
+
+	if (vring_idx >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[vring_idx];
+	if (!vq)
+		return -1;
+
+	if (unlikely(!vq->inflight))
+		return -1;
+
+	rte_compiler_barrier();
+
+	vq->inflight->desc[idx].inflight = 0;
+
+	rte_compiler_barrier();
+
+	vq->inflight->used_idx = last_used_idx;
+	return 0;
+}
+
+int
+rte_vhost_set_last_inflight_io(int vid, uint16_t vring_idx, uint16_t idx)
+{
+	struct virtio_net *dev;
+	struct vhost_virtqueue *vq;
+
+	dev = get_device(vid);
+	if (!dev)
+		return -1;
+
+	if (!(dev->features &
+		(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+		return 0;
+
+	if (vring_idx >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[vring_idx];
+	if (!vq)
+		return -1;
+
+	if (unlikely(!vq->inflight))
+		return -1;
+
+	vq->inflight->last_inflight_io = idx;
+	return 0;
+}
+
 uint16_t
 rte_vhost_avail_entries(int vid, uint16_t queue_id)
 {
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index e9138dfab..3dace2ec3 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -128,6 +128,11 @@  struct vhost_virtqueue {
 	/* Physical address of used ring, for logging */
 	uint64_t		log_guest_addr;
 
+	/* Inflight share memory info */
+	struct inflight_info	*inflight;
+	struct resubmit_info	*resubmit;
+	uint64_t counter;
+
 	uint16_t		nr_zmbuf;
 	uint16_t		zmbuf_size;
 	uint16_t		last_zmbuf_idx;
@@ -286,6 +291,12 @@  struct guest_page {
 	uint64_t size;
 };
 
+struct inflight_mem_info {
+	int fd;
+	void *addr;
+	uint64_t size;
+};
+
 /**
  * Device structure contains all configuration information relating
  * to the device.
@@ -303,6 +314,7 @@  struct virtio_net {
 	uint32_t		nr_vring;
 	int			dequeue_zero_copy;
 	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
+	struct inflight_mem_info inflight_info;
 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
 	char			ifname[IF_NAME_SZ];
 	uint64_t		log_size;
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index c9e29ece8..eb41b9045 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -31,6 +31,8 @@ 
 #include <sys/stat.h>
 #include <sys/syscall.h>
 #include <assert.h>
+#include <sys/syscall.h>
+#include <asm/unistd.h>
 #ifdef RTE_LIBRTE_VHOST_NUMA
 #include <numaif.h>
 #endif
@@ -49,6 +51,15 @@ 
 #define VIRTIO_MIN_MTU 68
 #define VIRTIO_MAX_MTU 65535
 
+#define INFLIGHT_ALIGNMENT 64
+#define INFLIGHT_VERSION 0xabcd
+#define VIRTQUEUE_MAX_SIZE 1024
+
+#define CLOEXEC 0x0001U
+
+#define ALIGN_DOWN(n, m) ((n) / (m) * (m))
+#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
+
 static const char *vhost_message_str[VHOST_USER_MAX] = {
 	[VHOST_USER_NONE] = "VHOST_USER_NONE",
 	[VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
@@ -78,6 +89,8 @@  static const char *vhost_message_str[VHOST_USER_MAX] = {
 	[VHOST_USER_POSTCOPY_ADVISE]  = "VHOST_USER_POSTCOPY_ADVISE",
 	[VHOST_USER_POSTCOPY_LISTEN]  = "VHOST_USER_POSTCOPY_LISTEN",
 	[VHOST_USER_POSTCOPY_END]  = "VHOST_USER_POSTCOPY_END",
+	[VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
+	[VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
 };
 
 static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
@@ -160,6 +173,16 @@  vhost_backend_cleanup(struct virtio_net *dev)
 		dev->log_addr = 0;
 	}
 
+	if (dev->inflight_info.addr) {
+		munmap(dev->inflight_info.addr, dev->inflight_info.size);
+		dev->inflight_info.addr = NULL;
+	}
+
+	if (dev->inflight_info.fd > 0) {
+		close(dev->inflight_info.fd);
+		dev->inflight_info.fd = -1;
+	}
+
 	if (dev->slave_req_fd >= 0) {
 		close(dev->slave_req_fd);
 		dev->slave_req_fd = -1;
@@ -1165,6 +1188,184 @@  virtio_is_ready(struct virtio_net *dev)
 	return 1;
 }
 
+static int mem_create(const char *name, unsigned int flags)
+{
+#ifdef __NR_memfd_create
+	return syscall(__NR_memfd_create, name, flags);
+#else
+	return -1;
+#endif
+}
+
+void *inflight_mem_alloc(const char *name, size_t size, int *fd)
+{
+	void *ptr;
+	int mfd = -1;
+	char fname[20] = "/tmp/memfd-XXXXXX";
+
+	*fd = -1;
+	mfd = mem_create(name, CLOEXEC);
+	if (mfd != -1) {
+		if (ftruncate(mfd, size) == -1) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+					"ftruncate fail for alloc inflight buffer\n");
+			close(mfd);
+			return NULL;
+		}
+	} else {
+		mfd = mkstemp(fname);
+		unlink(fname);
+
+		if (mfd == -1) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+					"mkstemp fail for alloc inflight buffer\n");
+			return NULL;
+		}
+
+		if (ftruncate(mfd, size) == -1) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+					"ftruncate fail for alloc inflight buffer\n");
+			close(mfd);
+			return NULL;
+		}
+	}
+
+	ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
+	if (ptr == MAP_FAILED) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+				"mmap fail for alloc inflight buffer\n");
+		close(mfd);
+		return NULL;
+	}
+
+	*fd = mfd;
+	return ptr;
+}
+
+static uint32_t get_pervq_shm_size(uint16_t queue_size)
+{
+	return ALIGN_UP(sizeof(struct inflight_desc) * queue_size +
+		sizeof(uint64_t) * 1 + sizeof(uint16_t) * 4, INFLIGHT_ALIGNMENT);
+}
+
+static int
+vhost_user_get_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
+		int main_fd __rte_unused)
+{
+	int fd;
+	uint64_t mmap_size;
+	void *addr;
+	uint16_t num_queues, queue_size;
+	struct virtio_net *dev = *pdev;
+
+	if (msg->size != sizeof(msg->payload.inflight)) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"Invalid get_inflight_fd message size is %d",
+			msg->size);
+		return RTE_VHOST_MSG_RESULT_ERR;
+	}
+
+	num_queues = msg->payload.inflight.num_queues;
+	queue_size = msg->payload.inflight.queue_size;
+
+	RTE_LOG(INFO, VHOST_CONFIG, "get_inflight_fd num_queues: %u\n",
+			msg->payload.inflight.num_queues);
+	RTE_LOG(INFO, VHOST_CONFIG, "get_inflight_fd queue_size: %u\n",
+			msg->payload.inflight.queue_size);
+	mmap_size = num_queues * get_pervq_shm_size(queue_size);
+
+	addr = inflight_mem_alloc("vhost-inflight", mmap_size, &fd);
+	if (!addr) {
+		RTE_LOG(ERR, VHOST_CONFIG, "Failed to alloc vhost inflight area");
+			msg->payload.inflight.mmap_size = 0;
+		return RTE_VHOST_MSG_RESULT_ERR;
+	}
+	memset(addr, 0, mmap_size);
+
+	dev->inflight_info.addr = addr;
+	dev->inflight_info.size = msg->payload.inflight.mmap_size = mmap_size;
+	dev->inflight_info.fd = msg->fds[0] = fd;
+	msg->payload.inflight.mmap_offset = 0;
+	msg->fd_num = 1;
+
+	RTE_LOG(INFO, VHOST_CONFIG,
+			"send inflight mmap_size: %lu\n",
+			msg->payload.inflight.mmap_size);
+	RTE_LOG(INFO, VHOST_CONFIG,
+			"send inflight mmap_offset: %lu\n",
+			msg->payload.inflight.mmap_offset);
+	RTE_LOG(INFO, VHOST_CONFIG,
+			"send inflight fd: %d\n", msg->fds[0]);
+
+	return RTE_VHOST_MSG_RESULT_REPLY;
+}
+
+static int
+vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
+		int main_fd __rte_unused)
+{
+	int fd, i;
+	uint64_t mmap_size, mmap_offset;
+	uint16_t num_queues, queue_size;
+	uint32_t pervq_inflight_size;
+	void *rc;
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = *pdev;
+
+	fd = msg->fds[0];
+	if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
+		RTE_LOG(ERR, VHOST_CONFIG, "Invalid set_inflight_fd message size is %d,fd is %d\n",
+			msg->size, fd);
+		return RTE_VHOST_MSG_RESULT_ERR;
+	}
+
+	mmap_size = msg->payload.inflight.mmap_size;
+	mmap_offset = msg->payload.inflight.mmap_offset;
+	num_queues = msg->payload.inflight.num_queues;
+	queue_size = msg->payload.inflight.queue_size;
+	pervq_inflight_size = get_pervq_shm_size(queue_size);
+
+	RTE_LOG(INFO, VHOST_CONFIG,
+		"set_inflight_fd mmap_size: %lu\n", mmap_size);
+	RTE_LOG(INFO, VHOST_CONFIG,
+		"set_inflight_fd mmap_offset: %lu\n", mmap_offset);
+	RTE_LOG(INFO, VHOST_CONFIG,
+		"set_inflight_fd num_queues: %u\n", num_queues);
+	RTE_LOG(INFO, VHOST_CONFIG,
+		"set_inflight_fd queue_size: %u\n", queue_size);
+	RTE_LOG(INFO, VHOST_CONFIG,
+		"set_inflight_fd fd: %d\n", fd);
+	RTE_LOG(INFO, VHOST_CONFIG,
+		"set_inflight_fd pervq_inflight_size: %d\n",
+		pervq_inflight_size);
+
+	if (dev->inflight_info.addr)
+		munmap(dev->inflight_info.addr, dev->inflight_info.size);
+
+	rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+			fd, mmap_offset);
+	if (rc == MAP_FAILED) {
+		RTE_LOG(ERR, VHOST_CONFIG, "failed to mmap share memory.\n");
+		return RTE_VHOST_MSG_RESULT_ERR;
+	}
+
+	if (dev->inflight_info.fd)
+		close(dev->inflight_info.fd);
+
+	dev->inflight_info.fd = fd;
+	dev->inflight_info.addr = rc;
+	dev->inflight_info.size = mmap_size;
+
+	for (i = 0; i < num_queues; i++) {
+		vq = dev->virtqueue[i];
+		vq->inflight = (struct inflight_info *)rc;
+		vq->inflight->desc_num = queue_size;
+		rc = (void *)((char *)rc + pervq_inflight_size);
+	}
+
+	return RTE_VHOST_MSG_RESULT_OK;
+}
+
 static int
 vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			int main_fd __rte_unused)
@@ -1202,6 +1403,89 @@  static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
 }
 
 static int
+resubmit_desc_compare(const void *a, const void *b)
+{
+	const struct resubmit_desc *desc0 = (const struct resubmit_desc *)a;
+	const struct resubmit_desc *desc1 = (const struct resubmit_desc *)b;
+
+	if (desc1->counter > desc0->counter &&
+		(desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2)
+		return 1;
+
+	return -1;
+}
+
+static int
+vhost_check_queue_inflights(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	struct vring_used *used = vq->used;
+	uint16_t i = 0;
+	uint16_t resubmit_num = 0;
+	struct resubmit_info *resubmit = NULL;
+
+	if (!(dev->features &
+		(1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+		return RTE_VHOST_MSG_RESULT_OK;
+
+	if ((!vq->inflight))
+		return RTE_VHOST_MSG_RESULT_ERR;
+
+	if (!vq->inflight->version) {
+		vq->inflight->version = INFLIGHT_VERSION;
+		return RTE_VHOST_MSG_RESULT_OK;
+	}
+
+	vq->resubmit = NULL;
+	vq->counter = 0;
+
+	if (vq->inflight->used_idx != used->idx) {
+		vq->inflight->desc[vq->inflight->last_inflight_io].inflight = 0;
+		rte_compiler_barrier();
+		vq->inflight->used_idx = used->idx;
+	}
+
+	for (i = 0; i < vq->inflight->desc_num; i++) {
+		if (vq->inflight->desc[i].inflight == 1)
+			resubmit_num++;
+	}
+
+	vq->last_avail_idx += resubmit_num;
+
+	if (resubmit_num) {
+		resubmit  = calloc(1, sizeof(struct resubmit_info));
+		if (!resubmit) {
+			RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for resubmit info.\n");
+			return RTE_VHOST_MSG_RESULT_ERR;
+		}
+
+		resubmit->resubmit_list = calloc(resubmit_num,
+			sizeof(struct resubmit_desc));
+		if (!resubmit->resubmit_list) {
+			RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for inflight desc.\n");
+			return RTE_VHOST_MSG_RESULT_ERR;
+		}
+
+		for (i = 0; i < vq->inflight->desc_num; i++) {
+			if (vq->inflight->desc[i].inflight == 1) {
+				resubmit->resubmit_list[resubmit->resubmit_num].index = i;
+				resubmit->resubmit_list[resubmit->resubmit_num].counter =
+					vq->inflight->desc[i].counter;
+				resubmit->resubmit_num++;
+			}
+		}
+
+		if (resubmit->resubmit_num > 1)
+			qsort(resubmit->resubmit_list, resubmit->resubmit_num,
+				sizeof(struct resubmit_desc), resubmit_desc_compare);
+
+		vq->counter = resubmit->resubmit_list[0].counter + 1;
+		vq->resubmit = resubmit;
+	}
+
+	return RTE_VHOST_MSG_RESULT_OK;
+}
+
+static int
 vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			int main_fd __rte_unused)
 {
@@ -1242,6 +1526,12 @@  vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		close(vq->kickfd);
 	vq->kickfd = file.fd;
 
+	if (vhost_check_queue_inflights(dev, vq)) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"Failed to inflights for vq: %d\n", file.index);
+		return RTE_VHOST_MSG_RESULT_ERR;
+	}
+
 	return RTE_VHOST_MSG_RESULT_OK;
 }
 
@@ -1762,6 +2052,8 @@  static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
 	[VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
 	[VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
 	[VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
+	[VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd,
+	[VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd,
 };
 
 
diff --git a/lib/librte_vhost/vhost_user.h b/lib/librte_vhost/vhost_user.h
index 2a650fe4b..99a773910 100644
--- a/lib/librte_vhost/vhost_user.h
+++ b/lib/librte_vhost/vhost_user.h
@@ -54,7 +54,9 @@  typedef enum VhostUserRequest {
 	VHOST_USER_POSTCOPY_ADVISE = 28,
 	VHOST_USER_POSTCOPY_LISTEN = 29,
 	VHOST_USER_POSTCOPY_END = 30,
-	VHOST_USER_MAX = 31
+	VHOST_USER_GET_INFLIGHT_FD = 31,
+	VHOST_USER_SET_INFLIGHT_FD = 32,
+	VHOST_USER_MAX = 33
 } VhostUserRequest;
 
 typedef enum VhostUserSlaveRequest {
@@ -112,6 +114,13 @@  typedef struct VhostUserVringArea {
 	uint64_t offset;
 } VhostUserVringArea;
 
+typedef struct VhostUserInflight {
+	uint64_t mmap_size;
+	uint64_t mmap_offset;
+	uint16_t num_queues;
+	uint16_t queue_size;
+} VhostUserInflight;
+
 typedef struct VhostUserMsg {
 	union {
 		uint32_t master; /* a VhostUserRequest value */
@@ -131,6 +140,7 @@  typedef struct VhostUserMsg {
 		struct vhost_vring_addr addr;
 		VhostUserMemory memory;
 		VhostUserLog    log;
+		VhostUserInflight inflight;
 		struct vhost_iotlb_msg iotlb;
 		VhostUserCryptoSessionParam crypto_session;
 		VhostUserVringArea area;
@@ -148,6 +158,7 @@  typedef struct VhostUserMsg {
 /* vhost_user.c */
 int vhost_user_msg_handler(int vid, int fd);
 int vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm);
+void *inflight_mem_alloc(const char *name, size_t size, int *fd);
 
 /* socket.c */
 int read_fd_message(int sockfd, char *buf, int buflen, int *fds, int max_fds,