[v6,06/10] vhost: add the APIs to operate inflight ring
Checks
Commit Message
This patch introduces three APIs to operate the inflight
ring. The three APIs are set, set last and clear, each
provided for both split and packed rings.
Signed-off-by: Lin Li <lilin24@baidu.com>
Signed-off-by: Xun Ni <nixun@baidu.com>
Signed-off-by: Yu Zhang <zhangyu31@baidu.com>
Signed-off-by: Jin Yu <jin.yu@intel.com>
---
lib/librte_vhost/rte_vhost.h | 116 ++++++++++++++++
lib/librte_vhost/vhost.c | 252 +++++++++++++++++++++++++++++++++++
2 files changed, 368 insertions(+)
Comments
On 29/08/2019 15:12, JinYu wrote:
> This patch introduces three APIs to operate the inflight
> ring. Three APIs are set, set last and clear. It includes
> split and packed ring.
>
> Signed-off-by: Lin Li <lilin24@baidu.com>
> Signed-off-by: Xun Ni <nixun@baidu.com>
> Signed-off-by: Yu Zhang <zhangyu31@baidu.com>
> Signed-off-by: Jin Yu <jin.yu@intel.com>
> ---
> lib/librte_vhost/rte_vhost.h | 116 ++++++++++++++++
> lib/librte_vhost/vhost.c | 252 +++++++++++++++++++++++++++++++++++
> 2 files changed, 368 insertions(+)
Hi - you need to update the version map for the new APIs, or you will
see the following types of message with make,
rte_vhost_set_inflight_desc_split is flagged as experimental
but is not listed in version map
Please add rte_vhost_set_inflight_desc_split to the version map
On 8/29/19 4:12 PM, JinYu wrote:
> This patch introduces three APIs to operate the inflight
> ring. Three APIs are set, set last and clear. It includes
> split and packed ring.
>
> Signed-off-by: Lin Li <lilin24@baidu.com>
> Signed-off-by: Xun Ni <nixun@baidu.com>
> Signed-off-by: Yu Zhang <zhangyu31@baidu.com>
> Signed-off-by: Jin Yu <jin.yu@intel.com>
> ---
> lib/librte_vhost/rte_vhost.h | 116 ++++++++++++++++
> lib/librte_vhost/vhost.c | 252 +++++++++++++++++++++++++++++++++++
> 2 files changed, 368 insertions(+)
>
> diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
> index e090cdfee..d3b6eda21 100644
> --- a/lib/librte_vhost/rte_vhost.h
> +++ b/lib/librte_vhost/rte_vhost.h
> @@ -693,6 +693,122 @@ int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
> int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
> struct rte_vhost_vring *vring);
>
> +/**
> + * Set split inflight descriptor.
> + *
> + * This function saves descriptors that have been consumed in the
> + * available ring
> + *
> + * @param vid
> + * vhost device ID
> + * @param vring_idx
> + * vring index
> + * @param idx
> + * inflight entry index
> + * @return
> + * 0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
> + uint16_t idx);
> +
> +/**
> + * Set packed inflight descriptor and get corresponding inflight entry
> + *
> + * This function saves descriptors that have been consumed
> + *
> + * @param vid
> + * vhost device ID
> + * @param vring_idx
> + * vring index
> + * @param head
> + * head of descriptors
> + * @param last
> + * last of descriptors
> + * @param inflight_entry
> + * corresponding inflight entry
> + * @return
> + * 0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
> + uint16_t head, uint16_t last, uint16_t *inflight_entry);
> +
> +/**
> + * Save the head of the list of the last batch of used descriptors.
> + *
> + * @param vid
> + * vhost device ID
> + * @param vring_idx
> + * vring index
> + * @param idx
> + * descriptor entry index
> + * @return
> + * 0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_set_last_inflight_io_split(int vid,
> + uint16_t vring_idx, uint16_t idx);
> +
> +/**
> + * Update the inflight free_head, used_idx and used_wrap_counter.
> + *
> + * This function will update status first before updating descriptors
> + * to used
> + *
> + * @param vid
> + * vhost device ID
> + * @param vring_idx
> + * vring index
> + * @param head
> + * head of descriptors
> + * @return
> + * 0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_set_last_inflight_io_packed(int vid,
> + uint16_t vring_idx, uint16_t head);
> +
> +/**
> + * Clear the split inflight status.
> + *
> + * @param vid
> + * vhost device ID
> + * @param vring_idx
> + * vring index
> + * @param last_used_idx
> + * last used idx of used ring
> + * @param idx
> + * inflight entry index
> + * @return
> + * 0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
> + uint16_t last_used_idx, uint16_t idx);
> +
> +/**
> + * Clear the packed inflight status.
> + *
> + * @param vid
> + * vhost device ID
> + * @param vring_idx
> + * vring index
> + * @param head
> + * inflight entry index
> + * @return
> + * 0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
> + uint16_t head);
> +
> /**
> * Notify the guest that used descriptors have been added to the vring. This
> * function acts as a memory barrier.
> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> index 660ac2a07..58940152f 100644
> --- a/lib/librte_vhost/vhost.c
> +++ b/lib/librte_vhost/vhost.c
> @@ -783,6 +783,258 @@ rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
> return 0;
> }
>
> +int
> +rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
> + uint16_t idx)
> +{
> + struct virtio_net *dev;
> + struct vhost_virtqueue *vq;
> +
> + dev = get_device(vid);
> + if (unlikely(!dev))
> + return -1;
> +
> + if (unlikely(!(dev->protocol_features &
> + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> + return 0;
> +
> + if (unlikely(vq_is_packed(dev)))
> + return -1;
> +
> + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> + return -1;
> +
> + vq = dev->virtqueue[vring_idx];
> + if (unlikely(!vq))
> + return -1;
> +
> + if (unlikely(!vq->inflight_split))
> + return -1;
> +
> + vq->inflight_split->desc[idx].counter = vq->global_counter++;
> + vq->inflight_split->desc[idx].inflight = 1;
> + return 0;
> +}
> +
> +int
> +rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
> + uint16_t head, uint16_t last, uint16_t *inflight_entry)
> +{
> + struct virtio_net *dev;
> + struct vhost_virtqueue *vq;
> + struct inflight_info_packed *inflight_info;
> + struct vring_packed_desc *desc;
> + uint16_t old_free_head, free_head;
> +
> + dev = get_device(vid);
> + if (unlikely(!dev))
> + return -1;
> +
> + if (unlikely(!(dev->protocol_features &
> + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> + return 0;
> +
> + if (unlikely(!vq_is_packed(dev)))
> + return -1;
> +
> + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> + return -1;
> +
> + vq = dev->virtqueue[vring_idx];
> + if (unlikely(!vq))
> + return -1;
> +
> + inflight_info = vq->inflight_packed;
> + if (unlikely(!inflight_info))
> + return -1;
> +
> + desc = vq->desc_packed;
> + old_free_head = inflight_info->old_free_head;
> + free_head = old_free_head;
> +
> + /* init header descriptor */
> + inflight_info->desc[old_free_head].num = 0;
> + inflight_info->desc[old_free_head].counter = vq->global_counter++;
> + inflight_info->desc[old_free_head].inflight = 1;
I think head has to be validated so that it does not cause
out-of-bounds accesses.
> + /* save desc entry in flight entry */
> + while (head != ((last + 1) % vq->size)) {
> + inflight_info->desc[old_free_head].num++;
> + inflight_info->desc[free_head].addr = desc[head].addr;
> + inflight_info->desc[free_head].len = desc[head].len;
> + inflight_info->desc[free_head].flags = desc[head].flags;
> + inflight_info->desc[free_head].id = desc[head].id;
> +
> + inflight_info->desc[old_free_head].last = free_head;
> + free_head = inflight_info->desc[free_head].next;
> + inflight_info->free_head = free_head;
> + head = (head + 1) % vq->size;
> + }
> +
> + inflight_info->old_free_head = free_head;
> + *inflight_entry = old_free_head;
> +
> + return 0;
> +}
> +
> +int
> +rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
> + uint16_t last_used_idx, uint16_t idx)
> +{
> + struct virtio_net *dev;
> + struct vhost_virtqueue *vq;
> +
> + dev = get_device(vid);
> + if (unlikely(!dev))
> + return -1;
> +
> + if (unlikely(!(dev->protocol_features &
> + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> + return 0;
> +
> + if (unlikely(vq_is_packed(dev)))
> + return -1;
> +
> + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> + return -1;
> +
> + vq = dev->virtqueue[vring_idx];
> + if (unlikely(!vq))
> + return -1;
> +
> + if (unlikely(!vq->inflight_split))
> + return -1;
> +
> + rte_compiler_barrier();
> +
> + vq->inflight_split->desc[idx].inflight = 0;
Maybe it would be better to check the idx value, so that it does not
cause out-of-bounds accesses.
> +
> + rte_compiler_barrier();
> +
> + vq->inflight_split->used_idx = last_used_idx;
> + return 0;
> +}
> +
> +int
> +rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
> + uint16_t head)
> +{
> + struct virtio_net *dev;
> + struct vhost_virtqueue *vq;
> + struct inflight_info_packed *inflight_info;
> +
> + dev = get_device(vid);
> + if (unlikely(!dev))
> + return -1;
> +
> + if (unlikely(!(dev->protocol_features &
> + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> + return 0;
> +
> + if (unlikely(!vq_is_packed(dev)))
> + return -1;
> +
> + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> + return -1;
> +
> + vq = dev->virtqueue[vring_idx];
> + if (unlikely(!vq))
> + return -1;
> +
> + inflight_info = vq->inflight_packed;
> + if (unlikely(!inflight_info))
> + return -1;
> +
> + rte_compiler_barrier();
> +
> + inflight_info->desc[head].inflight = 0;
Maybe it would be better to check the head value, so that it does not
cause out-of-bounds accesses.
> + rte_compiler_barrier();
> +
> + inflight_info->old_free_head = inflight_info->free_head;
> + inflight_info->old_used_idx = inflight_info->used_idx;
> + inflight_info->old_used_wrap_counter = inflight_info->used_wrap_counter;
> +
> + return 0;
> +}
> +
> +int
> +rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
> + uint16_t idx)
> +{
> + struct virtio_net *dev;
> + struct vhost_virtqueue *vq;
> +
> + dev = get_device(vid);
> + if (unlikely(!dev))
> + return -1;
> +
> + if (unlikely(!(dev->protocol_features &
> + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> + return 0;
> +
> + if (unlikely(vq_is_packed(dev)))
> + return -1;
> +
> + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> + return -1;
> +
> + vq = dev->virtqueue[vring_idx];
> + if (unlikely(!vq))
> + return -1;
> +
> + if (unlikely(!vq->inflight_split))
> + return -1;
> +
> + vq->inflight_split->last_inflight_io = idx;
> + return 0;
> +}
> +
> +int
> +rte_vhost_set_last_inflight_io_packed(int vid, uint16_t vring_idx,
> + uint16_t head)
> +{
> + struct virtio_net *dev;
> + struct vhost_virtqueue *vq;
> + struct inflight_info_packed *inflight_info;
> + uint16_t last;
> +
> + dev = get_device(vid);
> + if (unlikely(!dev))
> + return -1;
> +
> + if (unlikely(!(dev->protocol_features &
> + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> + return 0;
> +
> + if (unlikely(!vq_is_packed(dev)))
> + return -1;
> +
> + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> + return -1;
> +
> + vq = dev->virtqueue[vring_idx];
> + if (unlikely(!vq))
> + return -1;
> +
> + inflight_info = vq->inflight_packed;
> + if (unlikely(!inflight_info))
> + return -1;
> +
> + last = inflight_info->desc[head].last;
Ditto
> + inflight_info->desc[last].next = inflight_info->free_head;
> + inflight_info->free_head = head;
> + inflight_info->used_idx += inflight_info->desc[head].num;
> + if (inflight_info->used_idx >= inflight_info->desc_num) {
> + inflight_info->used_idx -= inflight_info->desc_num;
> + inflight_info->used_wrap_counter =
> + !inflight_info->used_wrap_counter;
> + }
> +
> + return 0;
> +}
> +
> int
> rte_vhost_vring_call(int vid, uint16_t vring_idx)
> {
>
> -----Original Message-----
> From: Maxime Coquelin [mailto:maxime.coquelin@redhat.com]
> Sent: Thursday, September 12, 2019 5:27 PM
> To: Yu, Jin <jin.yu@intel.com>; dev@dpdk.org
> Cc: Liu, Changpeng <changpeng.liu@intel.com>; Bie, Tiwei
> <tiwei.bie@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>; Lin Li
> <lilin24@baidu.com>; Xun Ni <nixun@baidu.com>; Yu Zhang
> <zhangyu31@baidu.com>
> Subject: Re: [PATCH v6 06/10] vhost: add the APIs to operate inflight ring
>
>
>
> On 8/29/19 4:12 PM, JinYu wrote:
> > This patch introduces three APIs to operate the inflight ring. Three
> > APIs are set, set last and clear. It includes split and packed ring.
> >
> > Signed-off-by: Lin Li <lilin24@baidu.com>
> > Signed-off-by: Xun Ni <nixun@baidu.com>
> > Signed-off-by: Yu Zhang <zhangyu31@baidu.com>
> > Signed-off-by: Jin Yu <jin.yu@intel.com>
> > ---
> > lib/librte_vhost/rte_vhost.h | 116 ++++++++++++++++
> > lib/librte_vhost/vhost.c | 252 +++++++++++++++++++++++++++++++++++
> > 2 files changed, 368 insertions(+)
> >
> > diff --git a/lib/librte_vhost/rte_vhost.h
> > b/lib/librte_vhost/rte_vhost.h index e090cdfee..d3b6eda21 100644
> > --- a/lib/librte_vhost/rte_vhost.h
> > +++ b/lib/librte_vhost/rte_vhost.h
> > @@ -693,6 +693,122 @@ int rte_vhost_get_mem_table(int vid, struct
> > rte_vhost_memory **mem); int rte_vhost_get_vhost_vring(int vid, uint16_t
> vring_idx,
> > struct rte_vhost_vring *vring);
> >
> > +/**
> > + * Set split inflight descriptor.
> > + *
> > + * This function save descriptors that has been comsumed in available
> > + * ring
> > + *
> > + * @param vid
> > + * vhost device ID
> > + * @param vring_idx
> > + * vring index
> > + * @param idx
> > + * inflight entry index
> > + * @return
> > + * 0 on success, -1 on failure
> > + */
> > +__rte_experimental
> > +int
> > +rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
> > + uint16_t idx);
> > +
> > +/**
> > + * Set packed inflight descriptor and get corresponding inflight
> > +entry
> > + *
> > + * This function save descriptors that has been comsumed
> > + *
> > + * @param vid
> > + * vhost device ID
> > + * @param vring_idx
> > + * vring index
> > + * @param head
> > + * head of descriptors
> > + * @param last
> > + * last of descriptors
> > + * @param inflight_entry
> > + * corresponding inflight entry
> > + * @return
> > + * 0 on success, -1 on failure
> > + */
> > +__rte_experimental
> > +int
> > +rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
> > + uint16_t head, uint16_t last, uint16_t *inflight_entry);
> > +
> > +/**
> > + * Save the head of list that the last batch of used descriptors.
> > + *
> > + * @param vid
> > + * vhost device ID
> > + * @param vring_idx
> > + * vring index
> > + * @param idx
> > + * descriptor entry index
> > + * @return
> > + * 0 on success, -1 on failure
> > + */
> > +__rte_experimental
> > +int
> > +rte_vhost_set_last_inflight_io_split(int vid,
> > + uint16_t vring_idx, uint16_t idx);
> > +
> > +/**
> > + * Update the inflight free_head, used_idx and used_wrap_counter.
> > + *
> > + * This function will update status first before updating descriptors
> > + * to used
> > + *
> > + * @param vid
> > + * vhost device ID
> > + * @param vring_idx
> > + * vring index
> > + * @param head
> > + * head of descriptors
> > + * @return
> > + * 0 on success, -1 on failure
> > + */
> > +__rte_experimental
> > +int
> > +rte_vhost_set_last_inflight_io_packed(int vid,
> > + uint16_t vring_idx, uint16_t head);
> > +
> > +/**
> > + * Clear the split inflight status.
> > + *
> > + * @param vid
> > + * vhost device ID
> > + * @param vring_idx
> > + * vring index
> > + * @param last_used_idx
> > + * last used idx of used ring
> > + * @param idx
> > + * inflight entry index
> > + * @return
> > + * 0 on success, -1 on failure
> > + */
> > +__rte_experimental
> > +int
> > +rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
> > + uint16_t last_used_idx, uint16_t idx);
> > +
> > +/**
> > + * Clear the packed inflight status.
> > + *
> > + * @param vid
> > + * vhost device ID
> > + * @param vring_idx
> > + * vring index
> > + * @param head
> > + * inflight entry index
> > + * @return
> > + * 0 on success, -1 on failure
> > + */
> > +__rte_experimental
> > +int
> > +rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
> > + uint16_t head);
> > +
> > /**
> > * Notify the guest that used descriptors have been added to the vring. This
> > * function acts as a memory barrier.
> > diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c index
> > 660ac2a07..58940152f 100644
> > --- a/lib/librte_vhost/vhost.c
> > +++ b/lib/librte_vhost/vhost.c
> > @@ -783,6 +783,258 @@ rte_vhost_get_vhost_vring(int vid, uint16_t
> vring_idx,
> > return 0;
> > }
> >
> > +int
> > +rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
> > + uint16_t idx)
> > +{
> > + struct virtio_net *dev;
> > + struct vhost_virtqueue *vq;
> > +
> > + dev = get_device(vid);
> > + if (unlikely(!dev))
> > + return -1;
> > +
> > + if (unlikely(!(dev->protocol_features &
> > + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> > + return 0;
> > +
> > + if (unlikely(vq_is_packed(dev)))
> > + return -1;
> > +
> > + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> > + return -1;
> > +
> > + vq = dev->virtqueue[vring_idx];
> > + if (unlikely(!vq))
> > + return -1;
> > +
> > + if (unlikely(!vq->inflight_split))
> > + return -1;
> > +
> > + vq->inflight_split->desc[idx].counter = vq->global_counter++;
> > + vq->inflight_split->desc[idx].inflight = 1;
> > + return 0;
> > +}
> > +
> > +int
> > +rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
> > + uint16_t head, uint16_t last, uint16_t *inflight_entry) {
> > + struct virtio_net *dev;
> > + struct vhost_virtqueue *vq;
> > + struct inflight_info_packed *inflight_info;
> > + struct vring_packed_desc *desc;
> > + uint16_t old_free_head, free_head;
> > +
> > + dev = get_device(vid);
> > + if (unlikely(!dev))
> > + return -1;
> > +
> > + if (unlikely(!(dev->protocol_features &
> > + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> > + return 0;
> > +
> > + if (unlikely(!vq_is_packed(dev)))
> > + return -1;
> > +
> > + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> > + return -1;
> > +
> > + vq = dev->virtqueue[vring_idx];
> > + if (unlikely(!vq))
> > + return -1;
> > +
> > + inflight_info = vq->inflight_packed;
> > + if (unlikely(!inflight_info))
> > + return -1;
> > +
> > + desc = vq->desc_packed;
> > + old_free_head = inflight_info->old_free_head;
> > + free_head = old_free_head;
> > +
> > + /* init header descriptor */
> > + inflight_info->desc[old_free_head].num = 0;
> > + inflight_info->desc[old_free_head].counter = vq->global_counter++;
> > + inflight_info->desc[old_free_head].inflight = 1;
>
> I think head has to be validated so that it does not causes out of bounds
> accesses.
Got it.
>
> > + /* save desc entry in flight entry */
> > + while (head != ((last + 1) % vq->size)) {
> > + inflight_info->desc[old_free_head].num++;
> > + inflight_info->desc[free_head].addr = desc[head].addr;
> > + inflight_info->desc[free_head].len = desc[head].len;
> > + inflight_info->desc[free_head].flags = desc[head].flags;
> > + inflight_info->desc[free_head].id = desc[head].id;
> > +
> > + inflight_info->desc[old_free_head].last = free_head;
> > + free_head = inflight_info->desc[free_head].next;
> > + inflight_info->free_head = free_head;
> > + head = (head + 1) % vq->size;
> > + }
> > +
> > + inflight_info->old_free_head = free_head;
> > + *inflight_entry = old_free_head;
> > +
> > + return 0;
> > +}
> > +
> > +int
> > +rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
> > + uint16_t last_used_idx, uint16_t idx) {
> > + struct virtio_net *dev;
> > + struct vhost_virtqueue *vq;
> > +
> > + dev = get_device(vid);
> > + if (unlikely(!dev))
> > + return -1;
> > +
> > + if (unlikely(!(dev->protocol_features &
> > + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> > + return 0;
> > +
> > + if (unlikely(vq_is_packed(dev)))
> > + return -1;
> > +
> > + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> > + return -1;
> > +
> > + vq = dev->virtqueue[vring_idx];
> > + if (unlikely(!vq))
> > + return -1;
> > +
> > + if (unlikely(!vq->inflight_split))
> > + return -1;
> > +
> > + rte_compiler_barrier();
> > +
> > + vq->inflight_split->desc[idx].inflight = 0;
>
>
> Maybe it would be better to check idx value, so that it does not causes out-of-
> bound accesses.
>
Got it.
> > +
> > + rte_compiler_barrier();
> > +
> > + vq->inflight_split->used_idx = last_used_idx;
> > + return 0;
> > +}
> > +
> > +int
> > +rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
> > + uint16_t head)
> > +{
> > + struct virtio_net *dev;
> > + struct vhost_virtqueue *vq;
> > + struct inflight_info_packed *inflight_info;
> > +
> > + dev = get_device(vid);
> > + if (unlikely(!dev))
> > + return -1;
> > +
> > + if (unlikely(!(dev->protocol_features &
> > + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> > + return 0;
> > +
> > + if (unlikely(!vq_is_packed(dev)))
> > + return -1;
> > +
> > + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> > + return -1;
> > +
> > + vq = dev->virtqueue[vring_idx];
> > + if (unlikely(!vq))
> > + return -1;
> > +
> > + inflight_info = vq->inflight_packed;
> > + if (unlikely(!inflight_info))
> > + return -1;
> > +
> > + rte_compiler_barrier();
> > +
> > + inflight_info->desc[head].inflight = 0;
>
>
> Maybe it would be better to check head value, so that it does not causes out-of-
> bound accesses.
>
Got it.
>
> > + rte_compiler_barrier();
> > +
> > + inflight_info->old_free_head = inflight_info->free_head;
> > + inflight_info->old_used_idx = inflight_info->used_idx;
> > + inflight_info->old_used_wrap_counter =
> > +inflight_info->used_wrap_counter;
> > +
> > + return 0;
> > +}
> > +
> > +int
> > +rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
> > + uint16_t idx)
> > +{
> > + struct virtio_net *dev;
> > + struct vhost_virtqueue *vq;
> > +
> > + dev = get_device(vid);
> > + if (unlikely(!dev))
> > + return -1;
> > +
> > + if (unlikely(!(dev->protocol_features &
> > + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> > + return 0;
> > +
> > + if (unlikely(vq_is_packed(dev)))
> > + return -1;
> > +
> > + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> > + return -1;
> > +
> > + vq = dev->virtqueue[vring_idx];
> > + if (unlikely(!vq))
> > + return -1;
> > +
> > + if (unlikely(!vq->inflight_split))
> > + return -1;
> > +
> > + vq->inflight_split->last_inflight_io = idx;
> > + return 0;
> > +}
> > +
> > +int
> > +rte_vhost_set_last_inflight_io_packed(int vid, uint16_t vring_idx,
> > + uint16_t head)
> > +{
> > + struct virtio_net *dev;
> > + struct vhost_virtqueue *vq;
> > + struct inflight_info_packed *inflight_info;
> > + uint16_t last;
> > +
> > + dev = get_device(vid);
> > + if (unlikely(!dev))
> > + return -1;
> > +
> > + if (unlikely(!(dev->protocol_features &
> > + (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> > + return 0;
> > +
> > + if (unlikely(!vq_is_packed(dev)))
> > + return -1;
> > +
> > + if (unlikely(vring_idx >= VHOST_MAX_VRING))
> > + return -1;
> > +
> > + vq = dev->virtqueue[vring_idx];
> > + if (unlikely(!vq))
> > + return -1;
> > +
> > + inflight_info = vq->inflight_packed;
> > + if (unlikely(!inflight_info))
> > + return -1;
> > +
> > + last = inflight_info->desc[head].last;
>
> Ditto
Got it.
> > + inflight_info->desc[last].next = inflight_info->free_head;
> > + inflight_info->free_head = head;
> > + inflight_info->used_idx += inflight_info->desc[head].num;
> > + if (inflight_info->used_idx >= inflight_info->desc_num) {
> > + inflight_info->used_idx -= inflight_info->desc_num;
> > + inflight_info->used_wrap_counter =
> > + !inflight_info->used_wrap_counter;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > int
> > rte_vhost_vring_call(int vid, uint16_t vring_idx) {
> >
Thanks.
Jin
@@ -693,6 +693,122 @@ int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
struct rte_vhost_vring *vring);
+/**
+ * Set split inflight descriptor.
+ *
+ * This function saves descriptors that have been consumed in the
+ * available ring
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param idx
+ * inflight entry index
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
+ uint16_t idx);
+
+/**
+ * Set packed inflight descriptor and get corresponding inflight entry
+ *
+ * This function saves descriptors that have been consumed
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param head
+ * head of descriptors
+ * @param last
+ * last of descriptors
+ * @param inflight_entry
+ * corresponding inflight entry
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
+ uint16_t head, uint16_t last, uint16_t *inflight_entry);
+
+/**
+ * Save the head of the list of the last batch of used descriptors.
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param idx
+ * descriptor entry index
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_set_last_inflight_io_split(int vid,
+ uint16_t vring_idx, uint16_t idx);
+
+/**
+ * Update the inflight free_head, used_idx and used_wrap_counter.
+ *
+ * This function will update status first before updating descriptors
+ * to used
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param head
+ * head of descriptors
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_set_last_inflight_io_packed(int vid,
+ uint16_t vring_idx, uint16_t head);
+
+/**
+ * Clear the split inflight status.
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param last_used_idx
+ * last used idx of used ring
+ * @param idx
+ * inflight entry index
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
+ uint16_t last_used_idx, uint16_t idx);
+
+/**
+ * Clear the packed inflight status.
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param head
+ * inflight entry index
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
+ uint16_t head);
+
/**
* Notify the guest that used descriptors have been added to the vring. This
* function acts as a memory barrier.
@@ -783,6 +783,258 @@ rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
return 0;
}
+int
+rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
+ uint16_t idx)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ if (unlikely(!vq->inflight_split))
+ return -1;
+
+ vq->inflight_split->desc[idx].counter = vq->global_counter++;
+ vq->inflight_split->desc[idx].inflight = 1;
+ return 0;
+}
+
+int
+rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
+ uint16_t head, uint16_t last, uint16_t *inflight_entry)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+ struct inflight_info_packed *inflight_info;
+ struct vring_packed_desc *desc;
+ uint16_t old_free_head, free_head;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(!vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ inflight_info = vq->inflight_packed;
+ if (unlikely(!inflight_info))
+ return -1;
+
+ desc = vq->desc_packed;
+ old_free_head = inflight_info->old_free_head;
+ free_head = old_free_head;
+
+ /* init header descriptor */
+ inflight_info->desc[old_free_head].num = 0;
+ inflight_info->desc[old_free_head].counter = vq->global_counter++;
+ inflight_info->desc[old_free_head].inflight = 1;
+
+ /* save desc entry in flight entry */
+ while (head != ((last + 1) % vq->size)) {
+ inflight_info->desc[old_free_head].num++;
+ inflight_info->desc[free_head].addr = desc[head].addr;
+ inflight_info->desc[free_head].len = desc[head].len;
+ inflight_info->desc[free_head].flags = desc[head].flags;
+ inflight_info->desc[free_head].id = desc[head].id;
+
+ inflight_info->desc[old_free_head].last = free_head;
+ free_head = inflight_info->desc[free_head].next;
+ inflight_info->free_head = free_head;
+ head = (head + 1) % vq->size;
+ }
+
+ inflight_info->old_free_head = free_head;
+ *inflight_entry = old_free_head;
+
+ return 0;
+}
+
+int
+rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
+ uint16_t last_used_idx, uint16_t idx)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ if (unlikely(!vq->inflight_split))
+ return -1;
+
+ rte_compiler_barrier();
+
+ vq->inflight_split->desc[idx].inflight = 0;
+
+ rte_compiler_barrier();
+
+ vq->inflight_split->used_idx = last_used_idx;
+ return 0;
+}
+
+int
+rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
+ uint16_t head)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+ struct inflight_info_packed *inflight_info;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(!vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ inflight_info = vq->inflight_packed;
+ if (unlikely(!inflight_info))
+ return -1;
+
+ rte_compiler_barrier();
+
+ inflight_info->desc[head].inflight = 0;
+
+ rte_compiler_barrier();
+
+ inflight_info->old_free_head = inflight_info->free_head;
+ inflight_info->old_used_idx = inflight_info->used_idx;
+ inflight_info->old_used_wrap_counter = inflight_info->used_wrap_counter;
+
+ return 0;
+}
+
+int
+rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
+ uint16_t idx)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ if (unlikely(!vq->inflight_split))
+ return -1;
+
+ vq->inflight_split->last_inflight_io = idx;
+ return 0;
+}
+
+int
+rte_vhost_set_last_inflight_io_packed(int vid, uint16_t vring_idx,
+ uint16_t head)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+ struct inflight_info_packed *inflight_info;
+ uint16_t last;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(!vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ inflight_info = vq->inflight_packed;
+ if (unlikely(!inflight_info))
+ return -1;
+
+ last = inflight_info->desc[head].last;
+
+ inflight_info->desc[last].next = inflight_info->free_head;
+ inflight_info->free_head = head;
+ inflight_info->used_idx += inflight_info->desc[head].num;
+ if (inflight_info->used_idx >= inflight_info->desc_num) {
+ inflight_info->used_idx -= inflight_info->desc_num;
+ inflight_info->used_wrap_counter =
+ !inflight_info->used_wrap_counter;
+ }
+
+ return 0;
+}
+
int
rte_vhost_vring_call(int vid, uint16_t vring_idx)
{