[dpdk-dev,RFC,2/4] vhost: make buf vector for scatter RX local.

Message ID 1455863563-15751-3-git-send-email-i.maximets@samsung.com (mailing list archive)
State Changes Requested, archived

Commit Message

Ilya Maximets Feb. 19, 2016, 6:32 a.m. UTC
  The array of buf_vector's is just temporary storage for information
about available descriptors. It is used only locally in
virtio_dev_merge_rx(), and there is no reason for that array to be
shared.

Fix that by allocating local buf_vec inside virtio_dev_merge_rx().

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 lib/librte_vhost/rte_virtio_net.h |  1 -
 lib/librte_vhost/vhost_rxtx.c     | 45 ++++++++++++++++++++-------------------
 2 files changed, 23 insertions(+), 23 deletions(-)
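
For context, a minimal sketch of the change (condensed from the full
patch at the bottom of this page):

/* Before: the scratch array lived in the shared, ABI-visible virtqueue
 * structure in rte_virtio_net.h. */
struct vhost_virtqueue {
	/* ... other fields ... */
	struct buf_vector	buf_vec[BUF_VECTOR_MAX];	/* for scatter RX */
} __rte_cache_aligned;

/* After: each call to virtio_dev_merge_rx() keeps its own copy on the
 * stack and hands it down to the helpers that fill and consume it. */
static inline uint32_t
virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
		    struct rte_mbuf **pkts, uint32_t count)
{
	struct buf_vector buf_vec[BUF_VECTOR_MAX];	/* local, not shared */
	/* ... update_secure_len() fills buf_vec from the avail ring,
	 * copy_from_mbuf_to_vring() then copies packet data into it ... */
}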
  

Comments

Yuanhan Liu Feb. 19, 2016, 7:06 a.m. UTC | #1
On Fri, Feb 19, 2016 at 09:32:41AM +0300, Ilya Maximets wrote:
> The array of buf_vector's is just temporary storage for information
> about available descriptors. It is used only locally in
> virtio_dev_merge_rx(), and there is no reason for that array to be
> shared.
> 
> Fix that by allocating local buf_vec inside virtio_dev_merge_rx().
> 
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>  lib/librte_vhost/rte_virtio_net.h |  1 -
>  lib/librte_vhost/vhost_rxtx.c     | 45 ++++++++++++++++++++-------------------
>  2 files changed, 23 insertions(+), 23 deletions(-)
> 
> diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
> index 10dcb90..ae1e4fb 100644
> --- a/lib/librte_vhost/rte_virtio_net.h
> +++ b/lib/librte_vhost/rte_virtio_net.h
> @@ -91,7 +91,6 @@ struct vhost_virtqueue {
>  	int			kickfd;			/**< Currently unused as polling mode is enabled. */
>  	int			enabled;
>  	uint64_t		reserved[16];		/**< Reserve some spaces for future extension. */
> -	struct buf_vector	buf_vec[BUF_VECTOR_MAX];	/**< for scatter RX. */
>  } __rte_cache_aligned;

I like this kind of cleanup, however, it breaks ABI.

	--yliu
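
To make the ABI concern concrete: removing the member changes
sizeof(struct vhost_virtqueue), and with it the array stride and
allocation size that already-compiled applications baked in. A minimal
standalone sketch (field sizes are illustrative, not the exact DPDK
layout):

#include <stdint.h>
#include <stdio.h>

/* Layout an application built against the old header expects. */
struct vq_old {
	int      kickfd;
	int      enabled;
	uint64_t reserved[16];
	uint8_t  buf_vec[4096];	/* stand-in for buf_vec[BUF_VECTOR_MAX] */
};

/* Layout after the field is dropped. */
struct vq_new {
	int      kickfd;
	int      enabled;
	uint64_t reserved[16];
};

int main(void)
{
	/* dev->virtqueue[i] is addressed using sizeof(struct vhost_virtqueue);
	 * an old binary and a new library would disagree on that stride. */
	printf("old size: %zu, new size: %zu\n",
	       sizeof(struct vq_old), sizeof(struct vq_new));
	return 0;
}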
  
Ilya Maximets Feb. 19, 2016, 7:30 a.m. UTC | #2
On 19.02.2016 10:06, Yuanhan Liu wrote:
> On Fri, Feb 19, 2016 at 09:32:41AM +0300, Ilya Maximets wrote:
>> The array of buf_vector's is just temporary storage for information
>> about available descriptors. It is used only locally in
>> virtio_dev_merge_rx(), and there is no reason for that array to be
>> shared.
>>
>> Fix that by allocating local buf_vec inside virtio_dev_merge_rx().
>>
>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>> ---
>>  lib/librte_vhost/rte_virtio_net.h |  1 -
>>  lib/librte_vhost/vhost_rxtx.c     | 45 ++++++++++++++++++++-------------------
>>  2 files changed, 23 insertions(+), 23 deletions(-)
>>
>> diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
>> index 10dcb90..ae1e4fb 100644
>> --- a/lib/librte_vhost/rte_virtio_net.h
>> +++ b/lib/librte_vhost/rte_virtio_net.h
>> @@ -91,7 +91,6 @@ struct vhost_virtqueue {
>>  	int			kickfd;			/**< Currently unused as polling mode is enabled. */
>>  	int			enabled;
>>  	uint64_t		reserved[16];		/**< Reserve some spaces for future extension. */
>> -	struct buf_vector	buf_vec[BUF_VECTOR_MAX];	/**< for scatter RX. */
>>  } __rte_cache_aligned;
> 
> I like this kind of cleanup, however, it breaks ABI.

Should I prepare a version of this patch with the field above marked
as deprecated and add a note to doc/guides/rel_notes/release_16_04.rst
about its future deletion?

Best regards, Ilya Maximets.
  
Huawei Xie Feb. 19, 2016, 8:10 a.m. UTC | #3
On 2/19/2016 3:31 PM, Ilya Maximets wrote:
> On 19.02.2016 10:06, Yuanhan Liu wrote:
>> On Fri, Feb 19, 2016 at 09:32:41AM +0300, Ilya Maximets wrote:
>>> The array of buf_vector's is just temporary storage for information
>>> about available descriptors. It is used only locally in
>>> virtio_dev_merge_rx(), and there is no reason for that array to be
>>> shared.
>>>
>>> Fix that by allocating local buf_vec inside virtio_dev_merge_rx().
>>>
>>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>>> ---
>>>  lib/librte_vhost/rte_virtio_net.h |  1 -
>>>  lib/librte_vhost/vhost_rxtx.c     | 45 ++++++++++++++++++++-------------------
>>>  2 files changed, 23 insertions(+), 23 deletions(-)
>>>
>>> diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
>>> index 10dcb90..ae1e4fb 100644
>>> --- a/lib/librte_vhost/rte_virtio_net.h
>>> +++ b/lib/librte_vhost/rte_virtio_net.h
>>> @@ -91,7 +91,6 @@ struct vhost_virtqueue {
>>>  	int			kickfd;			/**< Currently unused as polling mode is enabled. */
>>>  	int			enabled;
>>>  	uint64_t		reserved[16];		/**< Reserve some spaces for future extension. */
>>> -	struct buf_vector	buf_vec[BUF_VECTOR_MAX];	/**< for scatter RX. */
>>>  } __rte_cache_aligned;
>> I like this kind of cleanup, however, it breaks ABI.
> Should I prepare a version of this patch with the field above marked
> as deprecated and add a note to doc/guides/rel_notes/release_16_04.rst
> about its future deletion?

Ilya, you could follow the ABI process:
http://dpdk.org/doc/guides/contributing/versioning.html

>
> Best regards, Ilya Maximets.
>
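
The guide's usual recipe is a deprecation notice one release ahead,
plus staging the change behind the RTE_NEXT_ABI macro until the ABI
bump. A sketch of how that could look for this field (the exact
mechanics here are an assumption; the versioning guide above is
authoritative):

/* rte_virtio_net.h: keep the field under the current ABI, drop it once
 * the next ABI is enabled via the RTE_NEXT_ABI staging macro. */
struct vhost_virtqueue {
	int			kickfd;
	int			enabled;
	uint64_t		reserved[16];
#ifndef RTE_NEXT_ABI
	struct buf_vector	buf_vec[BUF_VECTOR_MAX];	/* deprecated, unused */
#endif
} __rte_cache_aligned;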
  
Yuanhan Liu April 5, 2016, 5:47 a.m. UTC | #4
On Fri, Feb 19, 2016 at 03:06:50PM +0800, Yuanhan Liu wrote:
> On Fri, Feb 19, 2016 at 09:32:41AM +0300, Ilya Maximets wrote:
> > The array of buf_vector's is just temporary storage for information
> > about available descriptors. It is used only locally in
> > virtio_dev_merge_rx(), and there is no reason for that array to be
> > shared.
> > 
> > Fix that by allocating local buf_vec inside virtio_dev_merge_rx().
> > 
> > Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> > ---
> >  lib/librte_vhost/rte_virtio_net.h |  1 -
> >  lib/librte_vhost/vhost_rxtx.c     | 45 ++++++++++++++++++++-------------------
> >  2 files changed, 23 insertions(+), 23 deletions(-)
> > 
> > diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
> > index 10dcb90..ae1e4fb 100644
> > --- a/lib/librte_vhost/rte_virtio_net.h
> > +++ b/lib/librte_vhost/rte_virtio_net.h
> > @@ -91,7 +91,6 @@ struct vhost_virtqueue {
> >  	int			kickfd;			/**< Currently unused as polling mode is enabled. */
> >  	int			enabled;
> >  	uint64_t		reserved[16];		/**< Reserve some spaces for future extension. */
> > -	struct buf_vector	buf_vec[BUF_VECTOR_MAX];	/**< for scatter RX. */
> >  } __rte_cache_aligned;
> 
> I like this kind of cleanup, however, it breaks ABI.

So, I was recently considering adding vhost-user Tx delayed-copy (or
zero copy) support, which leads to yet another ABI violation, as we
need to add a new field to the virtio_memory_regions struct to do guest
phys addr to host phys addr translation. You may ask, however: why do
we need to expose the virtio_memory_regions struct to users at all?

You are right, we don't have to. And here is the thing: we exposed way
more fields (or even structures) than necessary. Say, the
vhost_virtqueue struct should NOT be exposed to the user at all: the
application just needs to give the right queue id to locate a specific
queue, and that's all. The structure should be defined in an internal
header file. With that, we could make any changes to it we want,
without worrying that we may offend the painful ABI rules.

Similar changes could be made to the virtio_net struct as well,
exposing only the few fields that are necessary and moving all the
others to an internal structure.

Huawei then suggested a more radical yet much cleaner approach: just
expose a virtio_net handle to the application, the way the kernel
exposes an fd to user space for locating a specific file. However,
it's more than an ABI change; it's also an API change: some fields,
such as flags and virt_qp_nb, are referenced by applications. We could
expose some new functions to access them, though.
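
A sketch of what such a handle-based interface could look like (all
names here are hypothetical, not an agreed API):

#include <stdint.h>

/* Applications hold only an integer handle; struct virtio_net moves to
 * an internal header and can change freely without breaking ABI. */
struct rte_mbuf;

int      rte_vhost_get_flags(int vid);		/* hypothetical accessor */
uint32_t rte_vhost_get_queue_num(int vid);	/* hypothetical accessor */
uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
				 struct rte_mbuf **pkts, uint16_t count);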

I'd vote for this one, as it sounds very clean to me. It would also
resolve the issue blocking this patch. Though it would break OVS, I
think that'd be okay, as OVS depends on a specific DPDK version: all we
need to do is send a few patches to OVS and let it point to the next
release, say DPDK v16.07. Flavio, please correct me if I'm wrong.

Thoughts/comments?

	--yliu
  
Thomas Monjalon April 5, 2016, 8:37 a.m. UTC | #5
2016-04-05 13:47, Yuanhan Liu:
> So, I was recently considering adding vhost-user Tx delayed-copy (or
> zero copy) support, which leads to yet another ABI violation, as we
> need to add a new field to the virtio_memory_regions struct to do guest
> phys addr to host phys addr translation. You may ask, however: why do
> we need to expose the virtio_memory_regions struct to users at all?
> 
> You are right, we don't have to. And here is the thing: we exposed way
> more fields (or even structures) than necessary. Say, the
> vhost_virtqueue struct should NOT be exposed to the user at all: the
> application just needs to give the right queue id to locate a specific
> queue, and that's all. The structure should be defined in an internal
> header file. With that, we could make any changes to it we want,
> without worrying that we may offend the painful ABI rules.
> 
> Similar changes could be made to the virtio_net struct as well,
> exposing only the few fields that are necessary and moving all the
> others to an internal structure.
> 
> Huawei then suggested a more radical yet much cleaner approach: just
> expose a virtio_net handle to the application, the way the kernel
> exposes an fd to user space for locating a specific file. However,
> it's more than an ABI change; it's also an API change: some fields,
> such as flags and virt_qp_nb, are referenced by applications. We could
> expose some new functions to access them, though.
> 
> I'd vote for this one, as it sounds very clean to me. It would also
> resolve the issue blocking this patch. Though it would break OVS, I
> think that'd be okay, as OVS depends on a specific DPDK version: all we
> need to do is send a few patches to OVS and let it point to the next
> release, say DPDK v16.07. Flavio, please correct me if I'm wrong.
> 
> Thoughts/comments?

Do you plan to send a deprecation notice to change the API in 16.07?
  
Yuanhan Liu April 5, 2016, 2:06 p.m. UTC | #6
On Tue, Apr 05, 2016 at 10:37:13AM +0200, Thomas Monjalon wrote:
> 2016-04-05 13:47, Yuanhan Liu:
> > So, I was recently considering adding vhost-user Tx delayed-copy (or
> > zero copy) support, which leads to yet another ABI violation, as we
> > need to add a new field to the virtio_memory_regions struct to do guest
> > phys addr to host phys addr translation. You may ask, however: why do
> > we need to expose the virtio_memory_regions struct to users at all?
> > 
> > You are right, we don't have to. And here is the thing: we exposed way
> > more fields (or even structures) than necessary. Say, the
> > vhost_virtqueue struct should NOT be exposed to the user at all: the
> > application just needs to give the right queue id to locate a specific
> > queue, and that's all. The structure should be defined in an internal
> > header file. With that, we could make any changes to it we want,
> > without worrying that we may offend the painful ABI rules.
> > 
> > Similar changes could be made to the virtio_net struct as well,
> > exposing only the few fields that are necessary and moving all the
> > others to an internal structure.
> > 
> > Huawei then suggested a more radical yet much cleaner approach: just
> > expose a virtio_net handle to the application, the way the kernel
> > exposes an fd to user space for locating a specific file. However,
> > it's more than an ABI change; it's also an API change: some fields,
> > such as flags and virt_qp_nb, are referenced by applications. We could
> > expose some new functions to access them, though.
> > 
> > I'd vote for this one, as it sounds very clean to me. It would also
> > resolve the issue blocking this patch. Though it would break OVS, I
> > think that'd be okay, as OVS depends on a specific DPDK version: all we
> > need to do is send a few patches to OVS and let it point to the next
> > release, say DPDK v16.07. Flavio, please correct me if I'm wrong.
> > 
> > Thoughts/comments?
> 
> Do you plan to send a deprecation notice to change the API in 16.07?

Yes, I plan to send one shortly; before that, I'd like to ask for comments.

	--yliu
  
Flavio Leitner April 6, 2016, 4:14 a.m. UTC | #7
On Tue, Apr 05, 2016 at 01:47:33PM +0800, Yuanhan Liu wrote:
> On Fri, Feb 19, 2016 at 03:06:50PM +0800, Yuanhan Liu wrote:
> > On Fri, Feb 19, 2016 at 09:32:41AM +0300, Ilya Maximets wrote:
> > > The array of buf_vector's is just temporary storage for information
> > > about available descriptors. It is used only locally in
> > > virtio_dev_merge_rx(), and there is no reason for that array to be
> > > shared.
> > > 
> > > Fix that by allocating local buf_vec inside virtio_dev_merge_rx().
> > > 
> > > Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> > > ---
> > >  lib/librte_vhost/rte_virtio_net.h |  1 -
> > >  lib/librte_vhost/vhost_rxtx.c     | 45 ++++++++++++++++++++-------------------
> > >  2 files changed, 23 insertions(+), 23 deletions(-)
> > > 
> > > diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
> > > index 10dcb90..ae1e4fb 100644
> > > --- a/lib/librte_vhost/rte_virtio_net.h
> > > +++ b/lib/librte_vhost/rte_virtio_net.h
> > > @@ -91,7 +91,6 @@ struct vhost_virtqueue {
> > >  	int			kickfd;			/**< Currently unused as polling mode is enabled. */
> > >  	int			enabled;
> > >  	uint64_t		reserved[16];		/**< Reserve some spaces for future extension. */
> > > -	struct buf_vector	buf_vec[BUF_VECTOR_MAX];	/**< for scatter RX. */
> > >  } __rte_cache_aligned;
> > 
> > I like this kind of cleanup, however, it breaks ABI.
> 
> So, I was recently considering adding vhost-user Tx delayed-copy (or
> zero copy) support, which leads to yet another ABI violation, as we
> need to add a new field to the virtio_memory_regions struct to do guest
> phys addr to host phys addr translation. You may ask, however: why do
> we need to expose the virtio_memory_regions struct to users at all?
> 
> You are right, we don't have to. And here is the thing: we exposed way
> more fields (or even structures) than necessary. Say, the
> vhost_virtqueue struct should NOT be exposed to the user at all: the
> application just needs to give the right queue id to locate a specific
> queue, and that's all. The structure should be defined in an internal
> header file. With that, we could make any changes to it we want,
> without worrying that we may offend the painful ABI rules.
> 
> Similar changes could be made to the virtio_net struct as well,
> exposing only the few fields that are necessary and moving all the
> others to an internal structure.
> 
> Huawei then suggested a more radical yet much cleaner approach: just
> expose a virtio_net handle to the application, the way the kernel
> exposes an fd to user space for locating a specific file. However,
> it's more than an ABI change; it's also an API change: some fields,
> such as flags and virt_qp_nb, are referenced by applications. We could
> expose some new functions to access them, though.
> 
> I'd vote for this one, as it sounds very clean to me. It would also
> resolve the issue blocking this patch. Though it would break OVS, I
> think that'd be okay, as OVS depends on a specific DPDK version: all we
> need to do is send a few patches to OVS and let it point to the next
> release, say DPDK v16.07. Flavio, please correct me if I'm wrong.

There is a plan to use the vhost PMD, so from the OVS point of view
the virtio stuff would be hidden because the vhost PMD would look just
like a regular ethernet device, right?

I think we are waiting for 16.04 to be released with that so we can
start pushing changes to OVS as well.
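
For reference, a minimal sketch of what that looks like from the
application side, assuming the vdev syntax of the 16.04 vhost PMD
(eth_vhost0 with iface/queues arguments):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Port created at startup with, e.g.:
 *   --vdev 'eth_vhost0,iface=/tmp/sock0,queues=1'
 * After that, no virtio/vhost internals: just the generic ethdev API. */
static void
poll_vhost_port(uint8_t port_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);	/* drop; a real app would forward */
}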
  
Yuanhan Liu April 6, 2016, 4:54 a.m. UTC | #8
On Wed, Apr 06, 2016 at 01:14:09AM -0300, Flavio Leitner wrote:
> > 
> > I'd vote for this one, as it sounds very clean to me. It would also
> > resolve the issue blocking this patch. Though it would break OVS, I
> > think that'd be okay, as OVS depends on a specific DPDK version: all we
> > need to do is send a few patches to OVS and let it point to the next
> > release, say DPDK v16.07. Flavio, please correct me if I'm wrong.
> 
> There is a plan to use the vhost PMD,

Great.

> so from the OVS point of view the virtio stuff would be hidden
> because the vhost PMD would look just like a regular ethernet
> device, right?

Yes.

	--yliu
  

Patch

diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 10dcb90..ae1e4fb 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -91,7 +91,6 @@  struct vhost_virtqueue {
 	int			kickfd;			/**< Currently unused as polling mode is enabled. */
 	int			enabled;
 	uint64_t		reserved[16];		/**< Reserve some spaces for future extension. */
-	struct buf_vector	buf_vec[BUF_VECTOR_MAX];	/**< for scatter RX. */
 } __rte_cache_aligned;
 
 
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 411dd95..9095fb1 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -295,7 +295,7 @@  virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 static inline uint32_t __attribute__((always_inline))
 copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 			uint16_t res_base_idx, uint16_t res_end_idx,
-			struct rte_mbuf *pkt)
+			struct rte_mbuf *pkt, struct buf_vector *buf_vec)
 {
 	uint32_t vec_idx = 0;
 	uint32_t entry_success = 0;
@@ -325,7 +325,7 @@  copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 	 */
 	vq = dev->virtqueue[queue_id];
 
-	vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+	vb_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
 	vb_hdr_addr = vb_addr;
 
 	/* Prefetch buffer address. */
@@ -345,19 +345,19 @@  copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 
 	seg_avail = rte_pktmbuf_data_len(pkt);
 	vb_offset = vq->vhost_hlen;
-	vb_avail = vq->buf_vec[vec_idx].buf_len - vq->vhost_hlen;
+	vb_avail = buf_vec[vec_idx].buf_len - vq->vhost_hlen;
 
 	entry_len = vq->vhost_hlen;
 
 	if (vb_avail == 0) {
 		uint32_t desc_idx =
-			vq->buf_vec[vec_idx].desc_idx;
+			buf_vec[vec_idx].desc_idx;
 
 		if ((vq->desc[desc_idx].flags
 			& VRING_DESC_F_NEXT) == 0) {
 			/* Update used ring with desc information */
 			vq->used->ring[cur_idx & (vq->size - 1)].id
-				= vq->buf_vec[vec_idx].desc_idx;
+				= buf_vec[vec_idx].desc_idx;
 			vq->used->ring[cur_idx & (vq->size - 1)].len
 				= entry_len;
 
@@ -367,12 +367,12 @@  copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 		}
 
 		vec_idx++;
-		vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+		vb_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
 
 		/* Prefetch buffer address. */
 		rte_prefetch0((void *)(uintptr_t)vb_addr);
 		vb_offset = 0;
-		vb_avail = vq->buf_vec[vec_idx].buf_len;
+		vb_avail = buf_vec[vec_idx].buf_len;
 	}
 
 	cpy_len = RTE_MIN(vb_avail, seg_avail);
@@ -399,11 +399,11 @@  copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 			 * entry reach to its end.
 			 * But the segment doesn't complete.
 			 */
-			if ((vq->desc[vq->buf_vec[vec_idx].desc_idx].flags &
+			if ((vq->desc[buf_vec[vec_idx].desc_idx].flags &
 				VRING_DESC_F_NEXT) == 0) {
 				/* Update used ring with desc information */
 				vq->used->ring[cur_idx & (vq->size - 1)].id
-					= vq->buf_vec[vec_idx].desc_idx;
+					= buf_vec[vec_idx].desc_idx;
 				vq->used->ring[cur_idx & (vq->size - 1)].len
 					= entry_len;
 				entry_len = 0;
@@ -413,9 +413,9 @@  copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 
 			vec_idx++;
 			vb_addr = gpa_to_vva(dev,
-				vq->buf_vec[vec_idx].buf_addr);
+				buf_vec[vec_idx].buf_addr);
 			vb_offset = 0;
-			vb_avail = vq->buf_vec[vec_idx].buf_len;
+			vb_avail = buf_vec[vec_idx].buf_len;
 			cpy_len = RTE_MIN(vb_avail, seg_avail);
 		} else {
 			/*
@@ -434,7 +434,7 @@  copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 					 * from buf_vec.
 					 */
 					uint32_t desc_idx =
-						vq->buf_vec[vec_idx].desc_idx;
+						buf_vec[vec_idx].desc_idx;
 
 					if ((vq->desc[desc_idx].flags &
 						VRING_DESC_F_NEXT) == 0) {
@@ -456,9 +456,9 @@  copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 					/* Get next buffer from buf_vec. */
 					vec_idx++;
 					vb_addr = gpa_to_vva(dev,
-						vq->buf_vec[vec_idx].buf_addr);
+						buf_vec[vec_idx].buf_addr);
 					vb_avail =
-						vq->buf_vec[vec_idx].buf_len;
+						buf_vec[vec_idx].buf_len;
 					vb_offset = 0;
 				}
 
@@ -471,7 +471,7 @@  copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 				 */
 				/* Update used ring with desc information */
 				vq->used->ring[cur_idx & (vq->size - 1)].id
-					= vq->buf_vec[vec_idx].desc_idx;
+					= buf_vec[vec_idx].desc_idx;
 				vq->used->ring[cur_idx & (vq->size - 1)].len
 					= entry_len;
 				entry_success++;
@@ -485,7 +485,7 @@  copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 
 static inline void __attribute__((always_inline))
 update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
-	uint32_t *secure_len, uint32_t *vec_idx)
+	uint32_t *secure_len, uint32_t *vec_idx, struct buf_vector *buf_vec)
 {
 	uint16_t wrapped_idx = id & (vq->size - 1);
 	uint32_t idx = vq->avail->ring[wrapped_idx];
@@ -496,9 +496,9 @@  update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
 	do {
 		next_desc = 0;
 		len += vq->desc[idx].len;
-		vq->buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
-		vq->buf_vec[vec_id].buf_len = vq->desc[idx].len;
-		vq->buf_vec[vec_id].desc_idx = idx;
+		buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
+		buf_vec[vec_id].buf_len = vq->desc[idx].len;
+		buf_vec[vec_id].desc_idx = idx;
 		vec_id++;
 
 		if (vq->desc[idx].flags & VRING_DESC_F_NEXT) {
@@ -523,6 +523,7 @@  virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 	uint16_t avail_idx;
 	uint16_t res_base_idx, res_cur_idx;
 	uint8_t success = 0;
+	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 
 	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
 		dev->device_fh);
@@ -561,8 +562,8 @@  virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 				if (unlikely(res_cur_idx == avail_idx))
 					goto merge_rx_exit;
 
-				update_secure_len(vq, res_cur_idx,
-						  &secure_len, &vec_idx);
+				update_secure_len(vq, res_cur_idx, &secure_len,
+						  &vec_idx, buf_vec);
 				res_cur_idx++;
 			} while (pkt_len > secure_len);
 
@@ -573,7 +574,7 @@  virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 		} while (success == 0);
 
 		entry_success = copy_from_mbuf_to_vring(dev, queue_id,
-			res_base_idx, res_cur_idx, pkts[pkt_idx]);
+			res_base_idx, res_cur_idx, pkts[pkt_idx], buf_vec);
 
 		rte_smp_wmb();