[v1,2/4] vhost: dynamically alloc async memory

Message ID 20200911015316.1903181-3-patrick.fu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Headers
Series optimize async data path |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Patrick Fu Sept. 11, 2020, 1:53 a.m. UTC
  alloc async internal memory buffer by rte_malloc(), replacing array
declaration inside vq structure. Dynamic allocation can help to save
memory footprint when async path is not registered.

Signed-off-by: Patrick Fu <patrick.fu@intel.com>
---
 lib/librte_vhost/vhost.c | 49 ++++++++++++++++++++++++----------------
 lib/librte_vhost/vhost.h |  4 ++--
 2 files changed, 31 insertions(+), 22 deletions(-)
  

Comments

Maxime Coquelin Sept. 23, 2020, 9:15 a.m. UTC | #1
s/alloc/allocate/

On 9/11/20 3:53 AM, Patrick Fu wrote:
> alloc async internal memory buffer by rte_malloc(), replacing array

Allocate async internal memory buffer with rte_malloc()

> declaration inside vq structure. Dynamic allocation can help to save
> memory footprint when async path is not registered.
> 
> Signed-off-by: Patrick Fu <patrick.fu@intel.com>
> ---
>  lib/librte_vhost/vhost.c | 49 ++++++++++++++++++++++++----------------
>  lib/librte_vhost/vhost.h |  4 ++--
>  2 files changed, 31 insertions(+), 22 deletions(-)
> 
> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> index eca507836..ba374da67 100644
> --- a/lib/librte_vhost/vhost.c
> +++ b/lib/librte_vhost/vhost.c
> @@ -324,6 +324,24 @@ cleanup_device(struct virtio_net *dev, int destroy)
>  	}
>  }
>  
> +static void
> +vhost_free_async_mem(struct vhost_virtqueue *vq)
> +{
> +	if (vq->async_pkts_pending)
> +		rte_free(vq->async_pkts_pending);
> +	if (vq->async_pkts_info)
> +		rte_free(vq->async_pkts_info);
> +	if (vq->it_pool)
> +		rte_free(vq->it_pool);
> +	if (vq->vec_pool)
> +		rte_free(vq->vec_pool);
> +
> +	vq->async_pkts_pending = NULL;
> +	vq->async_pkts_info = NULL;
> +	vq->it_pool = NULL;
> +	vq->vec_pool = NULL;
> +}
> +
>  void
>  free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
>  {
> @@ -331,10 +349,7 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
>  		rte_free(vq->shadow_used_packed);
>  	else {
>  		rte_free(vq->shadow_used_split);
> -		if (vq->async_pkts_pending)
> -			rte_free(vq->async_pkts_pending);
> -		if (vq->async_pkts_info)
> -			rte_free(vq->async_pkts_info);
> +		vhost_free_async_mem(vq);
>  	}
>  	rte_free(vq->batch_copy_elems);
>  	rte_mempool_free(vq->iotlb_pool);
> @@ -1576,13 +1591,15 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
>  	vq->async_pkts_info = rte_malloc(NULL,
>  			vq->size * sizeof(struct async_inflight_info),
>  			RTE_CACHE_LINE_SIZE);
> -	if (!vq->async_pkts_pending || !vq->async_pkts_info) {
> -		if (vq->async_pkts_pending)
> -			rte_free(vq->async_pkts_pending);
> -
> -		if (vq->async_pkts_info)
> -			rte_free(vq->async_pkts_info);
> -
> +	vq->it_pool = rte_malloc(NULL,
> +			VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
> +			RTE_CACHE_LINE_SIZE);
> +	vq->vec_pool = rte_malloc(NULL,
> +			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
> +			RTE_CACHE_LINE_SIZE);
> +	if (!vq->async_pkts_pending || !vq->async_pkts_info ||
> +		!vq->it_pool || !vq->vec_pool) {
> +		vhost_free_async_mem(vq);
>  		VHOST_LOG_CONFIG(ERR,
>  				"async register failed: cannot allocate memory for vq data "
>  				"(vid %d, qid: %d)\n", vid, queue_id);
> @@ -1630,15 +1647,7 @@ int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
>  		goto out;
>  	}
>  
> -	if (vq->async_pkts_pending) {
> -		rte_free(vq->async_pkts_pending);
> -		vq->async_pkts_pending = NULL;
> -	}
> -
> -	if (vq->async_pkts_info) {
> -		rte_free(vq->async_pkts_info);
> -		vq->async_pkts_info = NULL;
> -	}
> +	vhost_free_async_mem(vq);
>  
>  	vq->async_ops.transfer_data = NULL;
>  	vq->async_ops.check_completed_copies = NULL;
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 28aa77380..0af0ac23d 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -218,8 +218,8 @@ struct vhost_virtqueue {
>  	/* operation callbacks for async dma */
>  	struct rte_vhost_async_channel_ops	async_ops;
>  
> -	struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
> -	struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
> +	struct rte_vhost_iov_iter *it_pool;
> +	struct iovec *vec_pool;
>  
>  	/* async data transfer status */
>  	uintptr_t	**async_pkts_pending;
> 

I think you should also take care of reallocating on the same NUMA node
the ring is on (see numa_realloc()).
  
Patrick Fu Sept. 29, 2020, 5:55 a.m. UTC | #2
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Wednesday, September 23, 2020 5:15 PM
> To: Fu, Patrick <patrick.fu@intel.com>; dev@dpdk.org; Xia, Chenbo
> <chenbo.xia@intel.com>
> Cc: Wang, Zhihong <zhihong.wang@intel.com>; Jiang, Cheng1
> <cheng1.jiang@intel.com>
> Subject: Re: [PATCH v1 2/4] vhost: dynamically alloc async memory
> 
> s/alloc/allocate/
> 
Fix in v2

> On 9/11/20 3:53 AM, Patrick Fu wrote:
> > alloc async internal memory buffer by rte_malloc(), replacing array
> 
> Allocate async internal memory buffer with rte_malloc()
> 
Fix in v2

> > index 28aa77380..0af0ac23d 100644
> > --- a/lib/librte_vhost/vhost.h
> > +++ b/lib/librte_vhost/vhost.h
> > @@ -218,8 +218,8 @@ struct vhost_virtqueue {
> >  	/* operation callbacks for async dma */
> >  	struct rte_vhost_async_channel_ops	async_ops;
> >
> > -	struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
> > -	struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
> > +	struct rte_vhost_iov_iter *it_pool;
> > +	struct iovec *vec_pool;
> >
> >  	/* async data transfer status */
> >  	uintptr_t	**async_pkts_pending;
> >
> 
> I think you should also take care of reallocating on the same NUMA node
> the ring is on (see numa_realloc()).
Agree, will add numa based allocation in v2

Thanks,

Patrick
  

Patch

diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index eca507836..ba374da67 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -324,6 +324,24 @@  cleanup_device(struct virtio_net *dev, int destroy)
 	}
 }
 
+static void
+vhost_free_async_mem(struct vhost_virtqueue *vq)
+{
+	if (vq->async_pkts_pending)
+		rte_free(vq->async_pkts_pending);
+	if (vq->async_pkts_info)
+		rte_free(vq->async_pkts_info);
+	if (vq->it_pool)
+		rte_free(vq->it_pool);
+	if (vq->vec_pool)
+		rte_free(vq->vec_pool);
+
+	vq->async_pkts_pending = NULL;
+	vq->async_pkts_info = NULL;
+	vq->it_pool = NULL;
+	vq->vec_pool = NULL;
+}
+
 void
 free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
@@ -331,10 +349,7 @@  free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
 		rte_free(vq->shadow_used_packed);
 	else {
 		rte_free(vq->shadow_used_split);
-		if (vq->async_pkts_pending)
-			rte_free(vq->async_pkts_pending);
-		if (vq->async_pkts_info)
-			rte_free(vq->async_pkts_info);
+		vhost_free_async_mem(vq);
 	}
 	rte_free(vq->batch_copy_elems);
 	rte_mempool_free(vq->iotlb_pool);
@@ -1576,13 +1591,15 @@  int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 	vq->async_pkts_info = rte_malloc(NULL,
 			vq->size * sizeof(struct async_inflight_info),
 			RTE_CACHE_LINE_SIZE);
-	if (!vq->async_pkts_pending || !vq->async_pkts_info) {
-		if (vq->async_pkts_pending)
-			rte_free(vq->async_pkts_pending);
-
-		if (vq->async_pkts_info)
-			rte_free(vq->async_pkts_info);
-
+	vq->it_pool = rte_malloc(NULL,
+			VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
+			RTE_CACHE_LINE_SIZE);
+	vq->vec_pool = rte_malloc(NULL,
+			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
+			RTE_CACHE_LINE_SIZE);
+	if (!vq->async_pkts_pending || !vq->async_pkts_info ||
+		!vq->it_pool || !vq->vec_pool) {
+		vhost_free_async_mem(vq);
 		VHOST_LOG_CONFIG(ERR,
 				"async register failed: cannot allocate memory for vq data "
 				"(vid %d, qid: %d)\n", vid, queue_id);
@@ -1630,15 +1647,7 @@  int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 		goto out;
 	}
 
-	if (vq->async_pkts_pending) {
-		rte_free(vq->async_pkts_pending);
-		vq->async_pkts_pending = NULL;
-	}
-
-	if (vq->async_pkts_info) {
-		rte_free(vq->async_pkts_info);
-		vq->async_pkts_info = NULL;
-	}
+	vhost_free_async_mem(vq);
 
 	vq->async_ops.transfer_data = NULL;
 	vq->async_ops.check_completed_copies = NULL;
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 28aa77380..0af0ac23d 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -218,8 +218,8 @@  struct vhost_virtqueue {
 	/* operation callbacks for async dma */
 	struct rte_vhost_async_channel_ops	async_ops;
 
-	struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
-	struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
+	struct rte_vhost_iov_iter *it_pool;
+	struct iovec *vec_pool;
 
 	/* async data transfer status */
 	uintptr_t	**async_pkts_pending;