[v4,2/2] examples/vhost: unconfigure DMA vchannel

Message ID 20221013064040.98489-3-xuan.ding@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series vhost: introduce DMA vchannel unconfiguration

Checks

Context                         Check    Description
ci/checkpatch                   success  coding style OK
ci/iol-mellanox-Performance     success  Performance Testing PASS
ci/iol-intel-Functional         success  Functional Testing PASS
ci/iol-aarch64-compile-testing  success  Testing PASS
ci/iol-x86_64-unit-testing      success  Testing PASS
ci/iol-intel-Performance        success  Performance Testing PASS
ci/iol-aarch64-unit-testing     success  Testing PASS
ci/Intel-compilation            success  Compilation OK
ci/iol-x86_64-compile-testing   success  Testing PASS
ci/intel-Testing                success  Testing PASS
ci/github-robot: build          success  github build: passed

Commit Message

Ding, Xuan Oct. 13, 2022, 6:40 a.m. UTC
  From: Xuan Ding <xuan.ding@intel.com>

This patch applies rte_vhost_async_dma_unconfigure() to manually
free DMA vchannels. Before unconfiguring, make sure the specified
DMA device is no longer used by any vhost port.

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
 examples/vhost/main.c | 38 +++++++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 13 deletions(-)
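
The change boils down to a per-device reference count: open_dma() takes a
reference each time a vring is bound to a DMA device, and
rte_vhost_async_dma_unconfigure() is called only once the last reference is
dropped. A minimal sketch of that pairing follows; the dma_get()/dma_put()
helper names are illustrative only and not part of the patch, and
rte_vhost_async_dma_unconfigure() is the API introduced in patch 1/2 of this
series.

/* Sketch only: reference-counted setup/teardown of DMA vchannel 0. */
#include <rte_dmadev.h>
#include <rte_log.h>
#include <rte_vhost_async.h>

static int16_t ref_count[RTE_DMADEV_DEFAULT_MAX];

static int
dma_get(int16_t dma_id)
{
	/* First user of this device: expose vchannel 0 to vhost async. */
	if (ref_count[dma_id]++ == 0)
		return rte_vhost_async_dma_configure(dma_id, 0);
	return 0;
}

static void
dma_put(int16_t dma_id)
{
	if (--ref_count[dma_id] > 0)
		return;

	/* Last user is gone: hand the vchannel back to the application. */
	if (rte_vhost_async_dma_unconfigure(dma_id, 0) < 0)
		RTE_LOG(ERR, USER1, "Failed to unconfigure DMA %d\n", dma_id);
}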
  

Comments

Maxime Coquelin Oct. 13, 2022, 8:07 a.m. UTC | #1
On 10/13/22 08:40, xuan.ding@intel.com wrote:
> From: Xuan Ding <xuan.ding@intel.com>
> 
> This patch applies rte_vhost_async_dma_unconfigure() to manually
> free DMA vchannels. Before unconfiguration, need make sure the
> specified DMA device is no longer used by any vhost ports.
> 
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
>   examples/vhost/main.c | 38 +++++++++++++++++++++++++-------------
>   1 file changed, 25 insertions(+), 13 deletions(-)
> 
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index ac78704d79..bfeb808dcc 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -73,6 +73,7 @@ static int total_num_mbufs = NUM_MBUFS_DEFAULT;
>   
>   struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
>   int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
> +int16_t dma_ref_count[RTE_DMADEV_DEFAULT_MAX];
>   static int dma_count;
>   
>   /* mask of enabled ports */
> @@ -371,6 +372,7 @@ open_dma(const char *value)
>   done:
>   		(dma_info + socketid)->dmas[vring_id].dev_id = dev_id;
>   		(dma_info + socketid)->async_flag |= async_flag;
> +		dma_ref_count[dev_id]++;
>   		i++;
>   	}
>   out:
> @@ -1562,6 +1564,27 @@ vhost_clear_queue(struct vhost_dev *vdev, uint16_t queue_id)
>   	}
>   }
>   
> +static void
> +vhost_clear_async(struct vhost_dev *vdev, int vid, uint16_t queue_id)
> +{
> +	int16_t dma_id;
> +
> +	if (dma_bind[vid].dmas[queue_id].async_enabled) {

if (!dma_bind[vid].dmas[queue_id].async_enabled)
	return;

> +		vhost_clear_queue(vdev, queue_id);
> +		rte_vhost_async_channel_unregister(vid, queue_id);
> +		dma_bind[vid].dmas[queue_id].async_enabled = false;
> +
> +		dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
> +		dma_ref_count[dma_id]--;
> +
> +		if (dma_ref_count[dma_id] == 0) {

if (dma_ref_count[dma_id] > 0)
	return;

Doing this should improve readability.

> +			if (rte_vhost_async_dma_unconfigure(dma_id, 0) < 0)
> +				RTE_LOG(ERR, VHOST_CONFIG,
> +				       "Failed to unconfigure DMA %d in vhost.\n", dma_id);
> +		}
> +	}
> +}
> +
>   /*
>    * Remove a device from the specific data core linked list and from the
>    * main linked list. Synchronization  occurs through the use of the
> @@ -1618,17 +1641,8 @@ destroy_device(int vid)
>   		"(%d) device has been removed from data core\n",
>   		vdev->vid);
>   
> -	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
> -		vhost_clear_queue(vdev, VIRTIO_RXQ);
> -		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
> -		dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
> -	}
> -
> -	if (dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled) {
> -		vhost_clear_queue(vdev, VIRTIO_TXQ);
> -		rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
> -		dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled = false;
> -	}
> +	vhost_clear_async(vdev, vid, VIRTIO_RXQ);
> +	vhost_clear_async(vdev, vid, VIRTIO_TXQ);
>   
>   	rte_free(vdev);
>   }
> @@ -1690,8 +1704,6 @@ vhost_async_channel_register(int vid)
>   	return rx_ret | tx_ret;
>   }
>   
> -
> -
>   /*
>    * A new device is added to a data core. First the device is added to the main linked list
>    * and then allocated to a specific data core.
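
Putting the two suggested early returns together, vhost_clear_async() would
read roughly as follows. This is only a sketch of the review suggestion
applied to the code in this patch (the code that actually landed in v5 may
differ) and relies on the dma_bind, vid2socketid and dma_ref_count
definitions from main.c quoted above.

static void
vhost_clear_async(struct vhost_dev *vdev, int vid, uint16_t queue_id)
{
	int16_t dma_id;

	/* Nothing to do for queues that never enabled async DMA. */
	if (!dma_bind[vid].dmas[queue_id].async_enabled)
		return;

	vhost_clear_queue(vdev, queue_id);
	rte_vhost_async_channel_unregister(vid, queue_id);
	dma_bind[vid].dmas[queue_id].async_enabled = false;

	dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
	dma_ref_count[dma_id]--;

	/* Other vhost ports may still be using this DMA device. */
	if (dma_ref_count[dma_id] > 0)
		return;

	if (rte_vhost_async_dma_unconfigure(dma_id, 0) < 0)
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to unconfigure DMA %d in vhost.\n", dma_id);
}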
  
Ding, Xuan Oct. 13, 2022, 8:49 a.m. UTC | #2
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Thursday, October 13, 2022 4:07 PM
> To: Ding, Xuan <xuan.ding@intel.com>; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; He, Xingguang
> <xingguang.he@intel.com>; Yang, YvonneX <yvonnex.yang@intel.com>;
> Jiang, Cheng1 <cheng1.jiang@intel.com>; Wang, YuanX
> <yuanx.wang@intel.com>; Ma, WenwuX <wenwux.ma@intel.com>
> Subject: Re: [PATCH v4 2/2] examples/vhost: unconfigure DMA vchannel
> 
> 
> 
> On 10/13/22 08:40, xuan.ding@intel.com wrote:
> > From: Xuan Ding <xuan.ding@intel.com>
> >
> > This patch applies rte_vhost_async_dma_unconfigure() to manually free
> > DMA vchannels. Before unconfiguration, need make sure the specified
> > DMA device is no longer used by any vhost ports.
> >
> > Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> > ---
> >   examples/vhost/main.c | 38 +++++++++++++++++++++++++-------------
> >   1 file changed, 25 insertions(+), 13 deletions(-)
> >
> > diff --git a/examples/vhost/main.c b/examples/vhost/main.c index
> > ac78704d79..bfeb808dcc 100644
> > --- a/examples/vhost/main.c
> > +++ b/examples/vhost/main.c
> > @@ -73,6 +73,7 @@ static int total_num_mbufs = NUM_MBUFS_DEFAULT;
> >
> >   struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
> >   int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
> > +int16_t dma_ref_count[RTE_DMADEV_DEFAULT_MAX];
> >   static int dma_count;
> >
> >   /* mask of enabled ports */
> > @@ -371,6 +372,7 @@ open_dma(const char *value)
> >   done:
> >   		(dma_info + socketid)->dmas[vring_id].dev_id = dev_id;
> >   		(dma_info + socketid)->async_flag |= async_flag;
> > +		dma_ref_count[dev_id]++;
> >   		i++;
> >   	}
> >   out:
> > @@ -1562,6 +1564,27 @@ vhost_clear_queue(struct vhost_dev *vdev,
> uint16_t queue_id)
> >   	}
> >   }
> >
> > +static void
> > +vhost_clear_async(struct vhost_dev *vdev, int vid, uint16_t queue_id)
> > +{
> > +	int16_t dma_id;
> > +
> > +	if (dma_bind[vid].dmas[queue_id].async_enabled) {
> 
> if (!dma_bind[vid].dmas[queue_id].async_enabled)
> 	return;
> 
> > +		vhost_clear_queue(vdev, queue_id);
> > +		rte_vhost_async_channel_unregister(vid, queue_id);
> > +		dma_bind[vid].dmas[queue_id].async_enabled = false;
> > +
> > +		dma_id = dma_bind[vid2socketid[vdev-
> >vid]].dmas[queue_id].dev_id;
> > +		dma_ref_count[dma_id]--;
> > +
> > +		if (dma_ref_count[dma_id] == 0) {
> 
> if (dma_ref_count[dma_id] > 0)
> 	return;
> 
> Doing this should improve readability.

Good suggestion! Please see v5.

Thanks,
Xuan

> 
> > +			if (rte_vhost_async_dma_unconfigure(dma_id, 0) < 0)
> > +				RTE_LOG(ERR, VHOST_CONFIG,
> > +				       "Failed to unconfigure DMA %d in
> vhost.\n", dma_id);
> > +		}
> > +	}
> > +}
> > +
> >   /*
> >    * Remove a device from the specific data core linked list and from the
> >    * main linked list. Synchronization  occurs through the use of the
> > @@ -1618,17 +1641,8 @@ destroy_device(int vid)
> >   		"(%d) device has been removed from data core\n",
> >   		vdev->vid);
> >
> > -	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
> > -		vhost_clear_queue(vdev, VIRTIO_RXQ);
> > -		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
> > -		dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
> > -	}
> > -
> > -	if (dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled) {
> > -		vhost_clear_queue(vdev, VIRTIO_TXQ);
> > -		rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
> > -		dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled = false;
> > -	}
> > +	vhost_clear_async(vdev, vid, VIRTIO_RXQ);
> > +	vhost_clear_async(vdev, vid, VIRTIO_TXQ);
> >
> >   	rte_free(vdev);
> >   }
> > @@ -1690,8 +1704,6 @@ vhost_async_channel_register(int vid)
> >   	return rx_ret | tx_ret;
> >   }
> >
> > -
> > -
> >   /*
> >    * A new device is added to a data core. First the device is added to the
> main linked list
> >    * and then allocated to a specific data core.
  

Patch

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index ac78704d79..bfeb808dcc 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -73,6 +73,7 @@  static int total_num_mbufs = NUM_MBUFS_DEFAULT;
 
 struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
 int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
+int16_t dma_ref_count[RTE_DMADEV_DEFAULT_MAX];
 static int dma_count;
 
 /* mask of enabled ports */
@@ -371,6 +372,7 @@  open_dma(const char *value)
 done:
 		(dma_info + socketid)->dmas[vring_id].dev_id = dev_id;
 		(dma_info + socketid)->async_flag |= async_flag;
+		dma_ref_count[dev_id]++;
 		i++;
 	}
 out:
@@ -1562,6 +1564,27 @@  vhost_clear_queue(struct vhost_dev *vdev, uint16_t queue_id)
 	}
 }
 
+static void
+vhost_clear_async(struct vhost_dev *vdev, int vid, uint16_t queue_id)
+{
+	int16_t dma_id;
+
+	if (dma_bind[vid].dmas[queue_id].async_enabled) {
+		vhost_clear_queue(vdev, queue_id);
+		rte_vhost_async_channel_unregister(vid, queue_id);
+		dma_bind[vid].dmas[queue_id].async_enabled = false;
+
+		dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
+		dma_ref_count[dma_id]--;
+
+		if (dma_ref_count[dma_id] == 0) {
+			if (rte_vhost_async_dma_unconfigure(dma_id, 0) < 0)
+				RTE_LOG(ERR, VHOST_CONFIG,
+				       "Failed to unconfigure DMA %d in vhost.\n", dma_id);
+		}
+	}
+}
+
 /*
  * Remove a device from the specific data core linked list and from the
  * main linked list. Synchronization  occurs through the use of the
@@ -1618,17 +1641,8 @@  destroy_device(int vid)
 		"(%d) device has been removed from data core\n",
 		vdev->vid);
 
-	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
-		vhost_clear_queue(vdev, VIRTIO_RXQ);
-		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
-		dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
-	}
-
-	if (dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled) {
-		vhost_clear_queue(vdev, VIRTIO_TXQ);
-		rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
-		dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled = false;
-	}
+	vhost_clear_async(vdev, vid, VIRTIO_RXQ);
+	vhost_clear_async(vdev, vid, VIRTIO_TXQ);
 
 	rte_free(vdev);
 }
@@ -1690,8 +1704,6 @@  vhost_async_channel_register(int vid)
 	return rx_ret | tx_ret;
 }
 
-
-
 /*
  * A new device is added to a data core. First the device is added to the main linked list
  * and then allocated to a specific data core.