[v3,2/2] examples/vhost: unconfigure DMA vchannel
Commit Message
From: Xuan Ding <xuan.ding@intel.com>
This patch uses the rte_vhost_async_dma_unconfigure() API to
free DMA vchannels as soon as they are no longer needed,
instead of leaving them configured until the program exits.
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
examples/vhost/main.c | 45 ++++++++++++++++++++++++++++++-------------
examples/vhost/main.h | 1 +
2 files changed, 33 insertions(+), 13 deletions(-)
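
In outline, the cleanup path this patch adds performs an unregister followed by an unconfigure. A minimal sketch of that sequence is below; it assumes the DMA device was configured on vchannel 0, and the helper name and USER1 log type are illustrative rather than taken from the patch:

#include <rte_log.h>
#include <rte_vhost_async.h>

/* Sketch: stop async use of a vhost queue, then release its DMA vchannel
 * right away instead of holding it until the program exits. */
static void
teardown_async_dma(int vid, uint16_t queue_id, int16_t dma_id)
{
	/* Stop the async data path for this queue first. */
	if (rte_vhost_async_channel_unregister(vid, queue_id) < 0)
		RTE_LOG(ERR, USER1, "Failed to unregister async channel.\n");

	/* Then free the DMA vchannel bound to this queue. */
	if (rte_vhost_async_dma_unconfigure(dma_id, 0) < 0)
		RTE_LOG(ERR, USER1, "Failed to unconfigure DMA vchannel.\n");
}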
Comments
> -----Original Message-----
> From: Ding, Xuan <xuan.ding@intel.com>
> Sent: Thursday, September 29, 2022 9:33 AM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; He, Xingguang
> <xingguang.he@intel.com>; Yang, YvonneX <yvonnex.yang@intel.com>; Jiang,
> Cheng1 <cheng1.jiang@intel.com>; Wang, YuanX <yuanx.wang@intel.com>; Ma,
> WenwuX <wenwux.ma@intel.com>; Ding, Xuan <xuan.ding@intel.com>
> Subject: [PATCH v3 2/2] examples/vhost: unconfigure DMA vchannel
>
> From: Xuan Ding <xuan.ding@intel.com>
>
> This patch applies rte_vhost_async_dma_unconfigure() API
> to manually free DMA vchannels instead of waiting
> until the program ends to be released.
>
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
> examples/vhost/main.c | 45 ++++++++++++++++++++++++++++++-------------
> examples/vhost/main.h | 1 +
> 2 files changed, 33 insertions(+), 13 deletions(-)
>
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index 0fa4753e70..32f396d88a 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -1558,6 +1558,28 @@ vhost_clear_queue(struct vhost_dev *vdev, uint16_t queue_id)
>  	}
>  }
> 
> +static void
> +vhost_clear_async(struct vhost_dev *vdev, int vid, uint16_t queue_id)
> +{
> +	int16_t dma_id;
> +	uint16_t ref_count;
> +
> +	if (dma_bind[vid].dmas[queue_id].async_enabled) {
> +		vhost_clear_queue(vdev, queue_id);
> +		rte_vhost_async_channel_unregister(vid, queue_id);
> +		dma_bind[vid].dmas[queue_id].async_enabled = false;
> +	}
> +
> +	dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
> +	dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dma_ref_count--;
> +	ref_count = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dma_ref_count;
The above two lines could be combined as 'ref_count = --XXXX'.
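That is, the decrement and the read collapse to the single statement below, using the patch's own variables:

	ref_count = --dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dma_ref_count;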
But I wonder whether this dma_ref_count is really needed. Isn't it always
0/1, like async_enabled? If it targets the case of multiple ports sharing
the same DMA device, it should not be implemented like this.
Thanks,
Chenbo
> +
> +	if (ref_count == 0 && dma_id != INVALID_DMA_ID) {
> +		if (rte_vhost_async_dma_unconfigure(dma_id, 0) < 0)
> +			RTE_LOG(ERR, VHOST_PORT, "Failed to unconfigure DMA in vhost.\n");
> +	}
> +}
> [...]
Hi Chenbo,
> -----Original Message-----
> From: Xia, Chenbo <chenbo.xia@intel.com>
> Sent: Thursday, September 29, 2022 4:28 PM
> To: Ding, Xuan <xuan.ding@intel.com>; maxime.coquelin@redhat.com
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; He, Xingguang
> <xingguang.he@intel.com>; Yang, YvonneX <yvonnex.yang@intel.com>;
> Jiang, Cheng1 <cheng1.jiang@intel.com>; Wang, YuanX
> <yuanx.wang@intel.com>; Ma, WenwuX <wenwux.ma@intel.com>
> Subject: RE: [PATCH v3 2/2] examples/vhost: unconfigure DMA vchannel
>
> > [...]
> > +	dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
> > +	dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dma_ref_count--;
> > +	ref_count = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dma_ref_count;
>
> The above two lines could be combined as 'ref_count = --XXXX'.
> 
> But I wonder whether this dma_ref_count is really needed. Isn't it always
> 0/1, like async_enabled? If it targets the case of multiple ports sharing
> the same DMA device, it should not be implemented like this.
Yes, the dma_ref_count here is for the case of multiple ports sharing the
same DMA device. The intention was to unconfigure the DMA device once it
is no longer used by any port. I will check the implementation.
Thanks,
Xuan
>
> Thanks,
> Chenbo
>
> > [...]
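
As a side note on the review discussion above: to make the reference count meaningful when several ports share one DMA device, the counter can be keyed by dma_id instead of living in each per-queue dma_info. A minimal sketch follows, with hypothetical names (MAX_DMA_DEVS, dma_ref_get, dma_ref_put) that are not part of the patch:

#include <stdint.h>
#include <rte_vhost_async.h>

#define MAX_DMA_DEVS   64	/* illustrative bound */
#define INVALID_DMA_ID -1	/* same value as examples/vhost/main.h */

/* One counter per DMA device, shared by all ports and queues. */
static uint16_t dma_dev_refs[MAX_DMA_DEVS];

static void
dma_ref_get(int16_t dma_id)
{
	if (dma_id != INVALID_DMA_ID)
		dma_dev_refs[dma_id]++;
}

static void
dma_ref_put(int16_t dma_id)
{
	if (dma_id == INVALID_DMA_ID || dma_dev_refs[dma_id] == 0)
		return;

	/* Unconfigure only when the last user of the device goes away. */
	if (--dma_dev_refs[dma_id] == 0)
		rte_vhost_async_dma_unconfigure(dma_id, 0);
}

With that shape, vhost_async_channel_register() would call dma_ref_get() on each successful registration and destroy_device() would call dma_ref_put(), so the device is unconfigured exactly once, after its last user is gone.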
Patch

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 0fa4753e70..32f396d88a 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1558,6 +1558,28 @@ vhost_clear_queue(struct vhost_dev *vdev, uint16_t queue_id)
 	}
 }
 
+static void
+vhost_clear_async(struct vhost_dev *vdev, int vid, uint16_t queue_id)
+{
+	int16_t dma_id;
+	uint16_t ref_count;
+
+	if (dma_bind[vid].dmas[queue_id].async_enabled) {
+		vhost_clear_queue(vdev, queue_id);
+		rte_vhost_async_channel_unregister(vid, queue_id);
+		dma_bind[vid].dmas[queue_id].async_enabled = false;
+	}
+
+	dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
+	dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dma_ref_count--;
+	ref_count = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dma_ref_count;
+
+	if (ref_count == 0 && dma_id != INVALID_DMA_ID) {
+		if (rte_vhost_async_dma_unconfigure(dma_id, 0) < 0)
+			RTE_LOG(ERR, VHOST_PORT, "Failed to unconfigure DMA in vhost.\n");
+	}
+}
+
 /*
  * Remove a device from the specific data core linked list and from the
  * main linked list. Synchronization occurs through the use of the
@@ -1614,17 +1636,8 @@ destroy_device(int vid)
 		"(%d) device has been removed from data core\n",
 		vdev->vid);
 
-	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
-		vhost_clear_queue(vdev, VIRTIO_RXQ);
-		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
-		dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
-	}
-
-	if (dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled) {
-		vhost_clear_queue(vdev, VIRTIO_TXQ);
-		rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
-		dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled = false;
-	}
+	vhost_clear_async(vdev, vid, VIRTIO_RXQ);
+	vhost_clear_async(vdev, vid, VIRTIO_TXQ);
 
 	rte_free(vdev);
 }
@@ -1673,14 +1686,19 @@ vhost_async_channel_register(int vid)
 
 	if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].dev_id != INVALID_DMA_ID) {
 		rx_ret = rte_vhost_async_channel_register(vid, VIRTIO_RXQ);
-		if (rx_ret == 0)
+		if (rx_ret == 0) {
 			dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled = true;
+			dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].dma_ref_count++;
+		}
+
 	}
 
 	if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].dev_id != INVALID_DMA_ID) {
 		tx_ret = rte_vhost_async_channel_register(vid, VIRTIO_TXQ);
-		if (tx_ret == 0)
+		if (tx_ret == 0) {
 			dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled = true;
+			dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].dma_ref_count++;
+		}
 	}
 
 	return rx_ret | tx_ret;
@@ -1886,6 +1904,7 @@ reset_dma(void)
 		for (j = 0; j < RTE_MAX_QUEUES_PER_PORT * 2; j++) {
 			dma_bind[i].dmas[j].dev_id = INVALID_DMA_ID;
 			dma_bind[i].dmas[j].async_enabled = false;
+			dma_bind[i].dmas[j].dma_ref_count = 0;
 		}
 	}
 
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 2fcb8376c5..2b2cf828d3 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -96,6 +96,7 @@ struct dma_info {
 	struct rte_pci_addr addr;
 	int16_t dev_id;
 	bool async_enabled;
+	uint16_t dma_ref_count;
 };
 
 struct dma_for_vhost {