[v3,4/5] vhost_user: support function defines for back-end
Commit Message
Here we define support functions which are called from the various
vhost-user back-end message functions like set memory table, get
memory slots, add memory region, remove memory region. These are
essentially common functions to initialize memory, unmap a set of
memory regions, perform region copy and align memory addresses.
Signed-off-by: Pravin M Bathija <pravin.bathija@dell.com>
---
lib/vhost/vhost_user.c | 80 +++++++++++++++++++++++++++++++++++-------
1 file changed, 68 insertions(+), 12 deletions(-)
Comments
On 11/4/2025 12:21 PM, Pravin M Bathija wrote:
> Here we define support functions which are called from the various
> vhost-user back-end message functions like set memory table, get
> memory slots, add memory region, remove memory region. These are
> essentially common functions to initialize memory, unmap a set of
> memory regions, perform region copy and align memory addresses.
>
> Signed-off-by: Pravin M Bathija <pravin.bathija@dell.com>
> ---
> lib/vhost/vhost_user.c | 80 +++++++++++++++++++++++++++++++++++-------
> 1 file changed, 68 insertions(+), 12 deletions(-)
>
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 168432e7d1..9a85f2fc92 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> @@ -228,7 +228,17 @@ async_dma_map(struct virtio_net *dev, bool do_map)
> }
>
> static void
> -free_mem_region(struct virtio_net *dev)
> +free_mem_region(struct rte_vhost_mem_region *reg)
> +{
> + if (reg != NULL && reg->host_user_addr) {
> + munmap(reg->mmap_addr, reg->mmap_size);
> + close(reg->fd);
> + memset(reg, 0, sizeof(struct rte_vhost_mem_region));
> + }
> +}
> +
> +static void
> +free_all_mem_regions(struct virtio_net *dev)
> {
> uint32_t i;
> struct rte_vhost_mem_region *reg;
> @@ -239,12 +249,10 @@ free_mem_region(struct virtio_net *dev)
> if (dev->async_copy && rte_vfio_is_enabled("vfio"))
> async_dma_map(dev, false);
>
> - for (i = 0; i < dev->mem->nregions; i++) {
> + for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
> reg = &dev->mem->regions[i];
> - if (reg->host_user_addr) {
> - munmap(reg->mmap_addr, reg->mmap_size);
> - close(reg->fd);
> - }
> + if (reg->mmap_addr)
> + free_mem_region(reg);
> }
> }
>
> @@ -258,7 +266,7 @@ vhost_backend_cleanup(struct virtio_net *dev)
> vdpa_dev->ops->dev_cleanup(dev->vid);
>
> if (dev->mem) {
> - free_mem_region(dev);
> + free_all_mem_regions(dev);
> rte_free(dev->mem);
> dev->mem = NULL;
> }
> @@ -707,7 +715,7 @@ numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
> vhost_devices[dev->vid] = dev;
>
> mem_size = sizeof(struct rte_vhost_memory) +
> - sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
> + sizeof(struct rte_vhost_mem_region) * VHOST_MEMORY_MAX_NREGIONS;
> mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
> if (!mem) {
> VHOST_CONFIG_LOG(dev->ifname, ERR,
> @@ -811,8 +819,10 @@ hua_to_alignment(struct rte_vhost_memory *mem, void *ptr)
> uint32_t i;
> uintptr_t hua = (uintptr_t)ptr;
>
> - for (i = 0; i < mem->nregions; i++) {
> + for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
> r = &mem->regions[i];
> + if (r->host_user_addr == 0)
> + continue;
> if (hua >= r->host_user_addr &&
> hua < r->host_user_addr + r->size) {
> return get_blk_size(r->fd);
> @@ -1250,9 +1260,13 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
> * retrieve the region offset when handling userfaults.
> */
> memory = &ctx->msg.payload.memory;
> - for (i = 0; i < memory->nregions; i++) {
> + for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
I think using MAX_NREGIONS here is mostly for convenience, but it will impact
performance, because rte_vhost_va_from_guest_pa() would have to iterate over the
entire array.
I think we should keep the original implementation: make sure the first nregions
entries of the memory regions are always valid.
Besides, where is the modification for rte_vhost_va_from_guest_pa()?
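For reference, the hot-path lookup in question is shaped roughly like this (a
simplified sketch of the static inline in lib/vhost/rte_vhost.h; the exact
upstream body may differ):

	/* Sketch only: linear GPA->VA translation bounded by mem->nregions. */
	static __rte_always_inline uint64_t
	rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
			uint64_t gpa, uint64_t *len)
	{
		struct rte_vhost_mem_region *r;
		uint32_t i;

		for (i = 0; i < mem->nregions; i++) {
			r = &mem->regions[i];
			if (gpa >= r->guest_phys_addr &&
			    gpa < r->guest_phys_addr + r->size) {
				if (unlikely(*len > r->guest_phys_addr + r->size - gpa))
					*len = r->guest_phys_addr + r->size - gpa;
				return gpa - r->guest_phys_addr + r->host_user_addr;
			}
		}
		*len = 0;

		return 0;
	}

If regions may sit sparsely anywhere in a 128-entry array, this loop must either
scan all VHOST_MEMORY_MAX_NREGIONS slots or gain the same skip-empty check as the
loops in this patch, both of which cost on the data path.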
> + int reg_msg_index = 0;
> reg = &dev->mem->regions[i];
> - memory->regions[i].userspace_addr = reg->host_user_addr;
> + if (reg->host_user_addr == 0)
> + continue;
> + memory->regions[reg_msg_index].userspace_addr = reg->host_user_addr;
> + reg_msg_index++;
> }
>
> /* Send the addresses back to qemu */
> @@ -1279,8 +1293,10 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
> }
>
> /* Now userfault register and we can use the memory */
> - for (i = 0; i < memory->nregions; i++) {
> + for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
> reg = &dev->mem->regions[i];
> + if (reg->host_user_addr == 0)
> + continue;
> if (vhost_user_postcopy_region_register(dev, reg) < 0)
> return -1;
> }
> @@ -1385,6 +1401,46 @@ vhost_user_mmap_region(struct virtio_net *dev,
> return 0;
> }
>
> +static int
> +vhost_user_initialize_memory(struct virtio_net **pdev)
This function should be part of 3/5, otherwise 3/5 will fail to compile.
> +{
> + struct virtio_net *dev = *pdev;
> + int numa_node = SOCKET_ID_ANY;
> +
> + /*
> + * If VQ 0 has already been allocated, try to allocate on the same
> + * NUMA node. It can be reallocated later in numa_realloc().
> + */
> + if (dev->nr_vring > 0)
> + numa_node = dev->virtqueue[0]->numa_node;
> +
> + dev->nr_guest_pages = 0;
> + if (dev->guest_pages == NULL) {
> + dev->max_guest_pages = 8;
It should be VHOST_MEMORY_MAX_NREGIONS
> + dev->guest_pages = rte_zmalloc_socket(NULL,
> + dev->max_guest_pages *
> + sizeof(struct guest_page),
> + RTE_CACHE_LINE_SIZE,
> + numa_node);
> + if (dev->guest_pages == NULL) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR,
> + "failed to allocate memory for dev->guest_pages");
> + return -1;
> + }
> + }
> +
> + dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
> + sizeof(struct rte_vhost_mem_region) * VHOST_MEMORY_MAX_NREGIONS, 0, numa_node);
> + if (dev->mem == NULL) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to allocate memory for dev->mem");
> + rte_free(dev->guest_pages);
> + dev->guest_pages = NULL;
> + return -1;
> + }
> +
> + return 0;
> +}
> +
> static int
> vhost_user_set_mem_table(struct virtio_net **pdev,
> struct vhu_msg_context *ctx,
Responses inline.
> -----Original Message-----
> From: fengchengwen <fengchengwen@huawei.com>
> Sent: Tuesday, November 4, 2025 12:06 AM
> To: Bathija, Pravin <Pravin.Bathija@dell.com>; dev@dpdk.org
> Cc: pravin.m.bathija.dev@gmail.com
> Subject: Re: [PATCH v3 4/5] vhost_user: support function defines for back-end
>
> On 11/4/2025 12:21 PM, Pravin M Bathija wrote:
> > [snip]
>
> I think using MAX_NREGIONS here is mostly for convenience, but it will impact
> performance, because rte_vhost_va_from_guest_pa() would have to iterate over
> the entire array.
>
Replaced VHOST_MEMORY_MAX_NREGIONS with memory->nregions. Please review v4.
> I think we should keep the original implementation: make sure the first
> nregions entries of the memory regions are always valid.
>
> Besides, where is the modification for rte_vhost_va_from_guest_pa()?
Could you please provide more detail? rte_vhost_va_from_guest_pa() was never called from here before.
>
> > [snip]
> > +static int
> > +vhost_user_initialize_memory(struct virtio_net **pdev)
>
> This function should be part of 3/5, otherwise 3/5 will fail to compile.
I have moved support functions to patch-3. Please review v4.
>
> > [snip]
> > + dev->max_guest_pages = 8;
>
> It should be VHOST_MEMORY_MAX_NREGIONS
Done. Please review v4.
On 11/11/2025 7:31 PM, Bathija, Pravin wrote:
> Responses inline.
>
> [snip]
>
>> Besides, where is the modification for rte_vhost_va_from_guest_pa()?
>
> Could you please provide more detail? rte_vhost_va_from_guest_pa() was never
> called from here before.
Because rte_vhost_va_from_guest_pa() uses mem->nregions as the upper limit:

    for (i = 0; i < mem->nregions; i++) {

This function was never called from here, but I think we need to make each
commit complete.
Comments inline.
> -----Original Message-----
> From: fengchengwen <fengchengwen@huawei.com>
> Sent: Tuesday, November 11, 2025 5:21 PM
> To: Bathija, Pravin <Pravin.Bathija@dell.com>; dev@dpdk.org
> Cc: pravin.m.bathija.dev@gmail.com
> Subject: Re: [PATCH v3 4/5] vhost_user: support function defines for back-end
>
> On 11/11/2025 7:31 PM, Bathija, Pravin wrote:
> > Responses inline.
> [snip]
>
> Because rte_vhost_va_from_guest_pa() uses mem->nregions as the upper limit:
>
>     for (i = 0; i < mem->nregions; i++) {
>
> This function was never called from here, but I think we need to make each
> commit complete.
From what I gather, both of your latest comments, on patches 4 and 5, concern the performance impact of going to 128 memory regions. I will work on an algorithm to optimize this as you suggested.
Meanwhile, we could approve patches 1-4. The rest of the implementation does not need 128 memory regions; the existing 8 regions are fine. I put in this change because I felt it makes the system scale better, and it was also suggested by Maxime. I think patches 1-4 stand by themselves and have been thoroughly tested. They could be merged unless you have other recommendations or changes you would like me to consider. Thoughts?
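One direction for that optimization (just a sketch under the assumption that
regions can be removed individually; the helper name is hypothetical and not
part of this series) is to keep the regions array densely packed by compacting
it on removal, so mem->nregions stays a valid bound for the hot-path loops:

	/* Hypothetical helper, not in this series: swap-remove a region so
	 * that slots [0, nregions) always stay valid and hole-free.
	 * Assumes the usual vhost_user.c includes (string.h for memset). */
	static void
	vhost_memory_compact_remove(struct rte_vhost_memory *mem, uint32_t idx)
	{
		struct rte_vhost_mem_region *last;

		if (idx >= mem->nregions)
			return;

		last = &mem->regions[mem->nregions - 1];
		if (&mem->regions[idx] != last)
			mem->regions[idx] = *last;	/* fill the hole with the last entry */
		memset(last, 0, sizeof(*last));	/* clear the vacated tail slot */
		mem->nregions--;
	}

Swap-remove reorders the array, which should be harmless here since the
translation loops scan regions linearly anyway.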
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 168432e7d1..9a85f2fc92 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -228,7 +228,17 @@ async_dma_map(struct virtio_net *dev, bool do_map)
 }
 
 static void
-free_mem_region(struct virtio_net *dev)
+free_mem_region(struct rte_vhost_mem_region *reg)
+{
+	if (reg != NULL && reg->host_user_addr) {
+		munmap(reg->mmap_addr, reg->mmap_size);
+		close(reg->fd);
+		memset(reg, 0, sizeof(struct rte_vhost_mem_region));
+	}
+}
+
+static void
+free_all_mem_regions(struct virtio_net *dev)
 {
 	uint32_t i;
 	struct rte_vhost_mem_region *reg;
@@ -239,12 +249,10 @@ free_mem_region(struct virtio_net *dev)
 	if (dev->async_copy && rte_vfio_is_enabled("vfio"))
 		async_dma_map(dev, false);
 
-	for (i = 0; i < dev->mem->nregions; i++) {
+	for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
 		reg = &dev->mem->regions[i];
-		if (reg->host_user_addr) {
-			munmap(reg->mmap_addr, reg->mmap_size);
-			close(reg->fd);
-		}
+		if (reg->mmap_addr)
+			free_mem_region(reg);
 	}
 }
 
@@ -258,7 +266,7 @@ vhost_backend_cleanup(struct virtio_net *dev)
 		vdpa_dev->ops->dev_cleanup(dev->vid);
 
 	if (dev->mem) {
-		free_mem_region(dev);
+		free_all_mem_regions(dev);
 		rte_free(dev->mem);
 		dev->mem = NULL;
 	}
@@ -707,7 +715,7 @@ numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 	vhost_devices[dev->vid] = dev;
 
 	mem_size = sizeof(struct rte_vhost_memory) +
-		sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
+		sizeof(struct rte_vhost_mem_region) * VHOST_MEMORY_MAX_NREGIONS;
 	mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
 	if (!mem) {
 		VHOST_CONFIG_LOG(dev->ifname, ERR,
@@ -811,8 +819,10 @@ hua_to_alignment(struct rte_vhost_memory *mem, void *ptr)
 	uint32_t i;
 	uintptr_t hua = (uintptr_t)ptr;
 
-	for (i = 0; i < mem->nregions; i++) {
+	for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
 		r = &mem->regions[i];
+		if (r->host_user_addr == 0)
+			continue;
 		if (hua >= r->host_user_addr &&
 			hua < r->host_user_addr + r->size) {
 			return get_blk_size(r->fd);
@@ -1250,9 +1260,13 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
 	 * retrieve the region offset when handling userfaults.
 	 */
 	memory = &ctx->msg.payload.memory;
-	for (i = 0; i < memory->nregions; i++) {
+	for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
+		int reg_msg_index = 0;
 		reg = &dev->mem->regions[i];
-		memory->regions[i].userspace_addr = reg->host_user_addr;
+		if (reg->host_user_addr == 0)
+			continue;
+		memory->regions[reg_msg_index].userspace_addr = reg->host_user_addr;
+		reg_msg_index++;
 	}
 
 	/* Send the addresses back to qemu */
@@ -1279,8 +1293,10 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
 	}
 
 	/* Now userfault register and we can use the memory */
-	for (i = 0; i < memory->nregions; i++) {
+	for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
 		reg = &dev->mem->regions[i];
+		if (reg->host_user_addr == 0)
+			continue;
 		if (vhost_user_postcopy_region_register(dev, reg) < 0)
 			return -1;
 	}
@@ -1385,6 +1401,46 @@ vhost_user_mmap_region(struct virtio_net *dev,
 	return 0;
 }
 
+static int
+vhost_user_initialize_memory(struct virtio_net **pdev)
+{
+	struct virtio_net *dev = *pdev;
+	int numa_node = SOCKET_ID_ANY;
+
+	/*
+	 * If VQ 0 has already been allocated, try to allocate on the same
+	 * NUMA node. It can be reallocated later in numa_realloc().
+	 */
+	if (dev->nr_vring > 0)
+		numa_node = dev->virtqueue[0]->numa_node;
+
+	dev->nr_guest_pages = 0;
+	if (dev->guest_pages == NULL) {
+		dev->max_guest_pages = 8;
+		dev->guest_pages = rte_zmalloc_socket(NULL,
+					dev->max_guest_pages *
+					sizeof(struct guest_page),
+					RTE_CACHE_LINE_SIZE,
+					numa_node);
+		if (dev->guest_pages == NULL) {
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate memory for dev->guest_pages");
+			return -1;
+		}
+	}
+
+	dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
+		sizeof(struct rte_vhost_mem_region) * VHOST_MEMORY_MAX_NREGIONS, 0, numa_node);
+	if (dev->mem == NULL) {
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to allocate memory for dev->mem");
+		rte_free(dev->guest_pages);
+		dev->guest_pages = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
 static int
 vhost_user_set_mem_table(struct virtio_net **pdev,
 			struct vhu_msg_context *ctx,