vhost: flush IOTLB cache on new mem table handling
Checks
Commit Message
IOTLB entries contain the host virtual address of the guest
pages. When receiving a new VHOST_USER_SET_MEM_TABLE request,
the previous regions get unmapped, so the IOTLB entries, if any,
will be invalid. This causes the vhost-user process to
segfault.
This patch introduces a new function to flush the IOTLB cache,
and calls it as soon as the backend handles a
VHOST_USER_SET_MEM_TABLE request.
Fixes: 69c90e98f483 ("vhost: enable IOMMU support")
Cc: stable@dpdk.org
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/librte_vhost/iotlb.c | 10 ++++++++--
lib/librte_vhost/iotlb.h | 2 +-
lib/librte_vhost/vhost_user.c | 11 +++++++++++
3 files changed, 20 insertions(+), 3 deletions(-)
Comments
On Thu, 2 Aug 2018 18:37:27 +0200
Maxime Coquelin <maxime.coquelin@redhat.com> wrote:
>
> +void
> +vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq)
> +{
> + vhost_user_iotlb_cache_remove_all(vq);
> + vhost_user_iotlb_pending_remove_all(vq);
> +}
> +
Wrong indentation, you don't want two tabs here, only one.
On 08/02/2018 07:09 PM, Stephen Hemminger wrote:
> On Thu, 2 Aug 2018 18:37:27 +0200
> Maxime Coquelin <maxime.coquelin@redhat.com> wrote:
>
>>
>> +void
>> +vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq)
>> +{
>> + vhost_user_iotlb_cache_remove_all(vq);
>> + vhost_user_iotlb_pending_remove_all(vq);
>> +}
>> +
>
> Wrong indentation, you don't want two tabs here, only one.
>
Right, thanks for pointing it out.
I had sent the wrong version anyway, v2 is on its way.
Thanks,
Maxime
@@ -303,6 +303,13 @@ vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
return vva;
}
+void
+vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq)
+{
+ vhost_user_iotlb_cache_remove_all(vq);
+ vhost_user_iotlb_pending_remove_all(vq);
+}
+
int
vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
{
@@ -315,8 +322,7 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
* The cache has already been initialized,
* just drop all cached and pending entries.
*/
- vhost_user_iotlb_cache_remove_all(vq);
- vhost_user_iotlb_pending_remove_all(vq);
+ vhost_user_iotlb_flush_all(vq);
}
#ifdef RTE_LIBRTE_VHOST_NUMA
@@ -73,7 +73,7 @@ void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq, uint64_t iova,
uint8_t perm);
void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq, uint64_t iova,
uint64_t size, uint8_t perm);
-
+void vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq);
int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index);
#endif /* _VHOST_IOTLB_H_ */
@@ -813,6 +813,17 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
dev->mem = NULL;
}
+ /* Flush IOTLB cache as previous HVAs are now invalid */
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+ vhost_user_iotlb_wr_lock(vq);
+ vhost_user_iotlb_flush_all(vq);
+ vhost_user_iotlb_wr_unlock(vq);
+ }
+ }
+
dev->nr_guest_pages = 0;
if (!dev->guest_pages) {
dev->max_guest_pages = 8;