vhost: fix vring memory partially mapped

Message ID 20190906125020.23276-1-amorenoz@redhat.com (mailing list archive)
State Accepted, archived
Delegated to: Maxime Coquelin
Series vhost: fix vring memory partially mapped

Checks

Context                    Check     Description
ci/checkpatch              success   coding style OK
ci/iol-dpdk_compile_ovs    success   Compile Testing PASS
ci/iol-dpdk_compile_spdk   success   Compile Testing PASS
ci/iol-dpdk_compile        success   Compile Testing PASS
ci/intel-Performance       success   Performance Testing PASS
ci/mellanox-Performance    success   Performance Testing PASS

Commit Message

Adrian Moreno Sept. 6, 2019, 12:50 p.m. UTC
Currently, only the mapping of the vring start addresses is ensured. This
causes errors when the vring size is larger than the IOTLB page size,
e.g. queue sizes larger than 256 with 4 KB IOTLB pages.
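
To make that threshold concrete, here is a minimal standalone sketch (not
part of the patch) of the size arithmetic: a split-ring descriptor is 16
bytes, so a 256-entry descriptor table fills exactly one 4 KB page, and any
larger queue spills onto pages the old code never requested a translation
for.

/*
 * Illustration only, not DPDK code: compute how many 4 KB IOTLB pages a
 * split-ring descriptor table spans for a few queue sizes.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page = 4096;	/* 4 KB IOTLB page */
	const uint64_t desc_sz = 16;	/* sizeof(struct vring_desc) */

	for (uint64_t n = 128; n <= 1024; n *= 2) {
		uint64_t len = desc_sz * n;

		printf("queue size %4" PRIu64 ": desc table %5" PRIu64
		       " bytes, %" PRIu64 " page(s)\n",
		       n, len, (len + page - 1) / page);
	}
	return 0;
}

The avail and used rings grow with the queue size as well, so the same
partial-mapping problem applies to them.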

Ensure the entire vring memory range gets mapped. Refactor the duplicated
code for IOTLB UPDATE and IOTLB INVALIDATE and add packed virtqueue
support.
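
The duplicated checks all boil down to the same half-open interval overlap
test. A minimal sketch of that predicate (helper name hypothetical, not
taken from the patch):

/*
 * Hypothetical helper, not part of the patch: non-zero when the half-open
 * ranges [a, a + a_len) and [b, b + b_len) overlap. The patch open-codes
 * this test once per vring region (desc, avail, used), with the region
 * length derived from the queue size and the split or packed layout.
 */
#include <stdint.h>

static inline int
ranges_overlap(uint64_t a, uint64_t a_len, uint64_t b, uint64_t b_len)
{
	return a < (b + b_len) && b < (a + a_len);
}

The previous IOTLB UPDATE check only tested whether a region's start
address fell inside the message range, so an update covering just the tail
of a multi-page ring was never detected.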

Fixes: 09927b524969 ("vhost: translate ring addresses when IOMMU enabled")
Cc: maxime.coquelin@redhat.com
Cc: stable@dpdk.org

Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
---
 lib/librte_vhost/vhost_user.c | 64 +++++++++++++++++++++--------------
 1 file changed, 39 insertions(+), 25 deletions(-)
  

Comments

Adrian Moreno Sept. 17, 2019, 7:03 p.m. UTC | #1
Adding relevant maintainers. Sorry for the slip.

Adrian

On 9/6/19 2:50 PM, Adrian Moreno wrote:
> Currently, only the mapping of the vring start addresses is ensured. This
> causes errors when the vring size is larger than the IOTLB page size,
> e.g. queue sizes larger than 256 with 4 KB IOTLB pages.
> 
> Ensure the entire vring memory range gets mapped. Refactor the duplicated
> code for IOTLB UPDATE and IOTLB INVALIDATE and add packed virtqueue
> support.
> 
> Fixes: 09927b524969 ("vhost: translate ring addresses when IOMMU enabled")
> Cc: maxime.coquelin@redhat.com
> Cc: stable@dpdk.org
> 
> Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
> ---
>  lib/librte_vhost/vhost_user.c | 64 +++++++++++++++++++++--------------
>  1 file changed, 39 insertions(+), 25 deletions(-)
> 
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index 0b72648a5..168ad9e0f 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -558,11 +558,13 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  {
>  	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
>  		uint64_t vva;
> +		uint64_t req_size = *size;
>  
>  		vva = vhost_user_iotlb_cache_find(vq, ra,
>  					size, VHOST_ACCESS_RW);
> -		if (!vva)
> -			vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);
> +		if (req_size != *size)
> +			vhost_user_iotlb_miss(dev, (ra + *size),
> +					      VHOST_ACCESS_RW);
>  
>  		return vva;
>  	}
> @@ -1570,54 +1572,66 @@ vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
>  }
>  
>  static int
> -is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
> +is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
>  {
>  	struct vhost_vring_addr *ra;
> -	uint64_t start, end;
> +	uint64_t start, end, len;
>  
>  	start = imsg->iova;
>  	end = start + imsg->size;
>  
>  	ra = &vq->ring_addrs;
> -	if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
> +	len = sizeof(struct vring_desc) * vq->size;
> +	if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
>  		return 1;
> -	if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
> +
> +	len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
> +	if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
>  		return 1;
> -	if (ra->used_user_addr >= start && ra->used_user_addr < end)
> +
> +	len = sizeof(struct vring_used) +
> +	       sizeof(struct vring_used_elem) * vq->size;
> +	if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
>  		return 1;
>  
>  	return 0;
>  }
>  
>  static int
> -is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
> -				struct vhost_iotlb_msg *imsg)
> +is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
>  {
> -	uint64_t istart, iend, vstart, vend;
> +	struct vhost_vring_addr *ra;
> +	uint64_t start, end, len;
>  
> -	istart = imsg->iova;
> -	iend = istart + imsg->size - 1;
> +	start = imsg->iova;
> +	end = start + imsg->size;
>  
> -	vstart = (uintptr_t)vq->desc;
> -	vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
> -	if (vstart <= iend && istart <= vend)
> +	ra = &vq->ring_addrs;
> +	len = sizeof(struct vring_packed_desc) * vq->size;
> +	if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
>  		return 1;
>  
> -	vstart = (uintptr_t)vq->avail;
> -	vend = vstart + sizeof(struct vring_avail);
> -	vend += sizeof(uint16_t) * vq->size - 1;
> -	if (vstart <= iend && istart <= vend)
> +	len = sizeof(struct vring_packed_desc_event);
> +	if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
>  		return 1;
>  
> -	vstart = (uintptr_t)vq->used;
> -	vend = vstart + sizeof(struct vring_used);
> -	vend += sizeof(struct vring_used_elem) * vq->size - 1;
> -	if (vstart <= iend && istart <= vend)
> +	len = sizeof(struct vring_packed_desc_event);
> +	if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
>  		return 1;
>  
>  	return 0;
>  }
>  
> +static int is_vring_iotlb(struct virtio_net *dev,
> +			  struct vhost_virtqueue *vq,
> +			  struct vhost_iotlb_msg *imsg)
> +{
> +	if (vq_is_packed(dev))
> +		return is_vring_iotlb_packed(vq, imsg);
> +	else
> +		return is_vring_iotlb_split(vq, imsg);
> +}
> +
>  static int
>  vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
>  			int main_fd __rte_unused)
> @@ -1640,7 +1654,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
>  			vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
>  					len, imsg->perm);
>  
> -			if (is_vring_iotlb_update(vq, imsg))
> +			if (is_vring_iotlb(dev, vq, imsg))
>  				*pdev = dev = translate_ring_addresses(dev, i);
>  		}
>  		break;
> @@ -1651,7 +1665,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
>  			vhost_user_iotlb_cache_remove(vq, imsg->iova,
>  					imsg->size);
>  
> -			if (is_vring_iotlb_invalidate(vq, imsg))
> +			if (is_vring_iotlb(dev, vq, imsg))
>  				vring_invalidate(dev, vq);
>  		}
>  		break;
>
  
Maxime Coquelin Sept. 24, 2019, 11:58 a.m. UTC | #2
On 9/6/19 2:50 PM, Adrian Moreno wrote:
> Currently, only the mapping of the vring start addresses is ensured. This
> causes errors when the vring size is larger than the IOTLB page size,
> e.g. queue sizes larger than 256 with 4 KB IOTLB pages.
> 
> Ensure the entire vring memory range gets mapped. Refactor the duplicated
> code for IOTLB UPDATE and IOTLB INVALIDATE and add packed virtqueue
> support.
> 
> Fixes: 09927b524969 ("vhost: translate ring addresses when IOMMU enabled")
> Cc: maxime.coquelin@redhat.com
> Cc: stable@dpdk.org
> 
> Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
> ---
>  lib/librte_vhost/vhost_user.c | 64 +++++++++++++++++++++--------------
>  1 file changed, 39 insertions(+), 25 deletions(-)
> 


Thanks for the fix, Adrian, it looks good to me:

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Maxime
  
Maxime Coquelin Sept. 27, 2019, 9:58 a.m. UTC | #3
On 9/17/19 9:03 PM, Adrian Moreno wrote:
> On 9/6/19 2:50 PM, Adrian Moreno wrote:
>> Currently, only the mapping of the vring start addresses is ensured. This
>> causes errors when the vring size is larger than the IOTLB page size,
>> e.g. queue sizes larger than 256 with 4 KB IOTLB pages.
>>
>> Ensure the entire vring memory range gets mapped. Refactor the duplicated
>> code for IOTLB UPDATE and IOTLB INVALIDATE and add packed virtqueue
>> support.
>>
>> Fixes: 09927b524969 ("vhost: translate ring addresses when IOMMU enabled")
>> Cc: maxime.coquelin@redhat.com
>> Cc: stable@dpdk.org
>>
>> Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
>> ---
>>  lib/librte_vhost/vhost_user.c | 64 +++++++++++++++++++++--------------
>>  1 file changed, 39 insertions(+), 25 deletions(-)

Applied to dpdk-next-virtio/master.

Thanks,
Maxime
  

Patch

diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 0b72648a5..168ad9e0f 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -558,11 +558,13 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 {
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
 		uint64_t vva;
+		uint64_t req_size = *size;
 
 		vva = vhost_user_iotlb_cache_find(vq, ra,
 					size, VHOST_ACCESS_RW);
-		if (!vva)
-			vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);
+		if (req_size != *size)
+			vhost_user_iotlb_miss(dev, (ra + *size),
+					      VHOST_ACCESS_RW);
 
 		return vva;
 	}
@@ -1570,54 +1572,66 @@ vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
 }
 
 static int
-is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
+is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
 {
 	struct vhost_vring_addr *ra;
-	uint64_t start, end;
+	uint64_t start, end, len;
 
 	start = imsg->iova;
 	end = start + imsg->size;
 
 	ra = &vq->ring_addrs;
-	if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
+	len = sizeof(struct vring_desc) * vq->size;
+	if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
 		return 1;
-	if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
+
+	len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
+	if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
 		return 1;
-	if (ra->used_user_addr >= start && ra->used_user_addr < end)
+
+	len = sizeof(struct vring_used) +
+	       sizeof(struct vring_used_elem) * vq->size;
+	if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
 		return 1;
 
 	return 0;
 }
 
 static int
-is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
-				struct vhost_iotlb_msg *imsg)
+is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
 {
-	uint64_t istart, iend, vstart, vend;
+	struct vhost_vring_addr *ra;
+	uint64_t start, end, len;
 
-	istart = imsg->iova;
-	iend = istart + imsg->size - 1;
+	start = imsg->iova;
+	end = start + imsg->size;
 
-	vstart = (uintptr_t)vq->desc;
-	vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
-	if (vstart <= iend && istart <= vend)
+	ra = &vq->ring_addrs;
+	len = sizeof(struct vring_packed_desc) * vq->size;
+	if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
 		return 1;
 
-	vstart = (uintptr_t)vq->avail;
-	vend = vstart + sizeof(struct vring_avail);
-	vend += sizeof(uint16_t) * vq->size - 1;
-	if (vstart <= iend && istart <= vend)
+	len = sizeof(struct vring_packed_desc_event);
+	if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
 		return 1;
 
-	vstart = (uintptr_t)vq->used;
-	vend = vstart + sizeof(struct vring_used);
-	vend += sizeof(struct vring_used_elem) * vq->size - 1;
-	if (vstart <= iend && istart <= vend)
+	len = sizeof(struct vring_packed_desc_event);
+	if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
 		return 1;
 
 	return 0;
 }
 
+static int is_vring_iotlb(struct virtio_net *dev,
+			  struct vhost_virtqueue *vq,
+			  struct vhost_iotlb_msg *imsg)
+{
+	if (vq_is_packed(dev))
+		return is_vring_iotlb_packed(vq, imsg);
+	else
+		return is_vring_iotlb_split(vq, imsg);
+}
+
 static int
 vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			int main_fd __rte_unused)
@@ -1640,7 +1654,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
 					len, imsg->perm);
 
-			if (is_vring_iotlb_update(vq, imsg))
+			if (is_vring_iotlb(dev, vq, imsg))
 				*pdev = dev = translate_ring_addresses(dev, i);
 		}
 		break;
@@ -1651,7 +1665,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			vhost_user_iotlb_cache_remove(vq, imsg->iova,
 					imsg->size);
 
-			if (is_vring_iotlb_invalidate(vq, imsg))
+			if (is_vring_iotlb(dev, vq, imsg))
 				vring_invalidate(dev, vq);
 		}
 		break;