vfio: check if iova already mapped before doing map
Checks
Commit Message
From: Lipei Liang <lianglipei@huawei.com>
If we map two contiguous memory areas A and B, the current implementation
will merge these two segments into one, as area C. But if areas A and
B are mapped again, then after sorting there will be A, C, B in the mem
maps. As A and B are divided by C, these segments cannot be merged. In
other words, if adjacent segments A and B are mapped twice, there will
be two map entries corresponding to A or B.
So when we partially unmap the adjacent areas A and B, entry C will be
left over in the mem maps. Then, when mapping another memory area D whose
size differs from A's but which lies within area C, area C will be
mistakenly found by find_user_mem_maps when unmapping area D. As area D
and area C have different chunk sizes, unmapping area D fails.
Fix this by checking whether the iova is already mapped before doing the
DMA map: if the iova lies entirely within an existing entry in the mem
maps, return without a vfio map; if the iova only partially overlaps an
entry in the mem maps, fail with rte_errno set to ENOTSUP.
Fixes: 56259f7fc010 ("vfio: allow partially unmapping adjacent memory")
Cc: stable@dpdk.org
Signed-off-by: Lipei Liang <lianglipei@huawei.com>
---
lib/eal/linux/eal_vfio.c | 52 ++++++++++++++++++++++++++++++++++++++--
1 file changed, 50 insertions(+), 2 deletions(-)
@@ -216,6 +216,39 @@ copy_maps(struct user_mem_maps *user_mem_maps, struct user_mem_map *add_maps,
}
}
+/**
+ * Check if the iova area is already mapped or overlaps an existing mapping.
+ * @return
+ *   0 if the iova area does not overlap any existing mapping
+ *   1 if the iova area is already mapped
+ *   -1 if the iova area overlaps an existing mapping without matching it
+ */
+static int
+check_iova_in_map(struct user_mem_maps *user_mem_maps, uint64_t iova, uint64_t len)
+{
+ int i;
+ uint64_t iova_end = iova + len;
+ uint64_t map_iova_end;
+ uint64_t map_iova_off;
+ uint64_t map_chunk;
+
+ for (i = 0; i < user_mem_maps->n_maps; i++) {
+ map_iova_off = iova - user_mem_maps->maps[i].iova;
+ map_iova_end = user_mem_maps->maps[i].iova + user_mem_maps->maps[i].len;
+ map_chunk = user_mem_maps->maps[i].chunk;
+
+ if ((user_mem_maps->maps[i].iova >= iova_end) || (iova >= map_iova_end))
+ continue;
+
+ if ((user_mem_maps->maps[i].iova <= iova) && (iova_end <= map_iova_end) &&
+ (len == map_chunk) && ((map_iova_off % map_chunk) == 0))
+ return 1;
+
+ return -1;
+ }
+ return 0;
+}
+
/* try merging two maps into one, return 1 if succeeded */
static int
merge_map(struct user_mem_map *left, struct user_mem_map *right)
@@ -1873,6 +1906,7 @@ container_dma_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
struct user_mem_maps *user_mem_maps;
bool has_partial_unmap;
int ret = 0;
+ int iova_check = 0;
user_mem_maps = &vfio_cfg->mem_maps;
rte_spinlock_recursive_lock(&user_mem_maps->lock);
@@ -1882,6 +1916,22 @@ container_dma_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
ret = -1;
goto out;
}
+
+ /* do we have partial unmap support? */
+ has_partial_unmap = vfio_cfg->vfio_iommu_type->partial_unmap;
+ /* check if we can map this region */
+ if (!has_partial_unmap) {
+ iova_check = check_iova_in_map(user_mem_maps, iova, len);
+ if (iova_check == 1) {
+ goto out;
+ } else if (iova_check < 0) {
+ EAL_LOG(ERR, "Overlapping DMA regions not allowed");
+ rte_errno = ENOTSUP;
+ ret = -1;
+ goto out;
+ }
+ }
+
/* map the entry */
if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 1)) {
/* technically, this will fail if there are currently no devices
@@ -1895,8 +1945,6 @@ container_dma_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
ret = -1;
goto out;
}
- /* do we have partial unmap support? */
- has_partial_unmap = vfio_cfg->vfio_iommu_type->partial_unmap;
/* create new user mem map entry */
new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];