[v3] mem: allow using ASan in multi-process mode

Message ID 20231025092717.2069-1-artur.paszkiewicz@intel.com (mailing list archive)
State New
Delegated to: David Marchand
Headers
Series [v3] mem: allow using ASan in multi-process mode |

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/github-robot: build success github build: passed
ci/intel-Functional success Functional PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-compile-amd64-testing success Testing PASS
ci/iol-unit-arm64-testing success Testing PASS
ci/iol-unit-amd64-testing success Testing PASS
ci/iol-compile-arm64-testing success Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS

Commit Message

Artur Paszkiewicz Oct. 25, 2023, 9:27 a.m. UTC
  Multi-process applications operate on shared hugepage memory but each
process has its own ASan shadow region which is not synchronized with
the other processes. This causes issues when different processes try to
use the same memory because they have their own view of which addresses
are valid.

Fix it by mapping the shadow regions for memseg lists as shared memory.
The primary process is responsible for creating and removing the shared
memory objects.

Disable ASan instrumentation for triggering the page fault in
alloc_seg() because if the segment is already allocated by another
process and is marked as free in the shadow, accessing this address will
cause an ASan error.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
---
v3:
- Removed conditional compilation from eal_common_memory.c.
- Improved comments.
v2:
- Added checks for config options disabling multi-process support.
- Fixed missing unmap in legacy mode.

 lib/eal/common/eal_common_memory.c |   7 ++
 lib/eal/common/eal_private.h       |  35 ++++++++++
 lib/eal/linux/eal_memalloc.c       |  23 +++++--
 lib/eal/linux/eal_memory.c         | 101 +++++++++++++++++++++++++++++
 lib/eal/linux/meson.build          |   4 ++
 5 files changed, 164 insertions(+), 6 deletions(-)
  

Patch

diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index d9433db623..5daf53d4d2 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -263,6 +263,11 @@  eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
 	RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
 			addr, mem_sz);
 
+	if (eal_memseg_list_map_asan_shadow(msl) != 0) {
+		RTE_LOG(ERR, EAL, "Failed to map ASan shadow region for memseg list\n");
+		return -1;
+	}
+
 	return 0;
 }
 
@@ -1050,6 +1055,8 @@  rte_eal_memory_detach(void)
 				RTE_LOG(ERR, EAL, "Could not unmap memory: %s\n",
 						rte_strerror(rte_errno));
 
+		eal_memseg_list_unmap_asan_shadow(msl);
+
 		/*
 		 * we are detaching the fbarray rather than destroying because
 		 * other processes might still reference this fbarray, and we
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index 5eadba4902..6535b38637 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -300,6 +300,41 @@  eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
 void
 eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
 
+/**
+ * Map shared memory for MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ * @return
+ *  0 on success, (-1) on failure.
+ */
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline int
+eal_memseg_list_map_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+	return 0;
+}
+#endif
+
+/**
+ * Unmap the MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ */
+#ifdef RTE_MALLOC_ASAN
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline void
+eal_memseg_list_unmap_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+}
+#endif
+
 /**
  * Distribute available memory between MSLs.
  *
diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c
index f8b1588cae..a4151534a8 100644
--- a/lib/eal/linux/eal_memalloc.c
+++ b/lib/eal/linux/eal_memalloc.c
@@ -511,6 +511,21 @@  resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow,
 			grow, dirty);
 }
 
+__rte_no_asan
+static inline void
+page_fault(void *addr)
+{
+	/* We need to trigger a write to the page to enforce page fault but we
+	 * can't overwrite value that is already there, so read the old value
+	 * and write it back. Kernel populates the page with zeroes initially.
+	 *
+	 * Disable ASan instrumentation here because if the segment is already
+	 * allocated by another process and is marked as free in the shadow,
+	 * accessing this address will cause an ASan error.
+	 */
+	*(volatile int *)addr = *(volatile int *)addr;
+}
+
 static int
 alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 		struct hugepage_info *hi, unsigned int list_idx,
@@ -636,12 +651,8 @@  alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 		goto mapped;
 	}
 
-	/* we need to trigger a write to the page to enforce page fault and
-	 * ensure that page is accessible to us, but we can't overwrite value
-	 * that is already there, so read the old value, and write itback.
-	 * kernel populates the page with zeroes initially.
-	 */
-	*(volatile int *)addr = *(volatile int *)addr;
+	/* enforce page fault and ensure that page is accessible to us */
+	page_fault(addr);
 
 	iova = rte_mem_virt2iova(addr);
 	if (iova == RTE_BAD_PHYS_ADDR) {
diff --git a/lib/eal/linux/eal_memory.c b/lib/eal/linux/eal_memory.c
index 9b6f08fba8..102a57fd23 100644
--- a/lib/eal/linux/eal_memory.c
+++ b/lib/eal/linux/eal_memory.c
@@ -41,6 +41,7 @@ 
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 #include "eal_options.h"
+#include "malloc_elem.h"
 
 #define PFN_MASK_SIZE	8
 
@@ -1469,6 +1470,7 @@  eal_legacy_hugepage_init(void)
 		if (msl->memseg_arr.count > 0)
 			continue;
 		/* this is an unused list, deallocate it */
+		eal_memseg_list_unmap_asan_shadow(msl);
 		mem_sz = msl->len;
 		munmap(msl->base_va, mem_sz);
 		msl->base_va = NULL;
@@ -1956,3 +1958,102 @@  rte_eal_memseg_init(void)
 #endif
 			memseg_secondary_init();
 }
+
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl)
+{
+	const struct internal_config *internal_conf =
+			eal_get_internal_configuration();
+	void *addr;
+	void *shadow_addr;
+	size_t shadow_sz;
+	int shm_oflag;
+	char shm_path[PATH_MAX];
+	int shm_fd;
+	int ret = 0;
+
+	if (!msl->heap)
+		return 0;
+
+	/* these options imply no secondary process support */
+	if (internal_conf->hugepage_file.unlink_before_mapping ||
+	    internal_conf->no_shconf || internal_conf->no_hugetlbfs) {
+		RTE_ASSERT(rte_eal_process_type() != RTE_PROC_SECONDARY);
+		return 0;
+	}
+
+	shadow_addr = ASAN_MEM_TO_SHADOW(msl->base_va);
+	shadow_sz = msl->len >> ASAN_SHADOW_SCALE;
+
+	snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+		eal_get_hugefile_prefix(), msl->memseg_arr.name);
+
+	shm_oflag = O_RDWR;
+	if (internal_conf->process_type == RTE_PROC_PRIMARY)
+		shm_oflag |= O_CREAT | O_TRUNC;
+
+	shm_fd = shm_open(shm_path, shm_oflag, 0600);
+	if (shm_fd == -1) {
+		RTE_LOG(DEBUG, EAL, "shadow shm_open() failed: %s\n",
+			strerror(errno));
+		return -1;
+	}
+
+	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+		ret = ftruncate(shm_fd, shadow_sz);
+		if (ret == -1) {
+			RTE_LOG(DEBUG, EAL, "shadow ftruncate() failed: %s\n",
+				strerror(errno));
+			goto out;
+		}
+	}
+
+	addr = mmap(shadow_addr, shadow_sz, PROT_READ | PROT_WRITE,
+		    MAP_SHARED | MAP_FIXED, shm_fd, 0);
+	if (addr == MAP_FAILED) {
+		RTE_LOG(DEBUG, EAL, "shadow mmap() failed: %s\n",
+			strerror(errno));
+		ret = -1;
+		goto out;
+	}
+
+	if (addr != shadow_addr) {
+		RTE_LOG(DEBUG, EAL, "wrong shadow mmap() address\n");
+		munmap(addr, shadow_sz);
+		ret = -1;
+	}
+out:
+	close(shm_fd);
+	if (ret != 0) {
+		if (internal_conf->process_type == RTE_PROC_PRIMARY)
+			shm_unlink(shm_path);
+	}
+
+	return ret;
+}
+
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl)
+{
+	const struct internal_config *internal_conf =
+			eal_get_internal_configuration();
+
+	if (!msl->heap || internal_conf->hugepage_file.unlink_before_mapping ||
+	    internal_conf->no_shconf || internal_conf->no_hugetlbfs)
+		return;
+
+	if (munmap(ASAN_MEM_TO_SHADOW(msl->base_va),
+		   msl->len >> ASAN_SHADOW_SCALE) != 0)
+		RTE_LOG(ERR, EAL, "Could not unmap ASan shadow memory: %s\n",
+			strerror(errno));
+	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+		char shm_path[PATH_MAX];
+
+		snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+			 eal_get_hugefile_prefix(),
+			 msl->memseg_arr.name);
+		shm_unlink(shm_path);
+	}
+}
+#endif
diff --git a/lib/eal/linux/meson.build b/lib/eal/linux/meson.build
index e99ebed256..1e8a48c8d3 100644
--- a/lib/eal/linux/meson.build
+++ b/lib/eal/linux/meson.build
@@ -23,3 +23,7 @@  deps += ['kvargs', 'telemetry']
 if has_libnuma
     dpdk_conf.set10('RTE_EAL_NUMA_AWARE_HUGEPAGES', true)
 endif
+
+if dpdk_conf.has('RTE_MALLOC_ASAN')
+    ext_deps += cc.find_library('rt')
+endif