[v16,3/6] memarea: support alloc and free API

Message ID 20230710064923.19849-4-fengchengwen@huawei.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Headers
Series introduce memarea library |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

fengchengwen July 10, 2023, 6:49 a.m. UTC
  This patch supports the rte_memarea_alloc() and rte_memarea_free() APIs.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Reviewed-by: Dongdong Liu <liudongdong3@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 doc/guides/prog_guide/memarea_lib.rst |   6 +
 lib/memarea/memarea_private.h         |  10 ++
 lib/memarea/rte_memarea.c             | 164 ++++++++++++++++++++++++++
 lib/memarea/rte_memarea.h             |  44 +++++++
 lib/memarea/version.map               |   2 +
 5 files changed, 226 insertions(+)
  

Comments

Anatoly Burakov July 17, 2023, 1:33 p.m. UTC | #1
On 7/10/2023 7:49 AM, Chengwen Feng wrote:
> This patch supports rte_memarea_alloc() and rte_memarea_free() API.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Reviewed-by: Dongdong Liu <liudongdong3@huawei.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---

Hi,

Kind of a general question: the allocation code doesn't seem to take 
into account that since we're potentially reusing memory between 
alloc/free calls, the memory may/will be dirty, and any subsequent 
alloc/free call may require memset(0) from the user. This is the kind of 
thing C programmers are familiar with already, so it's not a big deal, 
I'm just wondering what your intention was with this API: do you think 
it should be user's responsibility to erase memory before using it, or 
can we make it part of API guarantees? If it's the latter, then it would 
be good to have some sort of note in documentation explicitly mentioning 
that the memory returned is not guaranteed to be zeroed out?

> +static inline void
> +memarea_unlock(struct rte_memarea *ma)
> +{
> +	if (ma->init.mt_safe)
> +		rte_spinlock_unlock(&ma->lock);
> +}
> +
> +/**
> + * Check cookie or panic.
> + *
> + * @param status
> + *   - 0: object is supposed to be available.
> + *   - 1: object is supposed to be allocated.
> + *   - 2: just check that cookie is valid (available or allocated).
> + */

Same as with previous patches: I feel like it would've been better if 
status were #define'd somewhere, so that we don't use explicit numbers 
in code.

Acked-by: Anatoly Burakov <anatoly.burakov@intel.com>
  

Patch

diff --git a/doc/guides/prog_guide/memarea_lib.rst b/doc/guides/prog_guide/memarea_lib.rst
index bf19090294..157baf3c7e 100644
--- a/doc/guides/prog_guide/memarea_lib.rst
+++ b/doc/guides/prog_guide/memarea_lib.rst
@@ -33,6 +33,12 @@  returns the pointer to the created memarea or ``NULL`` if the creation failed.
 
 The ``rte_memarea_destroy()`` function is used to destroy a memarea.
 
+The ``rte_memarea_alloc()`` function is used to allocate one memory object
+from the memarea.
+
+The ``rte_memarea_free()`` function is used to free one memory object which
+was allocated by ``rte_memarea_alloc()``.
+
 Debug Mode
 ----------
 
diff --git a/lib/memarea/memarea_private.h b/lib/memarea/memarea_private.h
index 384c6dde9d..cef7d0f859 100644
--- a/lib/memarea/memarea_private.h
+++ b/lib/memarea/memarea_private.h
@@ -22,10 +22,20 @@ 
 #define MEMAREA_OBJECT_GET_SIZE(hdr) \
 		((uintptr_t)TAILQ_NEXT((hdr), obj_next) - (uintptr_t)(hdr) - \
 		 sizeof(struct memarea_objhdr) - sizeof(struct memarea_objtlr))
+#define MEMAREA_SPLIT_OBJECT_MIN_SIZE \
+		(sizeof(struct memarea_objhdr) + MEMAREA_OBJECT_SIZE_ALIGN + \
+		 sizeof(struct memarea_objtlr))
+#define MEMAREA_SPLIT_OBJECT_GET_HEADER(hdr, alloc_sz) \
+		RTE_PTR_ADD(hdr, sizeof(struct memarea_objhdr) + alloc_sz + \
+			    sizeof(struct memarea_objtlr))
 #else
 #define MEMAREA_OBJECT_GET_SIZE(hdr) \
 		((uintptr_t)TAILQ_NEXT((hdr), obj_next) - (uintptr_t)(hdr) - \
 		 sizeof(struct memarea_objhdr))
+#define MEMAREA_SPLIT_OBJECT_MIN_SIZE \
+		(sizeof(struct memarea_objhdr) + MEMAREA_OBJECT_SIZE_ALIGN)
+#define MEMAREA_SPLIT_OBJECT_GET_HEADER(hdr, alloc_sz) \
+		RTE_PTR_ADD(hdr, sizeof(struct memarea_objhdr) + alloc_sz)
 #endif
 
 struct memarea_objhdr {
diff --git a/lib/memarea/rte_memarea.c b/lib/memarea/rte_memarea.c
index 69ffeac4e4..d941e64a1e 100644
--- a/lib/memarea/rte_memarea.c
+++ b/lib/memarea/rte_memarea.c
@@ -2,8 +2,10 @@ 
  * Copyright(c) 2023 HiSilicon Limited
  */
 
+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <sys/queue.h>
 
 #include <rte_common.h>
 #include <rte_errno.h>
@@ -94,6 +96,8 @@  memarea_alloc_area(const struct rte_memarea_param *init)
 					init->heap.socket_id);
 	else if (init->source == RTE_MEMAREA_SOURCE_LIBC)
 		ptr = memarea_alloc_from_libc(init->total_sz);
+	else if (init->source == RTE_MEMAREA_SOURCE_MEMAREA)
+		ptr = rte_memarea_alloc(init->ma.src, init->total_sz);
 
 	return ptr;
 }
@@ -105,6 +109,8 @@  memarea_free_area(const struct rte_memarea_param *init, void *ptr)
 		rte_free(ptr);
 	else if (init->source == RTE_MEMAREA_SOURCE_LIBC)
 		free(ptr);
+	else if (init->source == RTE_MEMAREA_SOURCE_MEMAREA)
+		rte_memarea_free(init->ma.src, ptr);
 }
 
 /**
@@ -219,3 +225,161 @@  rte_memarea_destroy(struct rte_memarea *ma)
 	memarea_free_area(&ma->init, ma->area_base);
 	rte_free(ma);
 }
+
+static inline void
+memarea_lock(struct rte_memarea *ma)
+{
+	if (ma->init.mt_safe)
+		rte_spinlock_lock(&ma->lock);
+}
+
+static inline void
+memarea_unlock(struct rte_memarea *ma)
+{
+	if (ma->init.mt_safe)
+		rte_spinlock_unlock(&ma->lock);
+}
+
+/**
+ * Check cookie or panic.
+ *
+ * @param status
+ *   - 0: object is supposed to be available.
+ *   - 1: object is supposed to be allocated.
+ *   - 2: just check that cookie is valid (available or allocated).
+ */
+static inline void
+memarea_check_cookie(const struct rte_memarea *ma, const struct memarea_objhdr *hdr, int status)
+{
+#ifdef RTE_LIBRTE_MEMAREA_DEBUG
+	static const char *const str[] = { "PASS", "FAILED" };
+	struct memarea_objtlr *tlr;
+	bool hdr_fail, tlr_fail;
+
+	if (hdr == ma->guard_hdr)
+		return;
+
+	tlr = RTE_PTR_SUB(TAILQ_NEXT(hdr, obj_next), sizeof(struct memarea_objtlr));
+	hdr_fail = (status == 0 && hdr->cookie != MEMAREA_OBJECT_HEADER_AVAILABLE_COOKIE) ||
+		   (status == 1 && hdr->cookie != MEMAREA_OBJECT_HEADER_ALLOCATED_COOKIE) ||
+		   (status == 2 && (hdr->cookie != MEMAREA_OBJECT_HEADER_AVAILABLE_COOKIE &&
+				    hdr->cookie != MEMAREA_OBJECT_HEADER_ALLOCATED_COOKIE));
+	tlr_fail = (tlr->cookie != MEMAREA_OBJECT_TRAILER_COOKIE);
+	if (!hdr_fail && !tlr_fail)
+		return;
+
+	rte_panic("MEMAREA: %s check cookies failed! addr-%p header-cookie<0x%" PRIx64 " %s> trailer-cookie<0x%" PRIx64 " %s>\n",
+		ma->init.name, RTE_PTR_ADD(hdr, sizeof(struct memarea_objhdr)),
+		hdr->cookie, str[hdr_fail], tlr->cookie, str[tlr_fail]);
+#else
+	RTE_SET_USED(ma);
+	RTE_SET_USED(hdr);
+	RTE_SET_USED(status);
+#endif
+}
+
+static inline void
+memarea_split_object(struct rte_memarea *ma, struct memarea_objhdr *hdr, size_t alloc_sz)
+{
+	struct memarea_objhdr *split_hdr;
+
+	split_hdr = MEMAREA_SPLIT_OBJECT_GET_HEADER(hdr, alloc_sz);
+	memarea_set_cookie(split_hdr, 2);
+	TAILQ_INSERT_AFTER(&ma->obj_list, hdr, split_hdr, obj_next);
+	TAILQ_INSERT_AFTER(&ma->avail_list, hdr, split_hdr, avail_next);
+}
+
+void *
+rte_memarea_alloc(struct rte_memarea *ma, size_t size)
+{
+	size_t align_sz = RTE_ALIGN(size, MEMAREA_OBJECT_SIZE_ALIGN);
+	struct memarea_objhdr *hdr;
+	size_t avail_sz;
+	void *ptr = NULL;
+
+	if (ma == NULL || size == 0 || align_sz < size) {
+		rte_errno = EINVAL;
+		return ptr;
+	}
+
+	memarea_lock(ma);
+
+	/** traverse every available object, return the first satisfied one. */
+	TAILQ_FOREACH(hdr, &ma->avail_list, avail_next) {
+		/** 1st: check whether the object size meets. */
+		memarea_check_cookie(ma, hdr, 0);
+		avail_sz = MEMAREA_OBJECT_GET_SIZE(hdr);
+		if (avail_sz < align_sz)
+			continue;
+
+		/** 2nd: if the object size is too long, a new object can be split. */
+		if (avail_sz - align_sz > MEMAREA_SPLIT_OBJECT_MIN_SIZE)
+			memarea_split_object(ma, hdr, align_sz);
+
+		/** 3rd: allocate successful. */
+		TAILQ_REMOVE(&ma->avail_list, hdr, avail_next);
+		MEMAREA_OBJECT_MARK_ALLOCATED(hdr);
+		memarea_set_cookie(hdr, 1);
+
+		ptr = RTE_PTR_ADD(hdr, sizeof(struct memarea_objhdr));
+		break;
+	}
+
+	memarea_unlock(ma);
+
+	if (ptr == NULL)
+		rte_errno = ENOMEM;
+	return ptr;
+}
+
+static inline void
+memarea_merge_object(struct rte_memarea *ma, struct memarea_objhdr *curr,
+		   struct memarea_objhdr *next)
+{
+	RTE_SET_USED(curr);
+	TAILQ_REMOVE(&ma->obj_list, next, obj_next);
+	TAILQ_REMOVE(&ma->avail_list, next, avail_next);
+	memarea_set_cookie(next, 4);
+}
+
+void
+rte_memarea_free(struct rte_memarea *ma, void *ptr)
+{
+	struct memarea_objhdr *hdr, *prev, *next;
+
+	if (ma == NULL || ptr == NULL) {
+		rte_errno = EINVAL;
+		return;
+	}
+
+	hdr = RTE_PTR_SUB(ptr, sizeof(struct memarea_objhdr));
+	if (!MEMAREA_OBJECT_IS_ALLOCATED(hdr)) {
+		RTE_MEMAREA_LOG(ERR, "detect invalid object in %s!", ma->init.name);
+		rte_errno = EFAULT;
+		return;
+	}
+	memarea_check_cookie(ma, hdr, 1);
+
+	memarea_lock(ma);
+
+	/** 1st: add to avail list. */
+	TAILQ_INSERT_HEAD(&ma->avail_list, hdr, avail_next);
+	memarea_set_cookie(hdr, 0);
+
+	/** 2nd: merge if previous object is avail. */
+	prev = TAILQ_PREV(hdr, memarea_objhdr_list, obj_next);
+	if (prev != NULL && !MEMAREA_OBJECT_IS_ALLOCATED(prev)) {
+		memarea_check_cookie(ma, prev, 0);
+		memarea_merge_object(ma, prev, hdr);
+		hdr = prev;
+	}
+
+	/** 3rd: merge if next object is avail. */
+	next = TAILQ_NEXT(hdr, obj_next);
+	if (next != NULL && !MEMAREA_OBJECT_IS_ALLOCATED(next)) {
+		memarea_check_cookie(ma, next, 0);
+		memarea_merge_object(ma, hdr, next);
+	}
+
+	memarea_unlock(ma);
+}
diff --git a/lib/memarea/rte_memarea.h b/lib/memarea/rte_memarea.h
index 1d4381efd7..f771fcaf68 100644
--- a/lib/memarea/rte_memarea.h
+++ b/lib/memarea/rte_memarea.h
@@ -134,6 +134,50 @@  struct rte_memarea *rte_memarea_create(const struct rte_memarea_param *init);
 __rte_experimental
 void rte_memarea_destroy(struct rte_memarea *ma);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate memory from memarea.
+ *
+ * Allocate one memory object from the memarea.
+ *
+ * @param ma
+ *   The pointer of memarea.
+ * @param size
+ *   The memory size to be allocated.
+ *
+ * @return
+ *   - NULL on error. Not enough memory, or invalid arguments (see the
+ *     rte_errno).
+ *   - Otherwise, the pointer to the allocated object.
+ */
+__rte_experimental
+void *rte_memarea_alloc(struct rte_memarea *ma, size_t size);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Free memory to memarea.
+ *
+ * Free one memory object to the memarea.
+ * @note The memory object must have been returned by a previous call to
+ * rte_memarea_alloc(); it must be freed to the same memarea from which it
+ * was previously allocated. The behaviour of rte_memarea_free() is
+ * undefined if the memarea or pointer does not match these requirements.
+ *
+ * @param ma
+ *   The pointer of memarea. If the ma is NULL, the function does nothing.
+ * @param ptr
+ *   The pointer of memory object which need be freed. If the pointer is NULL,
+ *   the function does nothing.
+ *
+ * @note The rte_errno is set if free failed.
+ */
+__rte_experimental
+void rte_memarea_free(struct rte_memarea *ma, void *ptr);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/memarea/version.map b/lib/memarea/version.map
index f36a04d7cf..effbd0b488 100644
--- a/lib/memarea/version.map
+++ b/lib/memarea/version.map
@@ -1,8 +1,10 @@ 
 EXPERIMENTAL {
 	global:
 
+	rte_memarea_alloc;
 	rte_memarea_create;
 	rte_memarea_destroy;
+	rte_memarea_free;
 
 	local: *;
 };