[v8,3/9] memarea: support alloc/free/update-refcnt API

Message ID 20221011121720.2657-4-fengchengwen@huawei.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: introduce memarea library

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

fengchengwen Oct. 11, 2022, 12:17 p.m. UTC
  This patch supports rte_memarea_alloc()/rte_memarea_free()/
rte_memarea_update_refcnt() API.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 doc/guides/prog_guide/memarea_lib.rst |  10 ++
 lib/memarea/memarea_private.h         |   3 +
 lib/memarea/rte_memarea.c             | 155 ++++++++++++++++++++++++++
 lib/memarea/rte_memarea.h             |  56 ++++++++++
 lib/memarea/version.map               |   3 +
 5 files changed, 227 insertions(+)
  

Comments

Dmitry Kozlyuk Oct. 11, 2022, 3:58 p.m. UTC | #1
2022-10-11 12:17 (UTC+0000), Chengwen Feng:
> This patch supports rte_memarea_alloc()/rte_memarea_free()/
> rte_memarea_update_refcnt() API.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
>  doc/guides/prog_guide/memarea_lib.rst |  10 ++
>  lib/memarea/memarea_private.h         |   3 +
>  lib/memarea/rte_memarea.c             | 155 ++++++++++++++++++++++++++
>  lib/memarea/rte_memarea.h             |  56 ++++++++++
>  lib/memarea/version.map               |   3 +
>  5 files changed, 227 insertions(+)
> 
> diff --git a/doc/guides/prog_guide/memarea_lib.rst b/doc/guides/prog_guide/memarea_lib.rst
> index 85ad57145f..a9c58dc44d 100644
> --- a/doc/guides/prog_guide/memarea_lib.rst
> +++ b/doc/guides/prog_guide/memarea_lib.rst
> @@ -33,6 +33,16 @@ returns the pointer to the created memarea or ``NULL`` if the creation failed.
>  
>  The ``rte_memarea_destroy()`` function is used to destroy a memarea.
>  
> +The ``rte_memarea_alloc()`` function is used to alloc one memory object from
> +the memarea.
> +
> +The ``rte_memarea_free()`` function is used to free one memory object which
> +allocated by ``rte_memarea_alloc()``.
> +
> +The ``rte_memarea_update_refcnt()`` function is used to update the memory
> +object's reference count, if the count reaches zero, the memory object will
> +be freed to memarea.
> +
>  Reference
>  ---------
>  
> diff --git a/lib/memarea/memarea_private.h b/lib/memarea/memarea_private.h
> index 59be9c1d00..f5accf2987 100644
> --- a/lib/memarea/memarea_private.h
> +++ b/lib/memarea/memarea_private.h
> @@ -28,6 +28,9 @@ struct rte_memarea {
>  	void                    *area_addr;
>  	struct memarea_elem_list elem_list;
>  	struct memarea_elem_list free_list;
> +
> +	uint64_t alloc_fails;
> +	uint64_t refcnt_check_fails;
>  } __rte_cache_aligned;
>  
>  #endif /* MEMAREA_PRIVATE_H */
> diff --git a/lib/memarea/rte_memarea.c b/lib/memarea/rte_memarea.c
> index 85975029e2..8ad1c0acb5 100644
> --- a/lib/memarea/rte_memarea.c
> +++ b/lib/memarea/rte_memarea.c
> @@ -4,6 +4,7 @@
>  
>  #include <stdio.h>
>  #include <stdlib.h>
> +#include <sys/queue.h>
>  
>  #include <rte_common.h>
>  #include <rte_log.h>
> @@ -94,6 +95,8 @@ memarea_alloc_area(const struct rte_memarea_param *init)
>  		ptr = memarea_alloc_from_libc(init->total_sz);
>  	else if (init->source == RTE_MEMAREA_SOURCE_USER)
>  		ptr = init->user_addr;
> +	else if (init->source == RTE_MEMAREA_SOURCE_MEMAREA)
> +		ptr = rte_memarea_alloc(init->src_memarea, init->total_sz, 0);
>  
>  	if (ptr == NULL)
>  		RTE_LOG(ERR, MEMAREA, "memarea: %s alloc memory area fail!\n", init->name);
> @@ -146,6 +149,8 @@ memarea_free_area(struct rte_memarea *ma)
>  		rte_free(ma->area_addr);
>  	else if (ma->init.source == RTE_MEMAREA_SOURCE_LIBC)
>  		free(ma->area_addr);
> +	else if (ma->init.source == RTE_MEMAREA_SOURCE_MEMAREA)
> +		rte_memarea_free(ma->init.src_memarea, ma->area_addr);
>  }
>  
>  void
> @@ -156,3 +161,153 @@ rte_memarea_destroy(struct rte_memarea *ma)
>  	memarea_free_area(ma);
>  	rte_free(ma);
>  }
> +
> +static inline void
> +memarea_lock(struct rte_memarea *ma)
> +{
> +	if (ma->init.mt_safe)
> +		rte_spinlock_lock(&ma->lock);
> +}
> +
> +static inline void
> +memarea_unlock(struct rte_memarea *ma)
> +{
> +	if (ma->init.mt_safe)
> +		rte_spinlock_unlock(&ma->lock);
> +}
> +
> +static inline bool
> +memarea_whether_add_node(size_t free_size, size_t need_size)
> +{
> +	size_t align_size = RTE_ALIGN_CEIL(need_size, RTE_CACHE_LINE_SIZE);
> +	return free_size > align_size && (free_size - align_size) > sizeof(struct memarea_elem);
> +}
> +
> +static inline void
> +memarea_add_node(struct rte_memarea *ma, struct memarea_elem *elem, size_t need_size)
> +{
> +	size_t align_size = RTE_ALIGN_CEIL(need_size, RTE_CACHE_LINE_SIZE);
> +	struct memarea_elem *new_elem;
> +	new_elem = (struct memarea_elem *)RTE_PTR_ADD(elem, sizeof(struct memarea_elem) +
> +							    align_size);
> +	new_elem->size = elem->size - align_size - sizeof(struct memarea_elem);
> +	new_elem->magic = MEMAREA_AVAILABLE_ELEM_MAGIC;
> +	new_elem->cookie = MEMAREA_AVAILABLE_ELEM_COOKIE;
> +	new_elem->refcnt = 0;
> +	TAILQ_INSERT_AFTER(&ma->elem_list, elem, new_elem, elem_node);
> +	TAILQ_INSERT_AFTER(&ma->free_list, elem, new_elem, free_node);
> +	elem->size = align_size;
> +}
> +
> +void *
> +rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie)
> +{
> +	struct memarea_elem *elem;
> +	void *ptr = NULL;
> +
> +	if (unlikely(ma == NULL || size == 0))
> +		return NULL;
> +
> +	memarea_lock(ma);
> +	TAILQ_FOREACH(elem, &ma->free_list, free_node) {
> +		if (unlikely(elem->magic != MEMAREA_AVAILABLE_ELEM_MAGIC))
> +			break;
> +		if (elem->size < size)
> +			continue;
> +		if (memarea_whether_add_node(elem->size, size))
> +			memarea_add_node(ma, elem, size);
> +		elem->magic = MEMAREA_ALLOCATED_ELEM_MAGIC;
> +		elem->cookie = cookie;
> +		elem->refcnt = 1;
> +		TAILQ_REMOVE(&ma->free_list, elem, free_node);
> +		ptr = RTE_PTR_ADD(elem, sizeof(struct memarea_elem));
> +		break;
> +	}
> +	if (unlikely(ptr == NULL))
> +		ma->alloc_fails++;
> +	memarea_unlock(ma);
> +
> +	return ptr;
> +}
> +
> +void
> +rte_memarea_free(struct rte_memarea *ma, void *ptr)
> +{
> +	rte_memarea_update_refcnt(ma, ptr, -1);
> +}
> +
> +static inline void
> +memarea_merge_node(struct rte_memarea *ma, struct memarea_elem *curr,
> +		   struct memarea_elem *next, bool del_next_from_free,
> +		   bool add_curr_to_free)
> +{
> +	curr->size += next->size + sizeof(struct memarea_elem);
> +	next->size = 0;
> +	next->magic = 0;
> +	next->cookie = 0;
> +	TAILQ_REMOVE(&ma->elem_list, next, elem_node);
> +	if (del_next_from_free)
> +		TAILQ_REMOVE(&ma->free_list, next, free_node);
> +	if (add_curr_to_free) {
> +		curr->magic = MEMAREA_AVAILABLE_ELEM_MAGIC;
> +		curr->cookie = MEMAREA_AVAILABLE_ELEM_COOKIE;
> +		TAILQ_INSERT_TAIL(&ma->free_list, curr, free_node);
> +	}
> +}
> +
> +static inline void
> +memarea_free_elem(struct rte_memarea *ma, struct memarea_elem *elem)
> +{
> +	struct memarea_elem *prev, *next;
> +	bool merged = false;
> +	prev = TAILQ_PREV(elem, memarea_elem_list, elem_node);
> +	next = TAILQ_NEXT(elem, elem_node);
> +	if (prev != NULL && prev->refcnt == 0) {
> +		memarea_merge_node(ma, prev, elem, false, false);
> +		elem = prev;
> +		merged = true;
> +	}
> +	if (next != NULL && next->refcnt == 0) {
> +		memarea_merge_node(ma, elem, next, true, !merged);
> +		merged = true;
> +	}
> +	if (!merged) {
> +		elem->magic = MEMAREA_AVAILABLE_ELEM_MAGIC;
> +		elem->cookie = MEMAREA_AVAILABLE_ELEM_COOKIE;
> +		TAILQ_INSERT_TAIL(&ma->free_list, elem, free_node);
> +	}
> +}
> +
> +void
> +rte_memarea_update_refcnt(struct rte_memarea *ma, void *ptr, int16_t value)
> +{
> +	struct memarea_elem *elem = (struct memarea_elem *)RTE_PTR_SUB(ptr,
> +							    sizeof(struct memarea_elem));
> +
> +	if (unlikely(ma == NULL || ptr == NULL))
> +		return;

This check is probably not needed:
the user is only supposed to change refcnt of a valid object.
Since rte_memarea_free(ma, NULL) must be a no-op, though, the ptr == NULL check should be there.

> +
> +	memarea_lock(ma);
> +	if (unlikely(elem->magic != MEMAREA_ALLOCATED_ELEM_MAGIC)) {
> +		RTE_LOG(ERR, MEMAREA, "memarea: %s magic: 0x%x check fail!\n",
> +			ma->init.name, elem->magic);

Element pointer might be useful here,
because cookie will likely not be unique for each object.
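For illustration only, a variant of that log which also prints the element
address might look like the sketch below (it assumes the MEMAREA logtype and
the structures from this series; the helper name is made up and it is not
part of the posted patch):

    /* Hypothetical helper, only to illustrate the suggestion above:
     * log the element address together with the bad magic value.
     */
    static void
    memarea_log_bad_magic(struct rte_memarea *ma, struct memarea_elem *elem)
    {
    	RTE_LOG(ERR, MEMAREA, "memarea: %s elem: %p magic: 0x%x check fail!\n",
    		ma->init.name, (void *)elem, elem->magic);
    }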

> +		memarea_unlock(ma);
> +		return;
> +	}
> +
> +	if (unlikely(elem->refcnt <= 0 || elem->refcnt + value < 0)) {
> +		RTE_LOG(ERR, MEMAREA,
> +			"memarea: %s cookie: 0x%x curr refcnt: %d update refcnt: %d check fail!\n",
> +			ma->init.name, elem->cookie, elem->refcnt, value);

Same as above.

> +		ma->refcnt_check_fails++;
> +		if (elem->refcnt > 0)
> +			elem->refcnt += value;

This may change refcnt of some unrelated block allocated concurrently,
producing another bug on top of the bug that has led to this branch
(the case is different from the race described below).

> +		memarea_unlock(ma);
> +		return;
> +	}
> +
> +	elem->refcnt += value;
> +	if (elem->refcnt == 0)
> +		memarea_free_elem(ma, elem);
> +	memarea_unlock(ma);
> +}

This is not thread-safe:

1. Thread A wants to decrement refcnt and takes the lock.
2. Thread B wants to increment refcnt and waits for the lock.
3. Thread C wants to allocate and waits for the lock.
4. Thread A decrements refcnt, frees the object, and releases the lock.
5. Thread C takes the lock, allocates the same element, and releases the lock.
6. Thread B takes the lock and increments refcnt of the new element allocated
by thread C and not the old element it had a pointer to in the beginning.

Probably some memarea generation counter could solve this.
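As a rough illustration of that idea (the struct layout, field names and
handle type below are invented for the sketch and are not part of the patch):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch only: each element carries a generation number that is bumped
     * every time the element is freed back to the memarea.
     */
    struct memarea_elem_sketch {
    	size_t   size;
    	uint32_t magic;
    	uint32_t cookie;
    	int16_t  refcnt;
    	uint32_t gen;
    };

    /* An allocation would hand back the generation it observed, and refcnt
     * updates would carry it along.
     */
    struct memarea_handle_sketch {
    	struct memarea_elem_sketch *elem;
    	uint32_t gen;
    };

    /* Checked under the memarea lock: reject the update if the element was
     * freed and reallocated after the handle was taken.
     */
    static inline bool
    memarea_handle_valid(const struct memarea_handle_sketch *h)
    {
    	return h->elem->gen == h->gen;
    }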

I still tend to think that reference counting is too usage-specific
to be a part of a generic memory management library,
though others don't seem to object.

> diff --git a/lib/memarea/rte_memarea.h b/lib/memarea/rte_memarea.h
> index ac2d63a4d3..f866fa7b67 100644
> --- a/lib/memarea/rte_memarea.h
> +++ b/lib/memarea/rte_memarea.h
> @@ -129,6 +129,62 @@ struct rte_memarea *rte_memarea_create(const struct rte_memarea_param *init);
>  __rte_experimental
>  void rte_memarea_destroy(struct rte_memarea *ma);
>  
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Allocate memory from memarea.
> + *
> + * Allocate one memory object from the memarea.
> + *
> + * @param ma
> + *   The pointer of memarea.
> + * @param size
> + *   The memory size to be allocated.
> + * @param cookie
> + *   User-provided footprint which could used to debug memory leak.
> + *
> + * @return
> + *   Non-NULL on success. Otherwise NULL is returned.

Also NULL when size == 0 (OK, like malloc) or when ma == NULL (not obvious).

> + */
> +__rte_experimental
> +void *rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Free memory to memarea.
> + *
> + * Free one memory object to the memarea.

Not really if refcnt > 1.
Should it assert somehow that refcnt <= 1?

The second sentence adds nothing to the first.

> + *
> + * @param ma
> + *   The pointer of memarea.
> + * @param ptr
> + *   The pointer of memory object which need be freed.
> + */
> +__rte_experimental
> +void rte_memarea_free(struct rte_memarea *ma, void *ptr);
  
fengchengwen Oct. 12, 2022, 7:28 a.m. UTC | #2
Hi Dmitry,

On 2022/10/11 23:58, Dmitry Kozlyuk wrote:
> 2022-10-11 12:17 (UTC+0000), Chengwen Feng:
>> This patch supports rte_memarea_alloc()/rte_memarea_free()/
>> rte_memarea_update_refcnt() API.
>>

...

>> +
>> +void
>> +rte_memarea_update_refcnt(struct rte_memarea *ma, void *ptr, int16_t value)
>> +{
>> +	struct memarea_elem *elem = (struct memarea_elem *)RTE_PTR_SUB(ptr,
>> +							    sizeof(struct memarea_elem));
>> +
>> +	if (unlikely(ma == NULL || ptr == NULL))
>> +		return;
> 
> This check is probably not needed:
> the user is only supposed to change refcnt of a valid object.
> Since rte_memarea_free(ma, NULL) must be a no-op, though, the ptr == NULL check should be there.

okay

> 
>> +
>> +	memarea_lock(ma);
>> +	if (unlikely(elem->magic != MEMAREA_ALLOCATED_ELEM_MAGIC)) {
>> +		RTE_LOG(ERR, MEMAREA, "memarea: %s magic: 0x%x check fail!\n",
>> +			ma->init.name, elem->magic);
> 
> Element pointer might be useful here,
> because cookie will likely not be unique for each object.

Logging the pointer with RTE_LOG may lead to security risks.
I tend to keep the status quo.

> 
>> +		memarea_unlock(ma);
>> +		return;
>> +	}
>> +
>> +	if (unlikely(elem->refcnt <= 0 || elem->refcnt + value < 0)) {
>> +		RTE_LOG(ERR, MEMAREA,
>> +			"memarea: %s cookie: 0x%x curr refcnt: %d update refcnt: %d check fail!\n",
>> +			ma->init.name, elem->cookie, elem->refcnt, value);
> 
> Same as above.
> 
>> +		ma->refcnt_check_fails++;
>> +		if (elem->refcnt > 0)
>> +			elem->refcnt += value;
> 
> This may change refcnt of some unrelated block allocated concurrently,
> producing another bug on top of the bug that has led to this branch
> (the case is different from the race described below).
> 
>> +		memarea_unlock(ma);
>> +		return;
>> +	}
>> +
>> +	elem->refcnt += value;
>> +	if (elem->refcnt == 0)
>> +		memarea_free_elem(ma, elem);
>> +	memarea_unlock(ma);
>> +}
> 
> This is not thread-safe:
> 
> 1. Thread A wants to decrement refcnt and takes the lock.
> 2. Thread B wants to increment refcnt and waits for the lock.

The refcnt must be at least 2 because two threads hold the object.

> 3. Thread C wants to allocate and waits for the lock.
> 4. Thread A decrements refcnt, frees the object, and releases the lock.

The refcnt will be updated to 1, and the object will not be freed.

> 5. Thread C takes the lock, allocates the same element, and releases the lock.
> 6. Thread B takes the lock and increments refcnt of the new element allocated
> by thread C and not the old element it had a pointer to in the beginning.

Because the object's refcnt is at least 1, the refcnt increment will succeed.

The refcnt works just like the mbuf's refcnt: if the app wants to send one
object to multiple entities (rings or others), it should increment the refcnt
by (number of entities - 1) and then send it.

It works well as long as the above rule is kept.
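A minimal usage sketch of that rule with the API as posted in this patch
(the fanout_object() name, the 128-byte size and the ring handling are made
up for the example, and error handling is simplified):

    #include <stdint.h>

    #include <rte_memarea.h>
    #include <rte_ring.h>

    static int
    fanout_object(struct rte_memarea *ma, struct rte_ring *rings[], uint16_t n)
    {
    	void *obj;
    	uint16_t i;

    	if (n == 0)
    		return -1;

    	obj = rte_memarea_alloc(ma, 128, 0); /* refcnt is 1 after allocation */
    	if (obj == NULL)
    		return -1;

    	/* One reference per extra consumer: bump refcnt by (n - 1) up front. */
    	if (n > 1)
    		rte_memarea_update_refcnt(ma, obj, (int16_t)(n - 1));

    	/* Each consumer calls rte_memarea_free() once it is done with obj. */
    	for (i = 0; i < n; i++)
    		rte_ring_enqueue(rings[i], obj);

    	return 0;
    }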

> 
> Probably some memarea generation counter could solve this.
> 
> I still tend to think that reference counting is too usage-specific
> to be a part of a generic memory management library,
> though others don't seem to object.
> 
>> diff --git a/lib/memarea/rte_memarea.h b/lib/memarea/rte_memarea.h
>> index ac2d63a4d3..f866fa7b67 100644
>> --- a/lib/memarea/rte_memarea.h
>> +++ b/lib/memarea/rte_memarea.h
>> @@ -129,6 +129,62 @@ struct rte_memarea *rte_memarea_create(const struct rte_memarea_param *init);
>>  __rte_experimental
>>  void rte_memarea_destroy(struct rte_memarea *ma);
>>  
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * Allocate memory from memarea.
>> + *
>> + * Allocate one memory object from the memarea.
>> + *
>> + * @param ma
>> + *   The pointer of memarea.
>> + * @param size
>> + *   The memory size to be allocated.
>> + * @param cookie
>> + *   User-provided footprint which could used to debug memory leak.
>> + *
>> + * @return
>> + *   Non-NULL on success. Otherwise NULL is returned.
> 
> Also NULL when size == 0 (OK, like malloc) or when ma == NULL (not obvious).

ok

> 
>> + */
>> +__rte_experimental
>> +void *rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie);
>> +
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * Free memory to memarea.
>> + *
>> + * Free one memory object to the memarea.
> 
> Not really if refcnt > 1.
> Should it assert somehow that refcnt <= 1?
> 
> The second sentence adds nothing to the first.

will add more description.

> 
>> + *
>> + * @param ma
>> + *   The pointer of memarea.
>> + * @param ptr
>> + *   The pointer of memory object which need be freed.
>> + */
>> +__rte_experimental
>> +void rte_memarea_free(struct rte_memarea *ma, void *ptr);
  

Patch

diff --git a/doc/guides/prog_guide/memarea_lib.rst b/doc/guides/prog_guide/memarea_lib.rst
index 85ad57145f..a9c58dc44d 100644
--- a/doc/guides/prog_guide/memarea_lib.rst
+++ b/doc/guides/prog_guide/memarea_lib.rst
@@ -33,6 +33,16 @@  returns the pointer to the created memarea or ``NULL`` if the creation failed.
 
 The ``rte_memarea_destroy()`` function is used to destroy a memarea.
 
+The ``rte_memarea_alloc()`` function is used to alloc one memory object from
+the memarea.
+
+The ``rte_memarea_free()`` function is used to free one memory object which
+allocated by ``rte_memarea_alloc()``.
+
+The ``rte_memarea_update_refcnt()`` function is used to update the memory
+object's reference count, if the count reaches zero, the memory object will
+be freed to memarea.
+
 Reference
 ---------
 
diff --git a/lib/memarea/memarea_private.h b/lib/memarea/memarea_private.h
index 59be9c1d00..f5accf2987 100644
--- a/lib/memarea/memarea_private.h
+++ b/lib/memarea/memarea_private.h
@@ -28,6 +28,9 @@  struct rte_memarea {
 	void                    *area_addr;
 	struct memarea_elem_list elem_list;
 	struct memarea_elem_list free_list;
+
+	uint64_t alloc_fails;
+	uint64_t refcnt_check_fails;
 } __rte_cache_aligned;
 
 #endif /* MEMAREA_PRIVATE_H */
diff --git a/lib/memarea/rte_memarea.c b/lib/memarea/rte_memarea.c
index 85975029e2..8ad1c0acb5 100644
--- a/lib/memarea/rte_memarea.c
+++ b/lib/memarea/rte_memarea.c
@@ -4,6 +4,7 @@ 
 
 #include <stdio.h>
 #include <stdlib.h>
+#include <sys/queue.h>
 
 #include <rte_common.h>
 #include <rte_log.h>
@@ -94,6 +95,8 @@  memarea_alloc_area(const struct rte_memarea_param *init)
 		ptr = memarea_alloc_from_libc(init->total_sz);
 	else if (init->source == RTE_MEMAREA_SOURCE_USER)
 		ptr = init->user_addr;
+	else if (init->source == RTE_MEMAREA_SOURCE_MEMAREA)
+		ptr = rte_memarea_alloc(init->src_memarea, init->total_sz, 0);
 
 	if (ptr == NULL)
 		RTE_LOG(ERR, MEMAREA, "memarea: %s alloc memory area fail!\n", init->name);
@@ -146,6 +149,8 @@  memarea_free_area(struct rte_memarea *ma)
 		rte_free(ma->area_addr);
 	else if (ma->init.source == RTE_MEMAREA_SOURCE_LIBC)
 		free(ma->area_addr);
+	else if (ma->init.source == RTE_MEMAREA_SOURCE_MEMAREA)
+		rte_memarea_free(ma->init.src_memarea, ma->area_addr);
 }
 
 void
@@ -156,3 +161,153 @@  rte_memarea_destroy(struct rte_memarea *ma)
 	memarea_free_area(ma);
 	rte_free(ma);
 }
+
+static inline void
+memarea_lock(struct rte_memarea *ma)
+{
+	if (ma->init.mt_safe)
+		rte_spinlock_lock(&ma->lock);
+}
+
+static inline void
+memarea_unlock(struct rte_memarea *ma)
+{
+	if (ma->init.mt_safe)
+		rte_spinlock_unlock(&ma->lock);
+}
+
+static inline bool
+memarea_whether_add_node(size_t free_size, size_t need_size)
+{
+	size_t align_size = RTE_ALIGN_CEIL(need_size, RTE_CACHE_LINE_SIZE);
+	return free_size > align_size && (free_size - align_size) > sizeof(struct memarea_elem);
+}
+
+static inline void
+memarea_add_node(struct rte_memarea *ma, struct memarea_elem *elem, size_t need_size)
+{
+	size_t align_size = RTE_ALIGN_CEIL(need_size, RTE_CACHE_LINE_SIZE);
+	struct memarea_elem *new_elem;
+	new_elem = (struct memarea_elem *)RTE_PTR_ADD(elem, sizeof(struct memarea_elem) +
+							    align_size);
+	new_elem->size = elem->size - align_size - sizeof(struct memarea_elem);
+	new_elem->magic = MEMAREA_AVAILABLE_ELEM_MAGIC;
+	new_elem->cookie = MEMAREA_AVAILABLE_ELEM_COOKIE;
+	new_elem->refcnt = 0;
+	TAILQ_INSERT_AFTER(&ma->elem_list, elem, new_elem, elem_node);
+	TAILQ_INSERT_AFTER(&ma->free_list, elem, new_elem, free_node);
+	elem->size = align_size;
+}
+
+void *
+rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie)
+{
+	struct memarea_elem *elem;
+	void *ptr = NULL;
+
+	if (unlikely(ma == NULL || size == 0))
+		return NULL;
+
+	memarea_lock(ma);
+	TAILQ_FOREACH(elem, &ma->free_list, free_node) {
+		if (unlikely(elem->magic != MEMAREA_AVAILABLE_ELEM_MAGIC))
+			break;
+		if (elem->size < size)
+			continue;
+		if (memarea_whether_add_node(elem->size, size))
+			memarea_add_node(ma, elem, size);
+		elem->magic = MEMAREA_ALLOCATED_ELEM_MAGIC;
+		elem->cookie = cookie;
+		elem->refcnt = 1;
+		TAILQ_REMOVE(&ma->free_list, elem, free_node);
+		ptr = RTE_PTR_ADD(elem, sizeof(struct memarea_elem));
+		break;
+	}
+	if (unlikely(ptr == NULL))
+		ma->alloc_fails++;
+	memarea_unlock(ma);
+
+	return ptr;
+}
+
+void
+rte_memarea_free(struct rte_memarea *ma, void *ptr)
+{
+	rte_memarea_update_refcnt(ma, ptr, -1);
+}
+
+static inline void
+memarea_merge_node(struct rte_memarea *ma, struct memarea_elem *curr,
+		   struct memarea_elem *next, bool del_next_from_free,
+		   bool add_curr_to_free)
+{
+	curr->size += next->size + sizeof(struct memarea_elem);
+	next->size = 0;
+	next->magic = 0;
+	next->cookie = 0;
+	TAILQ_REMOVE(&ma->elem_list, next, elem_node);
+	if (del_next_from_free)
+		TAILQ_REMOVE(&ma->free_list, next, free_node);
+	if (add_curr_to_free) {
+		curr->magic = MEMAREA_AVAILABLE_ELEM_MAGIC;
+		curr->cookie = MEMAREA_AVAILABLE_ELEM_COOKIE;
+		TAILQ_INSERT_TAIL(&ma->free_list, curr, free_node);
+	}
+}
+
+static inline void
+memarea_free_elem(struct rte_memarea *ma, struct memarea_elem *elem)
+{
+	struct memarea_elem *prev, *next;
+	bool merged = false;
+	prev = TAILQ_PREV(elem, memarea_elem_list, elem_node);
+	next = TAILQ_NEXT(elem, elem_node);
+	if (prev != NULL && prev->refcnt == 0) {
+		memarea_merge_node(ma, prev, elem, false, false);
+		elem = prev;
+		merged = true;
+	}
+	if (next != NULL && next->refcnt == 0) {
+		memarea_merge_node(ma, elem, next, true, !merged);
+		merged = true;
+	}
+	if (!merged) {
+		elem->magic = MEMAREA_AVAILABLE_ELEM_MAGIC;
+		elem->cookie = MEMAREA_AVAILABLE_ELEM_COOKIE;
+		TAILQ_INSERT_TAIL(&ma->free_list, elem, free_node);
+	}
+}
+
+void
+rte_memarea_update_refcnt(struct rte_memarea *ma, void *ptr, int16_t value)
+{
+	struct memarea_elem *elem = (struct memarea_elem *)RTE_PTR_SUB(ptr,
+							    sizeof(struct memarea_elem));
+
+	if (unlikely(ma == NULL || ptr == NULL))
+		return;
+
+	memarea_lock(ma);
+	if (unlikely(elem->magic != MEMAREA_ALLOCATED_ELEM_MAGIC)) {
+		RTE_LOG(ERR, MEMAREA, "memarea: %s magic: 0x%x check fail!\n",
+			ma->init.name, elem->magic);
+		memarea_unlock(ma);
+		return;
+	}
+
+	if (unlikely(elem->refcnt <= 0 || elem->refcnt + value < 0)) {
+		RTE_LOG(ERR, MEMAREA,
+			"memarea: %s cookie: 0x%x curr refcnt: %d update refcnt: %d check fail!\n",
+			ma->init.name, elem->cookie, elem->refcnt, value);
+		ma->refcnt_check_fails++;
+		if (elem->refcnt > 0)
+			elem->refcnt += value;
+		memarea_unlock(ma);
+		return;
+	}
+
+	elem->refcnt += value;
+	if (elem->refcnt == 0)
+		memarea_free_elem(ma, elem);
+	memarea_unlock(ma);
+}
diff --git a/lib/memarea/rte_memarea.h b/lib/memarea/rte_memarea.h
index ac2d63a4d3..f866fa7b67 100644
--- a/lib/memarea/rte_memarea.h
+++ b/lib/memarea/rte_memarea.h
@@ -129,6 +129,62 @@  struct rte_memarea *rte_memarea_create(const struct rte_memarea_param *init);
 __rte_experimental
 void rte_memarea_destroy(struct rte_memarea *ma);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate memory from memarea.
+ *
+ * Allocate one memory object from the memarea.
+ *
+ * @param ma
+ *   The pointer of memarea.
+ * @param size
+ *   The memory size to be allocated.
+ * @param cookie
+ *   User-provided footprint which could used to debug memory leak.
+ *
+ * @return
+ *   Non-NULL on success. Otherwise NULL is returned.
+ */
+__rte_experimental
+void *rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Free memory to memarea.
+ *
+ * Free one memory object to the memarea.
+ *
+ * @param ma
+ *   The pointer of memarea.
+ * @param ptr
+ *   The pointer of memory object which need be freed.
+ */
+__rte_experimental
+void rte_memarea_free(struct rte_memarea *ma, void *ptr);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Update memory's refcnt.
+ *
+ * Update one memory object's refcnt.
+ * When refcnt is updated to be zero, the memory object is freed.
+ *
+ * @param ma
+ *   The pointer of memarea.
+ * @param ptr
+ *   The pointer of memory object which need be updated refcnt.
+ * @param value
+ *   The value which need be updated.
+ */
+__rte_experimental
+void rte_memarea_update_refcnt(struct rte_memarea *ma, void *ptr, int16_t value);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/memarea/version.map b/lib/memarea/version.map
index f36a04d7cf..a0026fc5f9 100644
--- a/lib/memarea/version.map
+++ b/lib/memarea/version.map
@@ -1,8 +1,11 @@ 
 EXPERIMENTAL {
 	global:
 
+	rte_memarea_alloc;
 	rte_memarea_create;
 	rte_memarea_destroy;
+	rte_memarea_free;
+	rte_memarea_update_refcnt;
 
 	local: *;
 };