On 14-Apr-20 8:44 PM, Dmitry Kozlyuk wrote:
> All supported OS create memory segment lists (MSL) and reserve VA space
> for them in a nearly identical way. Move common code into EAL private
> functions to reduce duplication.
>
> Signed-off-by: Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>
> ---
> lib/librte_eal/common/eal_common_memory.c | 54 ++++++++++++++++++
> lib/librte_eal/common/eal_private.h | 34 ++++++++++++
> lib/librte_eal/freebsd/eal_memory.c | 54 +++---------------
> lib/librte_eal/linux/eal_memory.c | 68 +++++------------------
> 4 files changed, 110 insertions(+), 100 deletions(-)
>
> diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
> index cc7d54e0c..d9764681a 100644
> --- a/lib/librte_eal/common/eal_common_memory.c
> +++ b/lib/librte_eal/common/eal_common_memory.c
> @@ -25,6 +25,7 @@
> #include "eal_private.h"
> #include "eal_internal_cfg.h"
> #include "eal_memcfg.h"
> +#include "eal_options.h"
> #include "malloc_heap.h"
>
> /*
> @@ -182,6 +183,59 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
> return aligned_addr;
> }
>
> +int
> +eal_reserve_memseg_list(struct rte_memseg_list *msl,
> + enum eal_mem_reserve_flags flags)
This and other similar places in this and other patches: I don't think
using enums for this purpose (i.e. to hold multiple flag values at once)
is good practice. I would suggest replacing the enum parameter type with int.
Also, I don't think "eal_reserve_memseg_list" is a particularly good or
descriptive name (and neither is "eal_alloc_memseg_list", for that
matter). Suggestion: "eal_memseg_list_create" (or "_init") and
"eal_memseg_list_alloc"?
> +{
> + uint64_t page_sz;
> + size_t mem_sz;
> + void *addr;
> +
> + page_sz = msl->page_sz;
> + mem_sz = page_sz * msl->memseg_arr.len;
> +
> + addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
> + if (addr == NULL) {
> + if (rte_errno == EADDRNOTAVAIL)
> + RTE_LOG(ERR, EAL, "Cannot reserve %llu bytes at [%p] - "
> + "please use '--" OPT_BASE_VIRTADDR "' option\n",
> + (unsigned long long)mem_sz, msl->base_va);
> + else
> + RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
> + return -1;
> + }
> + msl->base_va = addr;
> + msl->len = mem_sz;
> +
> + return 0;
> +}
> +
> +int
> +eal_alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
> + int n_segs, int socket_id, int type_msl_idx, bool heap)
> +{
> + char name[RTE_FBARRAY_NAME_LEN];
> +
> + snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
> + type_msl_idx);
> + if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
> + sizeof(struct rte_memseg))) {
> + RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
> + rte_strerror(rte_errno));
> + return -1;
> + }
> +
> + msl->page_sz = page_sz;
> + msl->socket_id = socket_id;
> + msl->base_va = NULL;
It probably needs to be documented that eal_alloc_memseg_list() must be
called before eal_reserve_memseg_list().
> + msl->heap = heap;
> +
> + RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
> + (size_t)page_sz >> 10, socket_id);
> +
> + return 0;
> +}
> +
> static struct rte_memseg *
> virt2memseg(const void *addr, const struct rte_memseg_list *msl)
> {
@@ -25,6 +25,7 @@
#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memcfg.h"
+#include "eal_options.h"
#include "malloc_heap.h"
/*
@@ -182,6 +183,59 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
return aligned_addr;
}
+int
+eal_reserve_memseg_list(struct rte_memseg_list *msl,
+ enum eal_mem_reserve_flags flags)
+{
+ uint64_t page_sz;
+ size_t mem_sz;
+ void *addr;
+
+ page_sz = msl->page_sz;
+ mem_sz = page_sz * msl->memseg_arr.len;
+
+ addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
+ if (addr == NULL) {
+ if (rte_errno == EADDRNOTAVAIL)
+ RTE_LOG(ERR, EAL, "Cannot reserve %llu bytes at [%p] - "
+ "please use '--" OPT_BASE_VIRTADDR "' option\n",
+ (unsigned long long)mem_sz, msl->base_va);
+ else
+ RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
+ return -1;
+ }
+ msl->base_va = addr;
+ msl->len = mem_sz;
+
+ return 0;
+}
+
+int
+eal_alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
+ int n_segs, int socket_id, int type_msl_idx, bool heap)
+{
+ char name[RTE_FBARRAY_NAME_LEN];
+
+ snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
+ type_msl_idx);
+ if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
+ sizeof(struct rte_memseg))) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
+ rte_strerror(rte_errno));
+ return -1;
+ }
+
+ msl->page_sz = page_sz;
+ msl->socket_id = socket_id;
+ msl->base_va = NULL;
+ msl->heap = heap;
+
+ RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
+ (size_t)page_sz >> 10, socket_id);
+
+ return 0;
+}
+
static struct rte_memseg *
virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
@@ -246,6 +246,40 @@ void *
eal_get_virtual_area(void *requested_addr, size_t *size, size_t page_sz,
int flags, int mmap_flags);
+/**
+ * Reserve VA space for a memory segment list.
+ *
+ * @param msl
+ * Memory segment list with page size defined.
+ * @param flags
+ * Extra memory reservation flags. Can be 0 if unnecessary.
+ * @return
+ * 0 on success, (-1) on failure and rte_errno is set.
+ */
+int
+eal_reserve_memseg_list(struct rte_memseg_list *msl,
+ enum eal_mem_reserve_flags flags);
+
+/**
+ * Initialize a memory segment list with its backing storage.
+ *
+ * @param msl
+ * Memory segment list to be filled.
+ * @param page_sz
+ * Size of segment pages in the MSL.
+ * @param n_segs
+ * Number of segments.
+ * @param socket_id
+ * Socket ID. Must not be SOCKET_ID_ANY.
+ * @param type_msl_idx
+ * Index of the MSL among other MSLs of the same socket and page size.
+ * @param heap
+ * Mark MSL as pointing to a heap.
+ */
+int
+eal_alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
+ int n_segs, int socket_id, int type_msl_idx, bool heap);
+
/**
* Get cpu core_id.
*
@@ -336,61 +336,23 @@ get_mem_amount(uint64_t page_sz, uint64_t max_mem)
return RTE_ALIGN(area_sz, page_sz);
}
-#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
static int
-alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
+memseg_list_alloc(struct rte_memseg_list *msl, uint64_t page_sz,
int n_segs, int socket_id, int type_msl_idx)
{
- char name[RTE_FBARRAY_NAME_LEN];
-
- snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
- type_msl_idx);
- if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
- sizeof(struct rte_memseg))) {
- RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
- rte_strerror(rte_errno));
- return -1;
- }
-
- msl->page_sz = page_sz;
- msl->socket_id = socket_id;
- msl->base_va = NULL;
-
- RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
- (size_t)page_sz >> 10, socket_id);
-
- return 0;
+ return eal_alloc_memseg_list(
+ msl, page_sz, n_segs, socket_id, type_msl_idx, false);
}
static int
-alloc_va_space(struct rte_memseg_list *msl)
+memseg_list_reserve(struct rte_memseg_list *msl)
{
- uint64_t page_sz;
- size_t mem_sz;
- void *addr;
- int flags = 0;
+ enum eal_reserve_flags flags = 0;
#ifdef RTE_ARCH_PPC_64
- flags |= MAP_HUGETLB;
+ flags |= EAL_RESERVE_HUGEPAGES;
#endif
-
- page_sz = msl->page_sz;
- mem_sz = page_sz * msl->memseg_arr.len;
-
- addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
- if (addr == NULL) {
- if (rte_errno == EADDRNOTAVAIL)
- RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - "
- "please use '--" OPT_BASE_VIRTADDR "' option\n",
- (unsigned long long)mem_sz, msl->base_va);
- else
- RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
- return -1;
- }
- msl->base_va = addr;
- msl->len = mem_sz;
-
- return 0;
+ return eal_reserve_memseg_list(msl, flags);
}
@@ -479,7 +441,7 @@ memseg_primary_init(void)
cur_max_mem);
n_segs = cur_mem / hugepage_sz;
- if (alloc_memseg_list(msl, hugepage_sz, n_segs,
+ if (memseg_list_alloc(msl, hugepage_sz, n_segs,
0, type_msl_idx))
return -1;
@@ -802,7 +802,7 @@ get_mem_amount(uint64_t page_sz, uint64_t max_mem)
}
static int
-free_memseg_list(struct rte_memseg_list *msl)
+memseg_list_free(struct rte_memseg_list *msl)
{
if (rte_fbarray_destroy(&msl->memseg_arr)) {
RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
@@ -812,58 +812,18 @@ free_memseg_list(struct rte_memseg_list *msl)
return 0;
}
-#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
static int
-alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
+memseg_list_alloc(struct rte_memseg_list *msl, uint64_t page_sz,
int n_segs, int socket_id, int type_msl_idx)
{
- char name[RTE_FBARRAY_NAME_LEN];
-
- snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
- type_msl_idx);
- if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
- sizeof(struct rte_memseg))) {
- RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
- rte_strerror(rte_errno));
- return -1;
- }
-
- msl->page_sz = page_sz;
- msl->socket_id = socket_id;
- msl->base_va = NULL;
- msl->heap = 1; /* mark it as a heap segment */
-
- RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
- (size_t)page_sz >> 10, socket_id);
-
- return 0;
+ return eal_alloc_memseg_list(
+ msl, page_sz, n_segs, socket_id, type_msl_idx, true);
}
static int
-alloc_va_space(struct rte_memseg_list *msl)
+memseg_list_reserve(struct rte_memseg_list *msl)
{
- uint64_t page_sz;
- size_t mem_sz;
- void *addr;
- int flags = 0;
-
- page_sz = msl->page_sz;
- mem_sz = page_sz * msl->memseg_arr.len;
-
- addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
- if (addr == NULL) {
- if (rte_errno == EADDRNOTAVAIL)
- RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - "
- "please use '--" OPT_BASE_VIRTADDR "' option\n",
- (unsigned long long)mem_sz, msl->base_va);
- else
- RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
- return -1;
- }
- msl->base_va = addr;
- msl->len = mem_sz;
-
- return 0;
+ return eal_reserve_memseg_list(msl, 0);
}
/*
@@ -1009,12 +969,12 @@ prealloc_segments(struct hugepage_file *hugepages, int n_pages)
}
/* now, allocate fbarray itself */
- if (alloc_memseg_list(msl, page_sz, n_segs, socket,
+ if (memseg_list_alloc(msl, page_sz, n_segs, socket,
msl_idx) < 0)
return -1;
/* finally, allocate VA space */
- if (alloc_va_space(msl) < 0)
+ if (memseg_list_reserve(msl) < 0)
return -1;
}
}
@@ -2191,7 +2151,7 @@ memseg_primary_init_32(void)
max_pagesz_mem);
n_segs = cur_mem / hugepage_sz;
- if (alloc_memseg_list(msl, hugepage_sz, n_segs,
+ if (memseg_list_alloc(msl, hugepage_sz, n_segs,
socket_id, type_msl_idx)) {
/* failing to allocate a memseg list is
* a serious error.
@@ -2200,13 +2160,13 @@ memseg_primary_init_32(void)
return -1;
}
- if (alloc_va_space(msl)) {
+ if (memseg_list_reserve(msl)) {
/* if we couldn't allocate VA space, we
* can try with smaller page sizes.
*/
RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
/* deallocate memseg list */
- if (free_memseg_list(msl))
+ if (memseg_list_free(msl))
return -1;
break;
}
@@ -2395,11 +2355,11 @@ memseg_primary_init(void)
}
msl = &mcfg->memsegs[msl_idx++];
- if (alloc_memseg_list(msl, pagesz, n_segs,
+ if (memseg_list_alloc(msl, pagesz, n_segs,
socket_id, cur_seglist))
goto out;
- if (alloc_va_space(msl)) {
+ if (memseg_list_reserve(msl)) {
RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
goto out;
}
@@ -2433,7 +2393,7 @@ memseg_secondary_init(void)
}
/* preallocate VA space */
- if (alloc_va_space(msl)) {
+ if (memseg_list_reserve(msl)) {
RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
return -1;
}