@@ -264,9 +264,9 @@ eal_dynmem_hugepage_init(void)
#endif
}
- /* make a copy of socket_mem, needed for balanced allocation. */
+ /* make a copy of numa_mem, needed for balanced allocation. */
for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)
- memory[hp_sz_idx] = internal_conf->socket_mem[hp_sz_idx];
+ memory[hp_sz_idx] = internal_conf->numa_mem[hp_sz_idx];
/* calculate final number of pages */
if (eal_dynmem_calc_num_pages_per_socket(memory,
@@ -334,10 +334,10 @@ eal_dynmem_hugepage_init(void)
}
/* if socket limits were specified, set them */
- if (internal_conf->force_socket_limits) {
+ if (internal_conf->force_numa_limits) {
unsigned int i;
for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
- uint64_t limit = internal_conf->socket_limit[i];
+ uint64_t limit = internal_conf->numa_limit[i];
if (limit == 0)
continue;
if (rte_mem_alloc_validator_register("socket-limit",
@@ -382,7 +382,7 @@ eal_dynmem_calc_num_pages_per_socket(
return -1;
/* if specific memory amounts per socket weren't requested */
- if (internal_conf->force_sockets == 0) {
+ if (internal_conf->force_numa == 0) {
size_t total_size;
#ifdef RTE_ARCH_64
int cpu_per_socket[RTE_MAX_NUMA_NODES];
@@ -509,10 +509,10 @@ eal_dynmem_calc_num_pages_per_socket(
/* if we didn't satisfy all memory requirements per socket */
if (memory[socket] > 0 &&
- internal_conf->socket_mem[socket] != 0) {
+ internal_conf->numa_mem[socket] != 0) {
/* to prevent icc errors */
requested = (unsigned int)(
- internal_conf->socket_mem[socket] / 0x100000);
+ internal_conf->numa_mem[socket] / 0x100000);
available = requested -
((unsigned int)(memory[socket] / 0x100000));
EAL_LOG(ERR, "Not enough memory available on "
@@ -333,14 +333,14 @@ eal_reset_internal_config(struct internal_config *internal_cfg)
internal_cfg->hugepage_dir = NULL;
internal_cfg->hugepage_file.unlink_before_mapping = false;
internal_cfg->hugepage_file.unlink_existing = true;
- internal_cfg->force_sockets = 0;
+ internal_cfg->force_numa = 0;
/* zero out the NUMA config */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- internal_cfg->socket_mem[i] = 0;
- internal_cfg->force_socket_limits = 0;
+ internal_cfg->numa_mem[i] = 0;
+ internal_cfg->force_numa_limits = 0;
/* zero out the NUMA limits config */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- internal_cfg->socket_limit[i] = 0;
+ internal_cfg->numa_limit[i] = 0;
/* zero out hugedir descriptors */
for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) {
memset(&internal_cfg->hugepage_info[i], 0,
@@ -2041,7 +2041,7 @@ eal_adjust_config(struct internal_config *internal_cfg)
/* if no memory amounts were requested, this will result in 0 and
* will be overridden later, right after eal_hugepage_info_init() */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- internal_cfg->memory += internal_cfg->socket_mem[i];
+ internal_cfg->memory += internal_cfg->numa_mem[i];
return 0;
}
@@ -2082,12 +2082,12 @@ eal_check_common_options(struct internal_config *internal_cfg)
"option");
return -1;
}
- if (mem_parsed && internal_cfg->force_sockets == 1) {
+ if (mem_parsed && internal_cfg->force_numa == 1) {
EAL_LOG(ERR, "Options -m and --"OPT_SOCKET_MEM" cannot "
"be specified at the same time");
return -1;
}
- if (internal_cfg->no_hugetlbfs && internal_cfg->force_sockets == 1) {
+ if (internal_cfg->no_hugetlbfs && internal_cfg->force_numa == 1) {
EAL_LOG(ERR, "Option --"OPT_SOCKET_MEM" cannot "
"be specified together with --"OPT_NO_HUGE);
return -1;
@@ -2105,7 +2105,7 @@ eal_check_common_options(struct internal_config *internal_cfg)
"be specified together with --"OPT_NO_HUGE);
return -1;
}
- if (internal_conf->force_socket_limits && internal_conf->legacy_mem) {
+ if (internal_conf->force_numa_limits && internal_conf->legacy_mem) {
EAL_LOG(ERR, "Option --"OPT_SOCKET_LIMIT
" is only supported in non-legacy memory mode");
}
@@ -68,11 +68,11 @@ struct internal_config {
*/
volatile unsigned create_uio_dev; /**< true to create /dev/uioX devices */
volatile enum rte_proc_type_t process_type; /**< multi-process proc type */
- /** true to try allocating memory on specific sockets */
- volatile unsigned force_sockets;
- volatile uint64_t socket_mem[RTE_MAX_NUMA_NODES]; /**< amount of memory per socket */
- volatile unsigned force_socket_limits;
- volatile uint64_t socket_limit[RTE_MAX_NUMA_NODES]; /**< limit amount of memory per socket */
+ /** true to try allocating memory on specific NUMA nodes */
+ volatile unsigned force_numa;
+ volatile uint64_t numa_mem[RTE_MAX_NUMA_NODES]; /**< amount of memory per NUMA node */
+ volatile unsigned force_numa_limits;
+ volatile uint64_t numa_limit[RTE_MAX_NUMA_NODES]; /**< limit amount of memory per NUMA node */
uintptr_t base_virtaddr; /**< base address to try and reserve memory from */
volatile unsigned legacy_mem;
/**< true to enable legacy memory behavior (no dynamic allocation,
@@ -711,7 +711,7 @@ malloc_get_numa_socket(void)
/* for control threads, return first socket where memory is available */
for (idx = 0; idx < rte_socket_count(); idx++) {
socket_id = rte_socket_id_by_idx(idx);
- if (conf->socket_mem[socket_id] != 0)
+ if (conf->numa_mem[socket_id] != 0)
return socket_id;
}
/* We couldn't quickly find a NUMA node where memory was available,
@@ -749,7 +749,7 @@ rte_eal_init(int argc, char **argv)
}
}
- if (internal_conf->memory == 0 && internal_conf->force_sockets == 0) {
+ if (internal_conf->memory == 0 && internal_conf->force_numa == 0) {
if (internal_conf->no_hugetlbfs)
internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
else
@@ -695,26 +695,26 @@ eal_parse_args(int argc, char **argv)
}
case OPT_SOCKET_MEM_NUM:
if (eal_parse_socket_arg(optarg,
- internal_conf->socket_mem) < 0) {
+ internal_conf->numa_mem) < 0) {
EAL_LOG(ERR, "invalid parameters for --"
OPT_SOCKET_MEM);
eal_usage(prgname);
ret = -1;
goto out;
}
- internal_conf->force_sockets = 1;
+ internal_conf->force_numa = 1;
break;
case OPT_SOCKET_LIMIT_NUM:
if (eal_parse_socket_arg(optarg,
- internal_conf->socket_limit) < 0) {
+ internal_conf->numa_limit) < 0) {
EAL_LOG(ERR, "invalid parameters for --"
OPT_SOCKET_LIMIT);
eal_usage(prgname);
ret = -1;
goto out;
}
- internal_conf->force_socket_limits = 1;
+ internal_conf->force_numa_limits = 1;
break;
case OPT_VFIO_INTR_NUM:
@@ -1137,7 +1137,7 @@ rte_eal_init(int argc, char **argv)
}
}
- if (internal_conf->memory == 0 && internal_conf->force_sockets == 0) {
+ if (internal_conf->memory == 0 && internal_conf->force_numa == 0) {
if (internal_conf->no_hugetlbfs)
internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
}
@@ -282,7 +282,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
oldpolicy = MPOL_DEFAULT;
}
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- if (internal_conf->socket_mem[i])
+ if (internal_conf->numa_mem[i])
maxnode = i + 1;
}
#endif
@@ -301,7 +301,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
if (j == maxnode) {
node_id = (node_id + 1) % maxnode;
- while (!internal_conf->socket_mem[node_id]) {
+ while (!internal_conf->numa_mem[node_id]) {
node_id++;
node_id %= maxnode;
}
@@ -1269,9 +1269,9 @@ eal_legacy_hugepage_init(void)
huge_register_sigbus();
- /* make a copy of socket_mem, needed for balanced allocation. */
+ /* make a copy of numa_mem, needed for balanced allocation. */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- memory[i] = internal_conf->socket_mem[i];
+ memory[i] = internal_conf->numa_mem[i];
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_conf->num_hugepage_sizes; i++) {
@@ -1339,7 +1339,7 @@ eal_legacy_hugepage_init(void)
huge_recover_sigbus();
- if (internal_conf->memory == 0 && internal_conf->force_sockets == 0)
+ if (internal_conf->memory == 0 && internal_conf->force_numa == 0)
internal_conf->memory = eal_get_hugepage_mem_size();
nr_hugefiles = nr_hugepages;
@@ -1365,9 +1365,9 @@ eal_legacy_hugepage_init(void)
}
}
- /* make a copy of socket_mem, needed for number of pages calculation */
+ /* make a copy of numa_mem, needed for number of pages calculation */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- memory[i] = internal_conf->socket_mem[i];
+ memory[i] = internal_conf->numa_mem[i];
/* calculate final number of pages */
nr_hugepages = eal_dynmem_calc_num_pages_per_socket(memory,
@@ -1722,12 +1722,12 @@ memseg_primary_init_32(void)
*/
active_sockets = 0;
total_requested_mem = 0;
- if (internal_conf->force_sockets)
+ if (internal_conf->force_numa)
for (i = 0; i < rte_socket_count(); i++) {
uint64_t mem;
socket_id = rte_socket_id_by_idx(i);
- mem = internal_conf->socket_mem[socket_id];
+ mem = internal_conf->numa_mem[socket_id];
if (mem == 0)
continue;
@@ -1779,7 +1779,7 @@ memseg_primary_init_32(void)
/* if we didn't specifically request memory on this socket */
skip = active_sockets != 0 &&
- internal_conf->socket_mem[socket_id] == 0;
+ internal_conf->numa_mem[socket_id] == 0;
/* ...or if we didn't specifically request memory on *any*
* socket, and this is not main lcore
*/
@@ -1794,7 +1794,7 @@ memseg_primary_init_32(void)
/* max amount of memory on this socket */
max_socket_mem = (active_sockets != 0 ?
- internal_conf->socket_mem[socket_id] :
+ internal_conf->numa_mem[socket_id] :
internal_conf->memory) +
extra_mem_per_socket;
cur_socket_mem = 0;
@@ -331,7 +331,7 @@ rte_eal_init(int argc, char **argv)
return -1;
}
- if (internal_conf->memory == 0 && !internal_conf->force_sockets) {
+ if (internal_conf->memory == 0 && !internal_conf->force_numa) {
if (internal_conf->no_hugetlbfs)
internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
}