get:
Show a patch.

patch:
Update a patch (partial update: only the supplied fields are changed).

put:
Update a patch (full update).
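
For example, these methods can be exercised with Python's requests library. A minimal sketch, assuming Patchwork's usual token authentication and that "state" and "archived" are writable on your server; the token value is a placeholder:

import requests

BASE = "http://patchwork.dpdk.org/api"
TOKEN = "0123456789abcdef"  # placeholder; generate a real token in your Patchwork profile

# get: show a patch (read access needs no authentication)
patch = requests.get(f"{BASE}/patches/40982/").json()
print(patch["name"], "-", patch["state"])

# patch: partial update -- send only the fields to change
# (assumed to require a maintainer account on the patch's project)
resp = requests.patch(
    f"{BASE}/patches/40982/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()

A sample exchange against this endpoint: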

GET /api/patches/40982/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 40982,
    "url": "http://patchwork.dpdk.org/api/patches/40982/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/b20eb6381a2a0f07766d0d9a9515689889a43f3d.1528716160.git.anatoly.burakov@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<b20eb6381a2a0f07766d0d9a9515689889a43f3d.1528716160.git.anatoly.burakov@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/b20eb6381a2a0f07766d0d9a9515689889a43f3d.1528716160.git.anatoly.burakov@intel.com",
    "date": "2018-06-11T16:13:34",
    "name": "[3/3] eal: make memory segment preallocation OS-specific",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "04e34ee97d8293b278205a1d493832512bd44d25",
    "submitter": {
        "id": 4,
        "url": "http://patchwork.dpdk.org/api/people/4/?format=api",
        "name": "Burakov, Anatoly",
        "email": "anatoly.burakov@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/b20eb6381a2a0f07766d0d9a9515689889a43f3d.1528716160.git.anatoly.burakov@intel.com/mbox/",
    "series": [
        {
            "id": 83,
            "url": "http://patchwork.dpdk.org/api/series/83/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=83",
            "date": "2018-06-11T16:13:33",
            "name": "[1/3] eal/bsdapp: fix segment index display",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/83/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/40982/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/40982/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id A9B951E8DD;\n\tMon, 11 Jun 2018 18:13:49 +0200 (CEST)",
            "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n\tby dpdk.org (Postfix) with ESMTP id 069151E8D7\n\tfor <dev@dpdk.org>; Mon, 11 Jun 2018 18:13:47 +0200 (CEST)",
            "from fmsmga008.fm.intel.com ([10.253.24.58])\n\tby fmsmga102.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t11 Jun 2018 09:13:37 -0700",
            "from irvmail001.ir.intel.com ([163.33.26.43])\n\tby fmsmga008.fm.intel.com with ESMTP; 11 Jun 2018 09:13:36 -0700",
            "from sivswdev01.ir.intel.com (sivswdev01.ir.intel.com\n\t[10.237.217.45])\n\tby irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id\n\tw5BGDZj9007731; Mon, 11 Jun 2018 17:13:35 +0100",
            "from sivswdev01.ir.intel.com (localhost [127.0.0.1])\n\tby sivswdev01.ir.intel.com with ESMTP id w5BGDZuK018896;\n\tMon, 11 Jun 2018 17:13:35 +0100",
            "(from aburakov@localhost)\n\tby sivswdev01.ir.intel.com with LOCAL id w5BGDZ1S018891;\n\tMon, 11 Jun 2018 17:13:35 +0100"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.49,502,1520924400\"; d=\"scan'208\";a=\"46905978\"",
        "From": "Anatoly Burakov <anatoly.burakov@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Bruce Richardson <bruce.richardson@intel.com>",
        "Date": "Mon, 11 Jun 2018 17:13:34 +0100",
        "Message-Id": "<b20eb6381a2a0f07766d0d9a9515689889a43f3d.1528716160.git.anatoly.burakov@intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "In-Reply-To": [
            "<fd5598072a48e941be2f851b6286078b50e0c7b2.1528716160.git.anatoly.burakov@intel.com>",
            "<fd5598072a48e941be2f851b6286078b50e0c7b2.1528716160.git.anatoly.burakov@intel.com>"
        ],
        "References": [
            "<fd5598072a48e941be2f851b6286078b50e0c7b2.1528716160.git.anatoly.burakov@intel.com>",
            "<fd5598072a48e941be2f851b6286078b50e0c7b2.1528716160.git.anatoly.burakov@intel.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH 3/3] eal: make memory segment preallocation\n\tOS-specific",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "In the perfect world, it wouldn't matter how much memory was\npreallocated because most of it was always going to be private\nanonymous zero-page mappings for the duration of the program.\nHowever, in practice, due to peculiarities of FreeBSD, we need\nto additionally limit memory allocation there. This patch moves\nthe segment preallocation to EAL private functions that will be\nimplemented by an OS-specific EAL rather than being in the common\nmemory-related code.\n\nSince there is no support for growing/shrinking memory use at\nruntime on FreeBSD anyway, this does not inhibit any functionality\nbut makes core dumps faster even on default settings.\n\nSigned-off-by: Anatoly Burakov <anatoly.burakov@intel.com>\n---\n\nNotes:\n    For Linuxapp, this is 99% code move (aside from slight changes due to\n    code deduplication between Linuxapp EAL and old common memory code),\n    while for FreeBSD it's mostly code move but with changes due to\n    dropping 32-bit code and implementing FreeBSD-specific limits on\n    memory preallocation outlined in the commit.\n\n lib/librte_eal/bsdapp/eal/eal_memory.c    | 215 ++++++++++++\n lib/librte_eal/common/eal_common_memory.c | 386 +---------------------\n lib/librte_eal/common/eal_private.h       |  12 +\n lib/librte_eal/linuxapp/eal/eal_memory.c  | 341 +++++++++++++++++++\n 4 files changed, 569 insertions(+), 385 deletions(-)",
    "diff": "diff --git a/lib/librte_eal/bsdapp/eal/eal_memory.c b/lib/librte_eal/bsdapp/eal/eal_memory.c\nindex 21a390fac..3dc427bd8 100644\n--- a/lib/librte_eal/bsdapp/eal/eal_memory.c\n+++ b/lib/librte_eal/bsdapp/eal/eal_memory.c\n@@ -12,6 +12,7 @@\n \n #include <rte_eal.h>\n #include <rte_eal_memconfig.h>\n+#include <rte_errno.h>\n #include <rte_log.h>\n #include <rte_string_fns.h>\n #include \"eal_private.h\"\n@@ -300,3 +301,217 @@ rte_eal_using_phys_addrs(void)\n {\n \treturn 0;\n }\n+\n+static uint64_t\n+get_mem_amount(uint64_t page_sz, uint64_t max_mem)\n+{\n+\tuint64_t area_sz, max_pages;\n+\n+\t/* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */\n+\tmax_pages = RTE_MAX_MEMSEG_PER_LIST;\n+\tmax_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);\n+\n+\tarea_sz = RTE_MIN(page_sz * max_pages, max_mem);\n+\n+\t/* make sure the list isn't smaller than the page size */\n+\tarea_sz = RTE_MAX(area_sz, page_sz);\n+\n+\treturn RTE_ALIGN(area_sz, page_sz);\n+}\n+\n+#define MEMSEG_LIST_FMT \"memseg-%\" PRIu64 \"k-%i-%i\"\n+static int\n+alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,\n+\t\tint n_segs, int socket_id, int type_msl_idx)\n+{\n+\tchar name[RTE_FBARRAY_NAME_LEN];\n+\n+\tsnprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,\n+\t\t type_msl_idx);\n+\tif (rte_fbarray_init(&msl->memseg_arr, name, n_segs,\n+\t\t\tsizeof(struct rte_memseg))) {\n+\t\tRTE_LOG(ERR, EAL, \"Cannot allocate memseg list: %s\\n\",\n+\t\t\trte_strerror(rte_errno));\n+\t\treturn -1;\n+\t}\n+\n+\tmsl->page_sz = page_sz;\n+\tmsl->socket_id = socket_id;\n+\tmsl->base_va = NULL;\n+\n+\tRTE_LOG(DEBUG, EAL, \"Memseg list allocated: 0x%zxkB at socket %i\\n\",\n+\t\t\t(size_t)page_sz >> 10, socket_id);\n+\n+\treturn 0;\n+}\n+\n+static int\n+alloc_va_space(struct rte_memseg_list *msl)\n+{\n+\tuint64_t page_sz;\n+\tsize_t mem_sz;\n+\tvoid *addr;\n+\tint flags = 0;\n+\n+#ifdef RTE_ARCH_PPC_64\n+\tflags |= MAP_HUGETLB;\n+#endif\n+\n+\tpage_sz = msl->page_sz;\n+\tmem_sz = page_sz * msl->memseg_arr.len;\n+\n+\taddr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);\n+\tif (addr == NULL) {\n+\t\tif (rte_errno == EADDRNOTAVAIL)\n+\t\t\tRTE_LOG(ERR, EAL, \"Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\\n\",\n+\t\t\t\t(unsigned long long)mem_sz, msl->base_va);\n+\t\telse\n+\t\t\tRTE_LOG(ERR, EAL, \"Cannot reserve memory\\n\");\n+\t\treturn -1;\n+\t}\n+\tmsl->base_va = addr;\n+\n+\treturn 0;\n+}\n+\n+\n+static int\n+memseg_primary_init(void)\n+{\n+\tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n+\tint hpi_idx, msl_idx = 0;\n+\tstruct rte_memseg_list *msl;\n+\tuint64_t max_mem, total_mem;\n+\n+\t/* no-huge does not need this at all */\n+\tif (internal_config.no_hugetlbfs)\n+\t\treturn 0;\n+\n+\t/* FreeBSD has an issue where core dump will dump the entire memory\n+\t * contents, including anonymous zero-page memory. 
Therefore, while we\n+\t * will be limiting total amount of memory to RTE_MAX_MEM_MB, we will\n+\t * also be further limiting total memory amount to whatever memory is\n+\t * available to us through contigmem driver (plus spacing blocks).\n+\t *\n+\t * so, at each stage, we will be checking how much memory we are\n+\t * preallocating, and adjust all the values accordingly.\n+\t */\n+\n+\tmax_mem = (uint64_t)RTE_MAX_MEM_MB << 20;\n+\ttotal_mem = 0;\n+\n+\t/* create memseg lists */\n+\tfor (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;\n+\t\t\thpi_idx++) {\n+\t\tuint64_t max_type_mem, total_type_mem = 0;\n+\t\tuint64_t avail_mem;\n+\t\tint type_msl_idx, max_segs, avail_segs, total_segs = 0;\n+\t\tstruct hugepage_info *hpi;\n+\t\tuint64_t hugepage_sz;\n+\n+\t\thpi = &internal_config.hugepage_info[hpi_idx];\n+\t\thugepage_sz = hpi->hugepage_sz;\n+\n+\t\t/* no NUMA support on FreeBSD */\n+\n+\t\t/* check if we've already exceeded total memory amount */\n+\t\tif (total_mem >= max_mem)\n+\t\t\tbreak;\n+\n+\t\t/* first, calculate theoretical limits according to config */\n+\t\tmax_type_mem = RTE_MIN(max_mem - total_mem,\n+\t\t\t(uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20);\n+\t\tmax_segs = RTE_MAX_MEMSEG_PER_TYPE;\n+\n+\t\t/* now, limit all of that to whatever will actually be\n+\t\t * available to us, because without dynamic allocation support,\n+\t\t * all of that extra memory will be sitting there being useless\n+\t\t * and slowing down core dumps in case of a crash.\n+\t\t *\n+\t\t * we need (N*2)-1 segments because we cannot guarantee that\n+\t\t * each segment will be IOVA-contiguous with the previous one,\n+\t\t * so we will allocate more and put spaces inbetween segments\n+\t\t * that are non-contiguous.\n+\t\t */\n+\t\tavail_segs = (hpi->num_pages[0] * 2) - 1;\n+\t\tavail_mem = avail_segs * hugepage_sz;\n+\n+\t\tmax_type_mem = RTE_MIN(avail_mem, max_type_mem);\n+\t\tmax_segs = RTE_MIN(avail_segs, max_segs);\n+\n+\t\ttype_msl_idx = 0;\n+\t\twhile (total_type_mem < max_type_mem &&\n+\t\t\t\ttotal_segs < max_segs) {\n+\t\t\tuint64_t cur_max_mem, cur_mem;\n+\t\t\tunsigned int n_segs;\n+\n+\t\t\tif (msl_idx >= RTE_MAX_MEMSEG_LISTS) {\n+\t\t\t\tRTE_LOG(ERR, EAL,\n+\t\t\t\t\t\"No more space in memseg lists, please increase %s\\n\",\n+\t\t\t\t\tRTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\n+\t\t\tmsl = &mcfg->memsegs[msl_idx++];\n+\n+\t\t\tcur_max_mem = max_type_mem - total_type_mem;\n+\n+\t\t\tcur_mem = get_mem_amount(hugepage_sz,\n+\t\t\t\t\tcur_max_mem);\n+\t\t\tn_segs = cur_mem / hugepage_sz;\n+\n+\t\t\tif (alloc_memseg_list(msl, hugepage_sz, n_segs,\n+\t\t\t\t\t0, type_msl_idx))\n+\t\t\t\treturn -1;\n+\n+\t\t\ttotal_segs += msl->memseg_arr.len;\n+\t\t\ttotal_type_mem = total_segs * hugepage_sz;\n+\t\t\ttype_msl_idx++;\n+\n+\t\t\tif (alloc_va_space(msl)) {\n+\t\t\t\tRTE_LOG(ERR, EAL, \"Cannot allocate VA space for memseg list\\n\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t\ttotal_mem += total_type_mem;\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+memseg_secondary_init(void)\n+{\n+\tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n+\tint msl_idx = 0;\n+\tstruct rte_memseg_list *msl;\n+\n+\tfor (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {\n+\n+\t\tmsl = &mcfg->memsegs[msl_idx];\n+\n+\t\t/* skip empty memseg lists */\n+\t\tif (msl->memseg_arr.len == 0)\n+\t\t\tcontinue;\n+\n+\t\tif (rte_fbarray_attach(&msl->memseg_arr)) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Cannot attach to primary process memseg lists\\n\");\n+\t\t\treturn 
-1;\n+\t\t}\n+\n+\t\t/* preallocate VA space */\n+\t\tif (alloc_va_space(msl)) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Cannot preallocate VA space for hugepage memory\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_eal_memseg_init(void)\n+{\n+\treturn rte_eal_process_type() == RTE_PROC_PRIMARY ?\n+\t\t\tmemseg_primary_init() :\n+\t\t\tmemseg_secondary_init();\n+}\ndiff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c\nindex 4f0688f9d..4b7389ed4 100644\n--- a/lib/librte_eal/common/eal_common_memory.c\n+++ b/lib/librte_eal/common/eal_common_memory.c\n@@ -153,382 +153,6 @@ eal_get_virtual_area(void *requested_addr, size_t *size,\n \treturn aligned_addr;\n }\n \n-static uint64_t\n-get_mem_amount(uint64_t page_sz, uint64_t max_mem)\n-{\n-\tuint64_t area_sz, max_pages;\n-\n-\t/* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */\n-\tmax_pages = RTE_MAX_MEMSEG_PER_LIST;\n-\tmax_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);\n-\n-\tarea_sz = RTE_MIN(page_sz * max_pages, max_mem);\n-\n-\t/* make sure the list isn't smaller than the page size */\n-\tarea_sz = RTE_MAX(area_sz, page_sz);\n-\n-\treturn RTE_ALIGN(area_sz, page_sz);\n-}\n-\n-static int\n-free_memseg_list(struct rte_memseg_list *msl)\n-{\n-\tif (rte_fbarray_destroy(&msl->memseg_arr)) {\n-\t\tRTE_LOG(ERR, EAL, \"Cannot destroy memseg list\\n\");\n-\t\treturn -1;\n-\t}\n-\tmemset(msl, 0, sizeof(*msl));\n-\treturn 0;\n-}\n-\n-static int\n-alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,\n-\t\tuint64_t max_mem, int socket_id, int type_msl_idx)\n-{\n-\tchar name[RTE_FBARRAY_NAME_LEN];\n-\tuint64_t mem_amount;\n-\tint max_segs;\n-\n-\tmem_amount = get_mem_amount(page_sz, max_mem);\n-\tmax_segs = mem_amount / page_sz;\n-\n-\tsnprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,\n-\t\t type_msl_idx);\n-\tif (rte_fbarray_init(&msl->memseg_arr, name, max_segs,\n-\t\t\tsizeof(struct rte_memseg))) {\n-\t\tRTE_LOG(ERR, EAL, \"Cannot allocate memseg list: %s\\n\",\n-\t\t\trte_strerror(rte_errno));\n-\t\treturn -1;\n-\t}\n-\n-\tmsl->page_sz = page_sz;\n-\tmsl->socket_id = socket_id;\n-\tmsl->base_va = NULL;\n-\n-\tRTE_LOG(DEBUG, EAL, \"Memseg list allocated: 0x%zxkB at socket %i\\n\",\n-\t\t\t(size_t)page_sz >> 10, socket_id);\n-\n-\treturn 0;\n-}\n-\n-static int\n-alloc_va_space(struct rte_memseg_list *msl)\n-{\n-\tuint64_t page_sz;\n-\tsize_t mem_sz;\n-\tvoid *addr;\n-\tint flags = 0;\n-\n-#ifdef RTE_ARCH_PPC_64\n-\tflags |= MAP_HUGETLB;\n-#endif\n-\n-\tpage_sz = msl->page_sz;\n-\tmem_sz = page_sz * msl->memseg_arr.len;\n-\n-\taddr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);\n-\tif (addr == NULL) {\n-\t\tif (rte_errno == EADDRNOTAVAIL)\n-\t\t\tRTE_LOG(ERR, EAL, \"Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\\n\",\n-\t\t\t\t(unsigned long long)mem_sz, msl->base_va);\n-\t\telse\n-\t\t\tRTE_LOG(ERR, EAL, \"Cannot reserve memory\\n\");\n-\t\treturn -1;\n-\t}\n-\tmsl->base_va = addr;\n-\n-\treturn 0;\n-}\n-\n-static int __rte_unused\n-memseg_primary_init_32(void)\n-{\n-\tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n-\tint active_sockets, hpi_idx, msl_idx = 0;\n-\tunsigned int socket_id, i;\n-\tstruct rte_memseg_list *msl;\n-\tuint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;\n-\tuint64_t max_mem;\n-\n-\t/* no-huge does not need this at all */\n-\tif (internal_config.no_hugetlbfs)\n-\t\treturn 0;\n-\n-\t/* this is a giant hack, but 
desperate times call for desperate\n-\t * measures. in legacy 32-bit mode, we cannot preallocate VA space,\n-\t * because having upwards of 2 gigabytes of VA space already mapped will\n-\t * interfere with our ability to map and sort hugepages.\n-\t *\n-\t * therefore, in legacy 32-bit mode, we will be initializing memseg\n-\t * lists much later - in eal_memory.c, right after we unmap all the\n-\t * unneeded pages. this will not affect secondary processes, as those\n-\t * should be able to mmap the space without (too many) problems.\n-\t */\n-\tif (internal_config.legacy_mem)\n-\t\treturn 0;\n-\n-\t/* 32-bit mode is a very special case. we cannot know in advance where\n-\t * the user will want to allocate their memory, so we have to do some\n-\t * heuristics.\n-\t */\n-\tactive_sockets = 0;\n-\ttotal_requested_mem = 0;\n-\tif (internal_config.force_sockets)\n-\t\tfor (i = 0; i < rte_socket_count(); i++) {\n-\t\t\tuint64_t mem;\n-\n-\t\t\tsocket_id = rte_socket_id_by_idx(i);\n-\t\t\tmem = internal_config.socket_mem[socket_id];\n-\n-\t\t\tif (mem == 0)\n-\t\t\t\tcontinue;\n-\n-\t\t\tactive_sockets++;\n-\t\t\ttotal_requested_mem += mem;\n-\t\t}\n-\telse\n-\t\ttotal_requested_mem = internal_config.memory;\n-\n-\tmax_mem = (uint64_t)RTE_MAX_MEM_MB << 20;\n-\tif (total_requested_mem > max_mem) {\n-\t\tRTE_LOG(ERR, EAL, \"Invalid parameters: 32-bit process can at most use %uM of memory\\n\",\n-\t\t\t\t(unsigned int)(max_mem >> 20));\n-\t\treturn -1;\n-\t}\n-\ttotal_extra_mem = max_mem - total_requested_mem;\n-\textra_mem_per_socket = active_sockets == 0 ? total_extra_mem :\n-\t\t\ttotal_extra_mem / active_sockets;\n-\n-\t/* the allocation logic is a little bit convoluted, but here's how it\n-\t * works, in a nutshell:\n-\t *  - if user hasn't specified on which sockets to allocate memory via\n-\t *    --socket-mem, we allocate all of our memory on master core socket.\n-\t *  - if user has specified sockets to allocate memory on, there may be\n-\t *    some \"unused\" memory left (e.g. 
if user has specified --socket-mem\n-\t *    such that not all memory adds up to 2 gigabytes), so add it to all\n-\t *    sockets that are in use equally.\n-\t *\n-\t * page sizes are sorted by size in descending order, so we can safely\n-\t * assume that we dispense with bigger page sizes first.\n-\t */\n-\n-\t/* create memseg lists */\n-\tfor (i = 0; i < rte_socket_count(); i++) {\n-\t\tint hp_sizes = (int) internal_config.num_hugepage_sizes;\n-\t\tuint64_t max_socket_mem, cur_socket_mem;\n-\t\tunsigned int master_lcore_socket;\n-\t\tstruct rte_config *cfg = rte_eal_get_configuration();\n-\t\tbool skip;\n-\n-\t\tsocket_id = rte_socket_id_by_idx(i);\n-\n-#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES\n-\t\tif (socket_id > 0)\n-\t\t\tbreak;\n-#endif\n-\n-\t\t/* if we didn't specifically request memory on this socket */\n-\t\tskip = active_sockets != 0 &&\n-\t\t\t\tinternal_config.socket_mem[socket_id] == 0;\n-\t\t/* ...or if we didn't specifically request memory on *any*\n-\t\t * socket, and this is not master lcore\n-\t\t */\n-\t\tmaster_lcore_socket = rte_lcore_to_socket_id(cfg->master_lcore);\n-\t\tskip |= active_sockets == 0 && socket_id != master_lcore_socket;\n-\n-\t\tif (skip) {\n-\t\t\tRTE_LOG(DEBUG, EAL, \"Will not preallocate memory on socket %u\\n\",\n-\t\t\t\t\tsocket_id);\n-\t\t\tcontinue;\n-\t\t}\n-\n-\t\t/* max amount of memory on this socket */\n-\t\tmax_socket_mem = (active_sockets != 0 ?\n-\t\t\t\t\tinternal_config.socket_mem[socket_id] :\n-\t\t\t\t\tinternal_config.memory) +\n-\t\t\t\t\textra_mem_per_socket;\n-\t\tcur_socket_mem = 0;\n-\n-\t\tfor (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {\n-\t\t\tuint64_t max_pagesz_mem, cur_pagesz_mem = 0;\n-\t\t\tuint64_t hugepage_sz;\n-\t\t\tstruct hugepage_info *hpi;\n-\t\t\tint type_msl_idx, max_segs, total_segs = 0;\n-\n-\t\t\thpi = &internal_config.hugepage_info[hpi_idx];\n-\t\t\thugepage_sz = hpi->hugepage_sz;\n-\n-\t\t\t/* check if pages are actually available */\n-\t\t\tif (hpi->num_pages[socket_id] == 0)\n-\t\t\t\tcontinue;\n-\n-\t\t\tmax_segs = RTE_MAX_MEMSEG_PER_TYPE;\n-\t\t\tmax_pagesz_mem = max_socket_mem - cur_socket_mem;\n-\n-\t\t\t/* make it multiple of page size */\n-\t\t\tmax_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,\n-\t\t\t\t\thugepage_sz);\n-\n-\t\t\tRTE_LOG(DEBUG, EAL, \"Attempting to preallocate \"\n-\t\t\t\t\t\"%\" PRIu64 \"M on socket %i\\n\",\n-\t\t\t\t\tmax_pagesz_mem >> 20, socket_id);\n-\n-\t\t\ttype_msl_idx = 0;\n-\t\t\twhile (cur_pagesz_mem < max_pagesz_mem &&\n-\t\t\t\t\ttotal_segs < max_segs) {\n-\t\t\t\tif (msl_idx >= RTE_MAX_MEMSEG_LISTS) {\n-\t\t\t\t\tRTE_LOG(ERR, EAL,\n-\t\t\t\t\t\t\"No more space in memseg lists, please increase %s\\n\",\n-\t\t\t\t\t\tRTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));\n-\t\t\t\t\treturn -1;\n-\t\t\t\t}\n-\n-\t\t\t\tmsl = &mcfg->memsegs[msl_idx];\n-\n-\t\t\t\tif (alloc_memseg_list(msl, hugepage_sz,\n-\t\t\t\t\t\tmax_pagesz_mem, socket_id,\n-\t\t\t\t\t\ttype_msl_idx)) {\n-\t\t\t\t\t/* failing to allocate a memseg list is\n-\t\t\t\t\t * a serious error.\n-\t\t\t\t\t */\n-\t\t\t\t\tRTE_LOG(ERR, EAL, \"Cannot allocate memseg list\\n\");\n-\t\t\t\t\treturn -1;\n-\t\t\t\t}\n-\n-\t\t\t\tif (alloc_va_space(msl)) {\n-\t\t\t\t\t/* if we couldn't allocate VA space, we\n-\t\t\t\t\t * can try with smaller page sizes.\n-\t\t\t\t\t */\n-\t\t\t\t\tRTE_LOG(ERR, EAL, \"Cannot allocate VA space for memseg list, retrying with different page size\\n\");\n-\t\t\t\t\t/* deallocate memseg list */\n-\t\t\t\t\tif (free_memseg_list(msl))\n-\t\t\t\t\t\treturn 
-1;\n-\t\t\t\t\tbreak;\n-\t\t\t\t}\n-\n-\t\t\t\ttotal_segs += msl->memseg_arr.len;\n-\t\t\t\tcur_pagesz_mem = total_segs * hugepage_sz;\n-\t\t\t\ttype_msl_idx++;\n-\t\t\t\tmsl_idx++;\n-\t\t\t}\n-\t\t\tcur_socket_mem += cur_pagesz_mem;\n-\t\t}\n-\t\tif (cur_socket_mem == 0) {\n-\t\t\tRTE_LOG(ERR, EAL, \"Cannot allocate VA space on socket %u\\n\",\n-\t\t\t\tsocket_id);\n-\t\t\treturn -1;\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int __rte_unused\n-memseg_primary_init(void)\n-{\n-\tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n-\tint i, socket_id, hpi_idx, msl_idx = 0;\n-\tstruct rte_memseg_list *msl;\n-\tuint64_t max_mem, total_mem;\n-\n-\t/* no-huge does not need this at all */\n-\tif (internal_config.no_hugetlbfs)\n-\t\treturn 0;\n-\n-\tmax_mem = (uint64_t)RTE_MAX_MEM_MB << 20;\n-\ttotal_mem = 0;\n-\n-\t/* create memseg lists */\n-\tfor (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;\n-\t\t\thpi_idx++) {\n-\t\tstruct hugepage_info *hpi;\n-\t\tuint64_t hugepage_sz;\n-\n-\t\thpi = &internal_config.hugepage_info[hpi_idx];\n-\t\thugepage_sz = hpi->hugepage_sz;\n-\n-\t\tfor (i = 0; i < (int) rte_socket_count(); i++) {\n-\t\t\tuint64_t max_type_mem, total_type_mem = 0;\n-\t\t\tint type_msl_idx, max_segs, total_segs = 0;\n-\n-\t\t\tsocket_id = rte_socket_id_by_idx(i);\n-\n-#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES\n-\t\t\tif (socket_id > 0)\n-\t\t\t\tbreak;\n-#endif\n-\n-\t\t\tif (total_mem >= max_mem)\n-\t\t\t\tbreak;\n-\n-\t\t\tmax_type_mem = RTE_MIN(max_mem - total_mem,\n-\t\t\t\t(uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20);\n-\t\t\tmax_segs = RTE_MAX_MEMSEG_PER_TYPE;\n-\n-\t\t\ttype_msl_idx = 0;\n-\t\t\twhile (total_type_mem < max_type_mem &&\n-\t\t\t\t\ttotal_segs < max_segs) {\n-\t\t\t\tuint64_t cur_max_mem;\n-\t\t\t\tif (msl_idx >= RTE_MAX_MEMSEG_LISTS) {\n-\t\t\t\t\tRTE_LOG(ERR, EAL,\n-\t\t\t\t\t\t\"No more space in memseg lists, please increase %s\\n\",\n-\t\t\t\t\t\tRTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));\n-\t\t\t\t\treturn -1;\n-\t\t\t\t}\n-\n-\t\t\t\tmsl = &mcfg->memsegs[msl_idx++];\n-\n-\t\t\t\tcur_max_mem = max_type_mem - total_type_mem;\n-\t\t\t\tif (alloc_memseg_list(msl, hugepage_sz,\n-\t\t\t\t\t\tcur_max_mem, socket_id,\n-\t\t\t\t\t\ttype_msl_idx))\n-\t\t\t\t\treturn -1;\n-\n-\t\t\t\ttotal_segs += msl->memseg_arr.len;\n-\t\t\t\ttotal_type_mem = total_segs * hugepage_sz;\n-\t\t\t\ttype_msl_idx++;\n-\n-\t\t\t\tif (alloc_va_space(msl)) {\n-\t\t\t\t\tRTE_LOG(ERR, EAL, \"Cannot allocate VA space for memseg list\\n\");\n-\t\t\t\t\treturn -1;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t\ttotal_mem += total_type_mem;\n-\t\t}\n-\t}\n-\treturn 0;\n-}\n-\n-static int\n-memseg_secondary_init(void)\n-{\n-\tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n-\tint msl_idx = 0;\n-\tstruct rte_memseg_list *msl;\n-\n-\tfor (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {\n-\n-\t\tmsl = &mcfg->memsegs[msl_idx];\n-\n-\t\t/* skip empty memseg lists */\n-\t\tif (msl->memseg_arr.len == 0)\n-\t\t\tcontinue;\n-\n-\t\tif (rte_fbarray_attach(&msl->memseg_arr)) {\n-\t\t\tRTE_LOG(ERR, EAL, \"Cannot attach to primary process memseg lists\\n\");\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\t/* preallocate VA space */\n-\t\tif (alloc_va_space(msl)) {\n-\t\t\tRTE_LOG(ERR, EAL, \"Cannot preallocate VA space for hugepage memory\\n\");\n-\t\t\treturn -1;\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n static struct rte_memseg *\n virt2memseg(const void *addr, const struct rte_memseg_list *msl)\n {\n@@ -918,15 +542,7 @@ rte_eal_memory_init(void)\n \t/* lock mem hotplug here, to 
prevent races while we init */\n \trte_rwlock_read_lock(&mcfg->memory_hotplug_lock);\n \n-\tretval = rte_eal_process_type() == RTE_PROC_PRIMARY ?\n-#ifndef RTE_ARCH_64\n-\t\t\tmemseg_primary_init_32() :\n-#else\n-\t\t\tmemseg_primary_init() :\n-#endif\n-\t\t\tmemseg_secondary_init();\n-\n-\tif (retval < 0)\n+\tif (rte_eal_memseg_init() < 0)\n \t\tgoto fail;\n \n \tif (eal_memalloc_init() < 0)\ndiff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h\nindex bdadc4d50..b742f4c58 100644\n--- a/lib/librte_eal/common/eal_private.h\n+++ b/lib/librte_eal/common/eal_private.h\n@@ -46,6 +46,18 @@ void eal_log_set_default(FILE *default_log);\n  */\n int rte_eal_cpu_init(void);\n \n+/**\n+ * Create memseg lists\n+ *\n+ * This function is private to EAL.\n+ *\n+ * Preallocate virtual memory.\n+ *\n+ * @return\n+ *   0 on success, negative on error\n+ */\n+int rte_eal_memseg_init(void);\n+\n /**\n  * Map memory\n  *\ndiff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c\nindex c917de1c2..b8c8a59e0 100644\n--- a/lib/librte_eal/linuxapp/eal/eal_memory.c\n+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c\n@@ -767,6 +767,34 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)\n \treturn 0;\n }\n \n+static uint64_t\n+get_mem_amount(uint64_t page_sz, uint64_t max_mem)\n+{\n+\tuint64_t area_sz, max_pages;\n+\n+\t/* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */\n+\tmax_pages = RTE_MAX_MEMSEG_PER_LIST;\n+\tmax_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);\n+\n+\tarea_sz = RTE_MIN(page_sz * max_pages, max_mem);\n+\n+\t/* make sure the list isn't smaller than the page size */\n+\tarea_sz = RTE_MAX(area_sz, page_sz);\n+\n+\treturn RTE_ALIGN(area_sz, page_sz);\n+}\n+\n+static int\n+free_memseg_list(struct rte_memseg_list *msl)\n+{\n+\tif (rte_fbarray_destroy(&msl->memseg_arr)) {\n+\t\tRTE_LOG(ERR, EAL, \"Cannot destroy memseg list\\n\");\n+\t\treturn -1;\n+\t}\n+\tmemset(msl, 0, sizeof(*msl));\n+\treturn 0;\n+}\n+\n #define MEMSEG_LIST_FMT \"memseg-%\" PRIu64 \"k-%i-%i\"\n static int\n alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,\n@@ -1840,3 +1868,316 @@ rte_eal_using_phys_addrs(void)\n {\n \treturn phys_addrs_available;\n }\n+\n+static int __rte_unused\n+memseg_primary_init_32(void)\n+{\n+\tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n+\tint active_sockets, hpi_idx, msl_idx = 0;\n+\tunsigned int socket_id, i;\n+\tstruct rte_memseg_list *msl;\n+\tuint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;\n+\tuint64_t max_mem;\n+\n+\t/* no-huge does not need this at all */\n+\tif (internal_config.no_hugetlbfs)\n+\t\treturn 0;\n+\n+\t/* this is a giant hack, but desperate times call for desperate\n+\t * measures. in legacy 32-bit mode, we cannot preallocate VA space,\n+\t * because having upwards of 2 gigabytes of VA space already mapped will\n+\t * interfere with our ability to map and sort hugepages.\n+\t *\n+\t * therefore, in legacy 32-bit mode, we will be initializing memseg\n+\t * lists much later - in eal_memory.c, right after we unmap all the\n+\t * unneeded pages. this will not affect secondary processes, as those\n+\t * should be able to mmap the space without (too many) problems.\n+\t */\n+\tif (internal_config.legacy_mem)\n+\t\treturn 0;\n+\n+\t/* 32-bit mode is a very special case. 
we cannot know in advance where\n+\t * the user will want to allocate their memory, so we have to do some\n+\t * heuristics.\n+\t */\n+\tactive_sockets = 0;\n+\ttotal_requested_mem = 0;\n+\tif (internal_config.force_sockets)\n+\t\tfor (i = 0; i < rte_socket_count(); i++) {\n+\t\t\tuint64_t mem;\n+\n+\t\t\tsocket_id = rte_socket_id_by_idx(i);\n+\t\t\tmem = internal_config.socket_mem[socket_id];\n+\n+\t\t\tif (mem == 0)\n+\t\t\t\tcontinue;\n+\n+\t\t\tactive_sockets++;\n+\t\t\ttotal_requested_mem += mem;\n+\t\t}\n+\telse\n+\t\ttotal_requested_mem = internal_config.memory;\n+\n+\tmax_mem = (uint64_t)RTE_MAX_MEM_MB << 20;\n+\tif (total_requested_mem > max_mem) {\n+\t\tRTE_LOG(ERR, EAL, \"Invalid parameters: 32-bit process can at most use %uM of memory\\n\",\n+\t\t\t\t(unsigned int)(max_mem >> 20));\n+\t\treturn -1;\n+\t}\n+\ttotal_extra_mem = max_mem - total_requested_mem;\n+\textra_mem_per_socket = active_sockets == 0 ? total_extra_mem :\n+\t\t\ttotal_extra_mem / active_sockets;\n+\n+\t/* the allocation logic is a little bit convoluted, but here's how it\n+\t * works, in a nutshell:\n+\t *  - if user hasn't specified on which sockets to allocate memory via\n+\t *    --socket-mem, we allocate all of our memory on master core socket.\n+\t *  - if user has specified sockets to allocate memory on, there may be\n+\t *    some \"unused\" memory left (e.g. if user has specified --socket-mem\n+\t *    such that not all memory adds up to 2 gigabytes), so add it to all\n+\t *    sockets that are in use equally.\n+\t *\n+\t * page sizes are sorted by size in descending order, so we can safely\n+\t * assume that we dispense with bigger page sizes first.\n+\t */\n+\n+\t/* create memseg lists */\n+\tfor (i = 0; i < rte_socket_count(); i++) {\n+\t\tint hp_sizes = (int) internal_config.num_hugepage_sizes;\n+\t\tuint64_t max_socket_mem, cur_socket_mem;\n+\t\tunsigned int master_lcore_socket;\n+\t\tstruct rte_config *cfg = rte_eal_get_configuration();\n+\t\tbool skip;\n+\n+\t\tsocket_id = rte_socket_id_by_idx(i);\n+\n+#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES\n+\t\tif (socket_id > 0)\n+\t\t\tbreak;\n+#endif\n+\n+\t\t/* if we didn't specifically request memory on this socket */\n+\t\tskip = active_sockets != 0 &&\n+\t\t\t\tinternal_config.socket_mem[socket_id] == 0;\n+\t\t/* ...or if we didn't specifically request memory on *any*\n+\t\t * socket, and this is not master lcore\n+\t\t */\n+\t\tmaster_lcore_socket = rte_lcore_to_socket_id(cfg->master_lcore);\n+\t\tskip |= active_sockets == 0 && socket_id != master_lcore_socket;\n+\n+\t\tif (skip) {\n+\t\t\tRTE_LOG(DEBUG, EAL, \"Will not preallocate memory on socket %u\\n\",\n+\t\t\t\t\tsocket_id);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* max amount of memory on this socket */\n+\t\tmax_socket_mem = (active_sockets != 0 ?\n+\t\t\t\t\tinternal_config.socket_mem[socket_id] :\n+\t\t\t\t\tinternal_config.memory) +\n+\t\t\t\t\textra_mem_per_socket;\n+\t\tcur_socket_mem = 0;\n+\n+\t\tfor (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {\n+\t\t\tuint64_t max_pagesz_mem, cur_pagesz_mem = 0;\n+\t\t\tuint64_t hugepage_sz;\n+\t\t\tstruct hugepage_info *hpi;\n+\t\t\tint type_msl_idx, max_segs, total_segs = 0;\n+\n+\t\t\thpi = &internal_config.hugepage_info[hpi_idx];\n+\t\t\thugepage_sz = hpi->hugepage_sz;\n+\n+\t\t\t/* check if pages are actually available */\n+\t\t\tif (hpi->num_pages[socket_id] == 0)\n+\t\t\t\tcontinue;\n+\n+\t\t\tmax_segs = RTE_MAX_MEMSEG_PER_TYPE;\n+\t\t\tmax_pagesz_mem = max_socket_mem - cur_socket_mem;\n+\n+\t\t\t/* make it multiple of page size 
*/\n+\t\t\tmax_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,\n+\t\t\t\t\thugepage_sz);\n+\n+\t\t\tRTE_LOG(DEBUG, EAL, \"Attempting to preallocate \"\n+\t\t\t\t\t\"%\" PRIu64 \"M on socket %i\\n\",\n+\t\t\t\t\tmax_pagesz_mem >> 20, socket_id);\n+\n+\t\t\ttype_msl_idx = 0;\n+\t\t\twhile (cur_pagesz_mem < max_pagesz_mem &&\n+\t\t\t\t\ttotal_segs < max_segs) {\n+\t\t\t\tuint64_t cur_mem;\n+\t\t\t\tunsigned int n_segs;\n+\n+\t\t\t\tif (msl_idx >= RTE_MAX_MEMSEG_LISTS) {\n+\t\t\t\t\tRTE_LOG(ERR, EAL,\n+\t\t\t\t\t\t\"No more space in memseg lists, please increase %s\\n\",\n+\t\t\t\t\t\tRTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\n+\t\t\t\tmsl = &mcfg->memsegs[msl_idx];\n+\n+\t\t\t\tcur_mem = get_mem_amount(hugepage_sz,\n+\t\t\t\t\t\tmax_pagesz_mem);\n+\t\t\t\tn_segs = cur_mem / hugepage_sz;\n+\n+\t\t\t\tif (alloc_memseg_list(msl, hugepage_sz, n_segs,\n+\t\t\t\t\t\tsocket_id, type_msl_idx)) {\n+\t\t\t\t\t/* failing to allocate a memseg list is\n+\t\t\t\t\t * a serious error.\n+\t\t\t\t\t */\n+\t\t\t\t\tRTE_LOG(ERR, EAL, \"Cannot allocate memseg list\\n\");\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\n+\t\t\t\tif (alloc_va_space(msl)) {\n+\t\t\t\t\t/* if we couldn't allocate VA space, we\n+\t\t\t\t\t * can try with smaller page sizes.\n+\t\t\t\t\t */\n+\t\t\t\t\tRTE_LOG(ERR, EAL, \"Cannot allocate VA space for memseg list, retrying with different page size\\n\");\n+\t\t\t\t\t/* deallocate memseg list */\n+\t\t\t\t\tif (free_memseg_list(msl))\n+\t\t\t\t\t\treturn -1;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\n+\t\t\t\ttotal_segs += msl->memseg_arr.len;\n+\t\t\t\tcur_pagesz_mem = total_segs * hugepage_sz;\n+\t\t\t\ttype_msl_idx++;\n+\t\t\t\tmsl_idx++;\n+\t\t\t}\n+\t\t\tcur_socket_mem += cur_pagesz_mem;\n+\t\t}\n+\t\tif (cur_socket_mem == 0) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Cannot allocate VA space on socket %u\\n\",\n+\t\t\t\tsocket_id);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int __rte_unused\n+memseg_primary_init(void)\n+{\n+\tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n+\tint i, socket_id, hpi_idx, msl_idx = 0;\n+\tstruct rte_memseg_list *msl;\n+\tuint64_t max_mem, total_mem;\n+\n+\t/* no-huge does not need this at all */\n+\tif (internal_config.no_hugetlbfs)\n+\t\treturn 0;\n+\n+\tmax_mem = (uint64_t)RTE_MAX_MEM_MB << 20;\n+\ttotal_mem = 0;\n+\n+\t/* create memseg lists */\n+\tfor (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;\n+\t\t\thpi_idx++) {\n+\t\tstruct hugepage_info *hpi;\n+\t\tuint64_t hugepage_sz;\n+\n+\t\thpi = &internal_config.hugepage_info[hpi_idx];\n+\t\thugepage_sz = hpi->hugepage_sz;\n+\n+\t\tfor (i = 0; i < (int) rte_socket_count(); i++) {\n+\t\t\tuint64_t max_type_mem, total_type_mem = 0;\n+\t\t\tint type_msl_idx, max_segs, total_segs = 0;\n+\n+\t\t\tsocket_id = rte_socket_id_by_idx(i);\n+\n+#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES\n+\t\t\tif (socket_id > 0)\n+\t\t\t\tbreak;\n+#endif\n+\n+\t\t\tif (total_mem >= max_mem)\n+\t\t\t\tbreak;\n+\n+\t\t\tmax_type_mem = RTE_MIN(max_mem - total_mem,\n+\t\t\t\t(uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20);\n+\t\t\tmax_segs = RTE_MAX_MEMSEG_PER_TYPE;\n+\n+\t\t\ttype_msl_idx = 0;\n+\t\t\twhile (total_type_mem < max_type_mem &&\n+\t\t\t\t\ttotal_segs < max_segs) {\n+\t\t\t\tuint64_t cur_max_mem, cur_mem;\n+\t\t\t\tunsigned int n_segs;\n+\n+\t\t\t\tif (msl_idx >= RTE_MAX_MEMSEG_LISTS) {\n+\t\t\t\t\tRTE_LOG(ERR, EAL,\n+\t\t\t\t\t\t\"No more space in memseg lists, please increase %s\\n\",\n+\t\t\t\t\t\tRTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));\n+\t\t\t\t\treturn 
-1;\n+\t\t\t\t}\n+\n+\t\t\t\tmsl = &mcfg->memsegs[msl_idx++];\n+\n+\t\t\t\tcur_max_mem = max_type_mem - total_type_mem;\n+\n+\t\t\t\tcur_mem = get_mem_amount(hugepage_sz,\n+\t\t\t\t\t\tcur_max_mem);\n+\t\t\t\tn_segs = cur_mem / hugepage_sz;\n+\n+\t\t\t\tif (alloc_memseg_list(msl, hugepage_sz, n_segs,\n+\t\t\t\t\t\tsocket_id, type_msl_idx))\n+\t\t\t\t\treturn -1;\n+\n+\t\t\t\ttotal_segs += msl->memseg_arr.len;\n+\t\t\t\ttotal_type_mem = total_segs * hugepage_sz;\n+\t\t\t\ttype_msl_idx++;\n+\n+\t\t\t\tif (alloc_va_space(msl)) {\n+\t\t\t\t\tRTE_LOG(ERR, EAL, \"Cannot allocate VA space for memseg list\\n\");\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\ttotal_mem += total_type_mem;\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+memseg_secondary_init(void)\n+{\n+\tstruct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;\n+\tint msl_idx = 0;\n+\tstruct rte_memseg_list *msl;\n+\n+\tfor (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {\n+\n+\t\tmsl = &mcfg->memsegs[msl_idx];\n+\n+\t\t/* skip empty memseg lists */\n+\t\tif (msl->memseg_arr.len == 0)\n+\t\t\tcontinue;\n+\n+\t\tif (rte_fbarray_attach(&msl->memseg_arr)) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Cannot attach to primary process memseg lists\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* preallocate VA space */\n+\t\tif (alloc_va_space(msl)) {\n+\t\t\tRTE_LOG(ERR, EAL, \"Cannot preallocate VA space for hugepage memory\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_eal_memseg_init(void)\n+{\n+\treturn rte_eal_process_type() == RTE_PROC_PRIMARY ?\n+#ifndef RTE_ARCH_64\n+\t\t\tmemseg_primary_init_32() :\n+#else\n+\t\t\tmemseg_primary_init() :\n+#endif\n+\t\t\tmemseg_secondary_init();\n+}\n",
    "prefixes": [
        "3/3"
    ]
}
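
Since the payload is plain JSON, downstream tooling can consume the fields above directly. A small sketch (again Python requests; error handling omitted) that downloads the patch's mbox for use with `git am` and walks its series membership:

import requests

patch = requests.get("http://patchwork.dpdk.org/api/patches/40982/").json()

# "mbox" points at the raw patch in mbox form, ready for `git am`
with open("40982.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# "series" lists the series this patch belongs to
for series in patch["series"]:
    print(series["id"], series["name"], series["mbox"])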