get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch.

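For example, a minimal client sketch in Python (using the third-party "requests" package; the token value is a placeholder and the state change shown is purely illustrative, not taken from the exchange below):

import requests

BASE_URL = "http://patchwork.dpdk.org/api"
TOKEN = "YOUR-API-TOKEN"  # placeholder; generate a real token in your Patchwork profile

# GET: show a patch (read access needs no authentication).
resp = requests.get(BASE_URL + "/patches/56783/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: partially update a patch (here, only its state).
# Write access requires a token belonging to a maintainer of the project.
resp = requests.patch(
    BASE_URL + "/patches/56783/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "superseded"},
)
resp.raise_for_status()

A sample exchange for the GET operation:
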
GET /api/patches/56783/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 56783,
    "url": "http://patchwork.dpdk.org/api/patches/56783/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20190719133845.32432-3-olivier.matz@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190719133845.32432-3-olivier.matz@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190719133845.32432-3-olivier.matz@6wind.com",
    "date": "2019-07-19T13:38:43",
    "name": "[RFC,2/4] mempool: unalign size when calculating required mem amount",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "d1e77018d825a3821d1fba7a7445174680f04171",
    "submitter": {
        "id": 8,
        "url": "http://patchwork.dpdk.org/api/people/8/?format=api",
        "name": "Olivier Matz",
        "email": "olivier.matz@6wind.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20190719133845.32432-3-olivier.matz@6wind.com/mbox/",
    "series": [
        {
            "id": 5624,
            "url": "http://patchwork.dpdk.org/api/series/5624/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=5624",
            "date": "2019-07-19T13:38:41",
            "name": "mempool: avoid objects allocations across pages",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/5624/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/56783/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/56783/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 2C1861B94F;\n\tFri, 19 Jul 2019 15:39:08 +0200 (CEST)",
            "from proxy.6wind.com (host.76.145.23.62.rev.coltfrance.com\n\t[62.23.145.76]) by dpdk.org (Postfix) with ESMTP id C9766325F\n\tfor <dev@dpdk.org>; Fri, 19 Jul 2019 15:38:59 +0200 (CEST)",
            "from glumotte.dev.6wind.com. (unknown [10.16.0.195])\n\tby proxy.6wind.com (Postfix) with ESMTP id ACC9A2ED326;\n\tFri, 19 Jul 2019 15:38:59 +0200 (CEST)"
        ],
        "From": "Olivier Matz <olivier.matz@6wind.com>",
        "To": "Vamsi Krishna Attunuru <vattunuru@marvell.com>,\n\tdev@dpdk.org",
        "Cc": "Andrew Rybchenko <arybchenko@solarflare.com>,\n\tThomas Monjalon <thomas@monjalon.net>,\n\tAnatoly Burakov <anatoly.burakov@intel.com>,\n\tJerin Jacob Kollanukkaran <jerinj@marvell.com>,\n\tKokkilagadda <kirankumark@marvell.com>,\n\tFerruh Yigit <ferruh.yigit@intel.com>",
        "Date": "Fri, 19 Jul 2019 15:38:43 +0200",
        "Message-Id": "<20190719133845.32432-3-olivier.matz@6wind.com>",
        "X-Mailer": "git-send-email 2.11.0",
        "In-Reply-To": "<20190719133845.32432-1-olivier.matz@6wind.com>",
        "References": "<CH2PR18MB338160CD8EF16EEB45EED387A6C80@CH2PR18MB3381.namprd18.prod.outlook.com>\n\t<20190719133845.32432-1-olivier.matz@6wind.com>",
        "Subject": "[dpdk-dev] [RFC 2/4] mempool: unalign size when calculating\n\trequired mem amount",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The size returned by rte_mempool_op_calc_mem_size_default() is aligned\nto the specified page size. This means that with big pages, the returned\namount is more that what we really need to populate the mempool.\n\nThis problem is tempered by the allocation method of\nrte_mempool_populate_default(): in some conditions (when\ntry_iova_contig_mempool=true), it first tries to allocate all objs\nmemory in an iova contiguous area, without the alignment constraint. If\nit fails, it fallbacks to the big aligned allocation, that can also\nfallback into several smaller allocations.\n\nThis commit changes rte_mempool_op_calc_mem_size_default() to return the\nunaligned amount of memory (the alignment constraint is still returned\nvia the *align argument), and removes the optimistic contiguous\nallocation done when try_iova_contig_mempool=true.\n\nThis will make the amount of allocated memory more predictible: it will\nbe more than the optimistic contiguous allocation, but less than the big\naligned allocation.\n\nThis opens the door for the next commits that will try to prevent objets\nfrom being located across pages.\n\nSigned-off-by: Olivier Matz <olivier.matz@6wind.com>\n---\n lib/librte_mempool/rte_mempool.c     | 44 ++++--------------------------------\n lib/librte_mempool/rte_mempool.h     |  2 +-\n lib/librte_mempool/rte_mempool_ops.c |  4 +++-\n 3 files changed, 9 insertions(+), 41 deletions(-)",
    "diff": "diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c\nindex 0f29e8712..335032dc8 100644\n--- a/lib/librte_mempool/rte_mempool.c\n+++ b/lib/librte_mempool/rte_mempool.c\n@@ -430,7 +430,6 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \tunsigned mz_id, n;\n \tint ret;\n \tbool need_iova_contig_obj;\n-\tbool try_iova_contig_mempool;\n \tbool alloc_in_ext_mem;\n \n \tret = mempool_ops_alloc_once(mp);\n@@ -477,18 +476,10 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \t * wasting some space this way, but it's much nicer than looping around\n \t * trying to reserve each and every page size.\n \t *\n-\t * However, since size calculation will produce page-aligned sizes, it\n-\t * makes sense to first try and see if we can reserve the entire memzone\n-\t * in one contiguous chunk as well (otherwise we might end up wasting a\n-\t * 1G page on a 10MB memzone). If we fail to get enough contiguous\n-\t * memory, then we'll go and reserve space page-by-page.\n-\t *\n \t * We also have to take into account the fact that memory that we're\n \t * going to allocate from can belong to an externally allocated memory\n \t * area, in which case the assumption of IOVA as VA mode being\n-\t * synonymous with IOVA contiguousness will not hold. We should also try\n-\t * to go for contiguous memory even if we're in no-huge mode, because\n-\t * external memory may in fact be IOVA-contiguous.\n+\t * synonymous with IOVA contiguousness will not hold.\n \t */\n \n \t/* check if we can retrieve a valid socket ID */\n@@ -497,7 +488,6 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \t\treturn -EINVAL;\n \talloc_in_ext_mem = (ret == 1);\n \tneed_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);\n-\ttry_iova_contig_mempool = false;\n \n \tif (!need_iova_contig_obj) {\n \t\tpg_sz = 0;\n@@ -506,7 +496,6 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \t\tpg_sz = 0;\n \t\tpg_shift = 0;\n \t} else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {\n-\t\ttry_iova_contig_mempool = true;\n \t\tpg_sz = get_min_page_size(mp->socket_id);\n \t\tpg_shift = rte_bsf32(pg_sz);\n \t} else {\n@@ -518,12 +507,8 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \t\tsize_t min_chunk_size;\n \t\tunsigned int flags;\n \n-\t\tif (try_iova_contig_mempool || pg_sz == 0)\n-\t\t\tmem_size = rte_mempool_ops_calc_mem_size(mp, n,\n-\t\t\t\t\t0, &min_chunk_size, &align);\n-\t\telse\n-\t\t\tmem_size = rte_mempool_ops_calc_mem_size(mp, n,\n-\t\t\t\t\tpg_shift, &min_chunk_size, &align);\n+\t\tmem_size = rte_mempool_ops_calc_mem_size(\n+\t\t\tmp, n, pg_shift, &min_chunk_size, &align);\n \n \t\tif (mem_size < 0) {\n \t\t\tret = mem_size;\n@@ -542,31 +527,12 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \t\t/* if we're trying to reserve contiguous memory, add appropriate\n \t\t * memzone flag.\n \t\t */\n-\t\tif (try_iova_contig_mempool)\n+\t\tif (min_chunk_size == (size_t)mem_size)\n \t\t\tflags |= RTE_MEMZONE_IOVA_CONTIG;\n \n \t\tmz = rte_memzone_reserve_aligned(mz_name, mem_size,\n \t\t\t\tmp->socket_id, flags, align);\n \n-\t\t/* if we were trying to allocate contiguous memory, failed and\n-\t\t * minimum required contiguous chunk fits minimum page, adjust\n-\t\t * memzone size to the page size, and try again.\n-\t\t */\n-\t\tif (mz == NULL && try_iova_contig_mempool &&\n-\t\t\t\tmin_chunk_size <= pg_sz) {\n-\t\t\ttry_iova_contig_mempool = false;\n-\t\t\tflags &= ~RTE_MEMZONE_IOVA_CONTIG;\n-\n-\t\t\tmem_size = rte_mempool_ops_calc_mem_size(mp, 
n,\n-\t\t\t\t\tpg_shift, &min_chunk_size, &align);\n-\t\t\tif (mem_size < 0) {\n-\t\t\t\tret = mem_size;\n-\t\t\t\tgoto fail;\n-\t\t\t}\n-\n-\t\t\tmz = rte_memzone_reserve_aligned(mz_name, mem_size,\n-\t\t\t\tmp->socket_id, flags, align);\n-\t\t}\n \t\t/* don't try reserving with 0 size if we were asked to reserve\n \t\t * IOVA-contiguous memory.\n \t\t */\n@@ -594,7 +560,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \t\telse\n \t\t\tiova = RTE_BAD_IOVA;\n \n-\t\tif (try_iova_contig_mempool || pg_sz == 0)\n+\t\tif (pg_sz == 0)\n \t\t\tret = rte_mempool_populate_iova(mp, mz->addr,\n \t\t\t\tiova, mz->len,\n \t\t\t\trte_mempool_memchunk_mz_free,\ndiff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h\nindex 8053f7a04..7bc10e699 100644\n--- a/lib/librte_mempool/rte_mempool.h\n+++ b/lib/librte_mempool/rte_mempool.h\n@@ -458,7 +458,7 @@ typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);\n  * @param[out] align\n  *   Location for required memory chunk alignment.\n  * @return\n- *   Required memory size aligned at page boundary.\n+ *   Required memory size.\n  */\n typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,\n \t\tuint32_t obj_num,  uint32_t pg_shift,\ndiff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c\nindex e02eb702c..22c5251eb 100644\n--- a/lib/librte_mempool/rte_mempool_ops.c\n+++ b/lib/librte_mempool/rte_mempool_ops.c\n@@ -100,7 +100,9 @@ rte_mempool_ops_get_count(const struct rte_mempool *mp)\n \treturn ops->get_count(mp);\n }\n \n-/* wrapper to notify new memory area to external mempool */\n+/* wrapper to calculate the memory size required to store given number\n+ * of objects\n+ */\n ssize_t\n rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,\n \t\t\t\tuint32_t obj_num, uint32_t pg_shift,\n",
    "prefixes": [
        "RFC",
        "2/4"
    ]
}
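
The commit message in the content field turns on one point: rounding the required size up to the page size can vastly overstate what is actually needed when pages are big. A toy illustration of that overshoot (plain Python arithmetic with a made-up object size and count, not DPDK code):

def required_mem(obj_size, obj_num, pg_sz, align_to_page):
    size = obj_size * obj_num
    if align_to_page and pg_sz:
        size = -(-size // pg_sz) * pg_sz  # round up to a page boundary
    return size

PG_1G = 1 << 30
print(required_mem(2304, 1000, PG_1G, align_to_page=True))   # 1073741824: a full 1 GB page
print(required_mem(2304, 1000, PG_1G, align_to_page=False))  # 2304000: ~2.2 MB actually needed

The mbox field of the response gives the raw patch in mbox format, which can be piped straight into git am. A short sketch (again Python with the requests package, applying the patch to the git checkout in the current working directory):

import subprocess

import requests

MBOX_URL = ("http://patchwork.dpdk.org/project/dpdk/patch/"
            "20190719133845.32432-3-olivier.matz@6wind.com/mbox/")

resp = requests.get(MBOX_URL)
resp.raise_for_status()

# Feed the mbox contents to `git am` on stdin.
subprocess.run(["git", "am"], input=resp.content, check=True)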