get:
Show a patch.

patch:
Update a patch (partial update).

put:
Update a patch (full update).
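
For reference, the same resource can be fetched as plain JSON from a script. A minimal sketch using only the Python standard library (the URL is the endpoint shown in the GET example below; the Accept header and the printed fields are illustrative assumptions, not part of this page):

import json
from urllib.request import Request, urlopen

# Patch detail endpoint shown in the GET example below.
url = "http://patchwork.dpdk.org/api/patches/62483/"

# Ask for a JSON rendering rather than the browsable "api" format.
req = Request(url, headers={"Accept": "application/json"})
with urlopen(req) as resp:
    patch = json.load(resp)

# A few fields from the payload reproduced further down.
print(patch["name"], patch["state"], patch["check"])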

GET /api/patches/62483/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 62483,
    "url": "http://patchwork.dpdk.org/api/patches/62483/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20191105153707.14645-7-olivier.matz@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20191105153707.14645-7-olivier.matz@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20191105153707.14645-7-olivier.matz@6wind.com",
    "date": "2019-11-05T15:37:05",
    "name": "[v4,6/7] mempool: prevent objects from being across pages",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "9d01ae5297b39bbf7fb6c4cb2e71a3935bd98985",
    "submitter": {
        "id": 8,
        "url": "http://patchwork.dpdk.org/api/people/8/?format=api",
        "name": "Olivier Matz",
        "email": "olivier.matz@6wind.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20191105153707.14645-7-olivier.matz@6wind.com/mbox/",
    "series": [
        {
            "id": 7258,
            "url": "http://patchwork.dpdk.org/api/series/7258/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=7258",
            "date": "2019-11-05T15:36:59",
            "name": "mempool: avoid objects allocations across pages",
            "version": 4,
            "mbox": "http://patchwork.dpdk.org/series/7258/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/62483/comments/",
    "check": "fail",
    "checks": "http://patchwork.dpdk.org/api/patches/62483/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 8A26DA04A2;\n\tTue,  5 Nov 2019 16:38:20 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 4A3461BFDB;\n\tTue,  5 Nov 2019 16:37:33 +0100 (CET)",
            "from proxy.6wind.com (host.76.145.23.62.rev.coltfrance.com\n [62.23.145.76]) by dpdk.org (Postfix) with ESMTP id 8FD291BF6B\n for <dev@dpdk.org>; Tue,  5 Nov 2019 16:37:19 +0100 (CET)",
            "from glumotte.dev.6wind.com. (unknown [10.16.0.195])\n by proxy.6wind.com (Postfix) with ESMTP id 7ADB033B0F3;\n Tue,  5 Nov 2019 16:37:19 +0100 (CET)"
        ],
        "From": "Olivier Matz <olivier.matz@6wind.com>",
        "To": "dev@dpdk.org",
        "Cc": "Anatoly Burakov <anatoly.burakov@intel.com>,\n Andrew Rybchenko <arybchenko@solarflare.com>,\n Ferruh Yigit <ferruh.yigit@linux.intel.com>,\n \"Giridharan, Ganesan\" <ggiridharan@rbbn.com>,\n Jerin Jacob Kollanukkaran <jerinj@marvell.com>,\n \"Kiran Kumar Kokkilagadda\" <kirankumark@marvell.com>,\n Stephen Hemminger <sthemmin@microsoft.com>,\n Thomas Monjalon <thomas@monjalon.net>,\n Vamsi Krishna Attunuru <vattunuru@marvell.com>,\n Hemant Agrawal <hemant.agrawal@nxp.com>, Nipun Gupta <nipun.gupta@nxp.com>,\n David Marchand <david.marchand@redhat.com>",
        "Date": "Tue,  5 Nov 2019 16:37:05 +0100",
        "Message-Id": "<20191105153707.14645-7-olivier.matz@6wind.com>",
        "X-Mailer": "git-send-email 2.20.1",
        "In-Reply-To": "<20191105153707.14645-1-olivier.matz@6wind.com>",
        "References": "<20190719133845.32432-1-olivier.matz@6wind.com>\n <20191105153707.14645-1-olivier.matz@6wind.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v4 6/7] mempool: prevent objects from being\n\tacross pages",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "When populating a mempool, ensure that objects are not located across\nseveral pages, except if user did not request iova contiguous objects.\n\nSigned-off-by: Vamsi Krishna Attunuru <vattunuru@marvell.com>\nSigned-off-by: Olivier Matz <olivier.matz@6wind.com>\nAcked-by: Nipun Gupta <nipun.gupta@nxp.com>\nAcked-by: Andrew Rybchenko <arybchenko@solarflare.com>\n---\n drivers/mempool/bucket/rte_mempool_bucket.c   | 10 ++-\n drivers/mempool/dpaa/dpaa_mempool.c           |  4 +-\n drivers/mempool/dpaa2/dpaa2_hw_mempool.c      |  4 +-\n .../mempool/octeontx/rte_mempool_octeontx.c   | 21 +++---\n drivers/mempool/octeontx2/otx2_mempool_ops.c  | 21 +++---\n lib/librte_mempool/rte_mempool.c              | 23 ++-----\n lib/librte_mempool/rte_mempool.h              | 27 ++++++--\n lib/librte_mempool/rte_mempool_ops_default.c  | 66 +++++++++++++++----\n 8 files changed, 119 insertions(+), 57 deletions(-)",
    "diff": "diff --git a/drivers/mempool/bucket/rte_mempool_bucket.c b/drivers/mempool/bucket/rte_mempool_bucket.c\nindex dfeaf4e45..5ce1ef16f 100644\n--- a/drivers/mempool/bucket/rte_mempool_bucket.c\n+++ b/drivers/mempool/bucket/rte_mempool_bucket.c\n@@ -401,6 +401,11 @@ bucket_alloc(struct rte_mempool *mp)\n \tstruct bucket_data *bd;\n \tunsigned int i;\n \tunsigned int bucket_header_size;\n+\tsize_t pg_sz;\n+\n+\trc = rte_mempool_get_page_size(mp, &pg_sz);\n+\tif (rc < 0)\n+\t\treturn rc;\n \n \tbd = rte_zmalloc_socket(\"bucket_pool\", sizeof(*bd),\n \t\t\t\tRTE_CACHE_LINE_SIZE, mp->socket_id);\n@@ -416,7 +421,8 @@ bucket_alloc(struct rte_mempool *mp)\n \tRTE_BUILD_BUG_ON(sizeof(struct bucket_header) > RTE_CACHE_LINE_SIZE);\n \tbd->header_size = mp->header_size + bucket_header_size;\n \tbd->total_elt_size = mp->header_size + mp->elt_size + mp->trailer_size;\n-\tbd->bucket_mem_size = RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB * 1024;\n+\tbd->bucket_mem_size = RTE_MIN(pg_sz,\n+\t\t\t(size_t)(RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB * 1024));\n \tbd->obj_per_bucket = (bd->bucket_mem_size - bucket_header_size) /\n \t\tbd->total_elt_size;\n \tbd->bucket_page_mask = ~(rte_align64pow2(bd->bucket_mem_size) - 1);\n@@ -585,7 +591,7 @@ bucket_populate(struct rte_mempool *mp, unsigned int max_objs,\n \n \t\thdr->fill_cnt = 0;\n \t\thdr->lcore_id = LCORE_ID_ANY;\n-\t\trc = rte_mempool_op_populate_helper(mp,\n+\t\trc = rte_mempool_op_populate_helper(mp, 0,\n \t\t\t\t\t\t     RTE_MIN(bd->obj_per_bucket,\n \t\t\t\t\t\t\t     max_objs - n_objs),\n \t\t\t\t\t\t     iter + bucket_header_sz,\ndiff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c\nindex 27736e6c2..3a2528331 100644\n--- a/drivers/mempool/dpaa/dpaa_mempool.c\n+++ b/drivers/mempool/dpaa/dpaa_mempool.c\n@@ -341,8 +341,8 @@ dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,\n \t */\n \tTAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);\n \n-\treturn rte_mempool_op_populate_helper(mp, max_objs, vaddr, paddr, len,\n-\t\t\t\t\t       obj_cb, obj_cb_arg);\n+\treturn rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,\n+\t\t\t\t\t       len, obj_cb, obj_cb_arg);\n }\n \n static const struct rte_mempool_ops dpaa_mpool_ops = {\ndiff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c\nindex 8f8dbeada..36c93decf 100644\n--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c\n+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c\n@@ -421,8 +421,8 @@ dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,\n \t/* Insert entry into the PA->VA Table */\n \tdpaax_iova_table_update(paddr, vaddr, len);\n \n-\treturn rte_mempool_op_populate_helper(mp, max_objs, vaddr, paddr, len,\n-\t\t\t\t\t       obj_cb, obj_cb_arg);\n+\treturn rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,\n+\t\t\t\t\t       len, obj_cb, obj_cb_arg);\n }\n \n static const struct rte_mempool_ops dpaa2_mpool_ops = {\ndiff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c\nindex fff33e5c6..bd0070020 100644\n--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c\n+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c\n@@ -132,14 +132,15 @@ octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,\n \t\t\t     size_t *min_chunk_size, size_t *align)\n {\n \tssize_t mem_size;\n+\tsize_t total_elt_sz;\n \n-\t/*\n-\t * Simply need space for one more object to be able to\n-\t * fulfil alignment requirements.\n+\t/* Need space for one more obj on each chunk to 
fulfill\n+\t * alignment requirements.\n \t */\n-\tmem_size = rte_mempool_op_calc_mem_size_helper(mp, obj_num + 1,\n-\t\t\t\t\t\t\tpg_shift,\n-\t\t\t\t\t\t\tmin_chunk_size, align);\n+\ttotal_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;\n+\tmem_size = rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,\n+\t\t\t\t\t\ttotal_elt_sz, min_chunk_size,\n+\t\t\t\t\t\talign);\n \tif (mem_size >= 0) {\n \t\t/*\n \t\t * Memory area which contains objects must be physically\n@@ -168,7 +169,7 @@ octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,\n \ttotal_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;\n \n \t/* align object start address to a multiple of total_elt_sz */\n-\toff = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);\n+\toff = total_elt_sz - ((((uintptr_t)vaddr - 1) % total_elt_sz) + 1);\n \n \tif (len < off)\n \t\treturn -EINVAL;\n@@ -184,8 +185,10 @@ octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,\n \tif (ret < 0)\n \t\treturn ret;\n \n-\treturn rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova, len,\n-\t\t\t\t\t       obj_cb, obj_cb_arg);\n+\treturn rte_mempool_op_populate_helper(mp,\n+\t\t\t\t\tRTE_MEMPOOL_POPULATE_F_ALIGN_OBJ,\n+\t\t\t\t\tmax_objs, vaddr, iova, len,\n+\t\t\t\t\tobj_cb, obj_cb_arg);\n }\n \n static struct rte_mempool_ops octeontx_fpavf_ops = {\ndiff --git a/drivers/mempool/octeontx2/otx2_mempool_ops.c b/drivers/mempool/octeontx2/otx2_mempool_ops.c\nindex 3aea92a01..ea4b1c45d 100644\n--- a/drivers/mempool/octeontx2/otx2_mempool_ops.c\n+++ b/drivers/mempool/octeontx2/otx2_mempool_ops.c\n@@ -713,12 +713,15 @@ static ssize_t\n otx2_npa_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,\n \t\t       uint32_t pg_shift, size_t *min_chunk_size, size_t *align)\n {\n-\t/*\n-\t * Simply need space for one more object to be able to\n-\t * fulfill alignment requirements.\n+\tsize_t total_elt_sz;\n+\n+\t/* Need space for one more obj on each chunk to fulfill\n+\t * alignment requirements.\n \t */\n-\treturn rte_mempool_op_calc_mem_size_helper(mp, obj_num + 1, pg_shift,\n-\t\t\t\t\t\t    min_chunk_size, align);\n+\ttotal_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;\n+\treturn rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,\n+\t\t\t\t\t\ttotal_elt_sz, min_chunk_size,\n+\t\t\t\t\t\talign);\n }\n \n static int\n@@ -735,7 +738,7 @@ otx2_npa_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,\n \ttotal_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;\n \n \t/* Align object start address to a multiple of total_elt_sz */\n-\toff = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);\n+\toff = total_elt_sz - ((((uintptr_t)vaddr - 1) % total_elt_sz) + 1);\n \n \tif (len < off)\n \t\treturn -EINVAL;\n@@ -749,8 +752,10 @@ otx2_npa_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,\n \tif (npa_lf_aura_range_update_check(mp->pool_id) < 0)\n \t\treturn -EBUSY;\n \n-\treturn rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova, len,\n-\t\t\t\t\t       obj_cb, obj_cb_arg);\n+\treturn rte_mempool_op_populate_helper(mp,\n+\t\t\t\t\tRTE_MEMPOOL_POPULATE_F_ALIGN_OBJ,\n+\t\t\t\t\tmax_objs, vaddr, iova, len,\n+\t\t\t\t\tobj_cb, obj_cb_arg);\n }\n \n static struct rte_mempool_ops otx2_npa_ops = {\ndiff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c\nindex 758c5410b..d3db9273d 100644\n--- a/lib/librte_mempool/rte_mempool.c\n+++ b/lib/librte_mempool/rte_mempool.c\n@@ -431,8 +431,6 @@ rte_mempool_get_page_size(struct 
rte_mempool *mp, size_t *pg_sz)\n \n \tif (!need_iova_contig_obj)\n \t\t*pg_sz = 0;\n-\telse if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA)\n-\t\t*pg_sz = 0;\n \telse if (rte_eal_has_hugepages() || alloc_in_ext_mem)\n \t\t*pg_sz = get_min_page_size(mp->socket_id);\n \telse\n@@ -481,17 +479,15 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \t * then just set page shift and page size to 0, because the user has\n \t * indicated that there's no need to care about anything.\n \t *\n-\t * if we do need contiguous objects, there is also an option to reserve\n-\t * the entire mempool memory as one contiguous block of memory, in\n-\t * which case the page shift and alignment wouldn't matter as well.\n+\t * if we do need contiguous objects (if a mempool driver has its\n+\t * own calc_size() method returning min_chunk_size = mem_size),\n+\t * there is also an option to reserve the entire mempool memory\n+\t * as one contiguous block of memory.\n \t *\n \t * if we require contiguous objects, but not necessarily the entire\n-\t * mempool reserved space to be contiguous, then there are two options.\n-\t *\n-\t * if our IO addresses are virtual, not actual physical (IOVA as VA\n-\t * case), then no page shift needed - our memory allocation will give us\n-\t * contiguous IO memory as far as the hardware is concerned, so\n-\t * act as if we're getting contiguous memory.\n+\t * mempool reserved space to be contiguous, pg_sz will be != 0,\n+\t * and the default ops->populate() will take care of not placing\n+\t * objects across pages.\n \t *\n \t * if our IO addresses are physical, we may get memory from bigger\n \t * pages, or we might get memory from smaller pages, and how much of it\n@@ -504,11 +500,6 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \t *\n \t * If we fail to get enough contiguous memory, then we'll go and\n \t * reserve space in smaller chunks.\n-\t *\n-\t * We also have to take into account the fact that memory that we're\n-\t * going to allocate from can belong to an externally allocated memory\n-\t * area, in which case the assumption of IOVA as VA mode being\n-\t * synonymous with IOVA contiguousness will not hold.\n \t */\n \n \tneed_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);\ndiff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h\nindex ad7cc6ad2..225bf9fc9 100644\n--- a/lib/librte_mempool/rte_mempool.h\n+++ b/lib/librte_mempool/rte_mempool.h\n@@ -491,6 +491,9 @@ typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,\n  *   Number of objects to be added in mempool.\n  * @param[in] pg_shift\n  *   LOG2 of the physical pages size. 
If set to 0, ignore page boundaries.\n+ * @param[in] chunk_reserve\n+ *   Amount of memory that must be reserved at the beginning of each page,\n+ *   or at the beginning of the memory area if pg_shift is 0.\n  * @param[out] min_chunk_size\n  *   Location for minimum size of the memory chunk which may be used to\n  *   store memory pool objects.\n@@ -501,7 +504,7 @@ typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,\n  */\n __rte_experimental\n ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,\n-\t\tuint32_t obj_num, uint32_t pg_shift,\n+\t\tuint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,\n \t\tsize_t *min_chunk_size, size_t *align);\n \n /**\n@@ -509,7 +512,7 @@ ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,\n  * objects.\n  *\n  * Equivalent to rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,\n- * min_chunk_size, align).\n+ * 0, min_chunk_size, align).\n  */\n ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,\n \t\tuint32_t obj_num, uint32_t pg_shift,\n@@ -563,17 +566,31 @@ typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,\n \t\tvoid *vaddr, rte_iova_t iova, size_t len,\n \t\trte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);\n \n+/**\n+ * Align objects on addresses multiple of total_elt_sz.\n+ */\n+#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001\n+\n /**\n  * @warning\n  * @b EXPERIMENTAL: this API may change without prior notice.\n  *\n  * @internal Helper to populate memory pool object using provided memory\n- * chunk: just slice objects one by one.\n+ * chunk: just slice objects one by one, taking care of not\n+ * crossing page boundaries.\n+ *\n+ * If RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is set in flags, the addresses\n+ * of object headers will be aligned on a multiple of total_elt_sz.\n+ * This feature is used by octeontx hardware.\n  *\n  * This function is internal to mempool library and mempool drivers.\n  *\n  * @param[in] mp\n  *   A pointer to the mempool structure.\n+ * @param[in] flags\n+ *   Logical OR of following flags:\n+ *   - RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ: align objects on addresses\n+ *     multiple of total_elt_sz.\n  * @param[in] max_objs\n  *   Maximum number of objects to be added in mempool.\n  * @param[in] vaddr\n@@ -591,14 +608,14 @@ typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,\n  */\n __rte_experimental\n int rte_mempool_op_populate_helper(struct rte_mempool *mp,\n-\t\tunsigned int max_objs,\n+\t\tunsigned int flags, unsigned int max_objs,\n \t\tvoid *vaddr, rte_iova_t iova, size_t len,\n \t\trte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);\n \n /**\n  * Default way to populate memory pool object using provided memory chunk.\n  *\n- * Equivalent to rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova,\n+ * Equivalent to rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,\n  * len, obj_cb, obj_cb_arg).\n  */\n int rte_mempool_op_populate_default(struct rte_mempool *mp,\ndiff --git a/lib/librte_mempool/rte_mempool_ops_default.c b/lib/librte_mempool/rte_mempool_ops_default.c\nindex 0bfc63497..e6be7152b 100644\n--- a/lib/librte_mempool/rte_mempool_ops_default.c\n+++ b/lib/librte_mempool/rte_mempool_ops_default.c\n@@ -9,6 +9,7 @@\n ssize_t\n rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,\n \t\t\t\tuint32_t obj_num, uint32_t pg_shift,\n+\t\t\t\tsize_t chunk_reserve,\n \t\t\t\tsize_t *min_chunk_size, size_t *align)\n {\n \tsize_t total_elt_sz;\n@@ -19,10 +20,12 @@ 
rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,\n \tif (total_elt_sz == 0) {\n \t\tmem_size = 0;\n \t} else if (pg_shift == 0) {\n-\t\tmem_size = total_elt_sz * obj_num;\n+\t\tmem_size = total_elt_sz * obj_num + chunk_reserve;\n \t} else {\n \t\tpg_sz = (size_t)1 << pg_shift;\n-\t\tobj_per_page = pg_sz / total_elt_sz;\n+\t\tif (chunk_reserve >= pg_sz)\n+\t\t\treturn -EINVAL;\n+\t\tobj_per_page = (pg_sz - chunk_reserve) / total_elt_sz;\n \t\tif (obj_per_page == 0) {\n \t\t\t/*\n \t\t\t * Note that if object size is bigger than page size,\n@@ -30,8 +33,8 @@ rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,\n \t\t\t * of physically continuous pages big enough to store\n \t\t\t * at least one object.\n \t\t\t */\n-\t\t\tmem_size =\n-\t\t\t\tRTE_ALIGN_CEIL(total_elt_sz, pg_sz) * obj_num;\n+\t\t\tmem_size = RTE_ALIGN_CEIL(total_elt_sz + chunk_reserve,\n+\t\t\t\t\t\tpg_sz) * obj_num;\n \t\t} else {\n \t\t\t/* In the best case, the allocator will return a\n \t\t\t * page-aligned address. For example, with 5 objs,\n@@ -42,7 +45,8 @@ rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,\n \t\t\t */\n \t\t\tobjs_in_last_page = ((obj_num - 1) % obj_per_page) + 1;\n \t\t\t/* room required for the last page */\n-\t\t\tmem_size = objs_in_last_page * total_elt_sz;\n+\t\t\tmem_size = objs_in_last_page * total_elt_sz +\n+\t\t\t\tchunk_reserve;\n \t\t\t/* room required for other pages */\n \t\t\tmem_size += ((obj_num - objs_in_last_page) /\n \t\t\t\tobj_per_page) << pg_shift;\n@@ -67,24 +71,60 @@ rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,\n \t\t\t\tsize_t *min_chunk_size, size_t *align)\n {\n \treturn rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,\n-\t\t\t\t\t\tmin_chunk_size, align);\n+\t\t\t\t\t\t0, min_chunk_size, align);\n+}\n+\n+/* Returns -1 if object crosses a page boundary, else returns 0 */\n+static int\n+check_obj_bounds(char *obj, size_t pg_sz, size_t elt_sz)\n+{\n+\tif (pg_sz == 0)\n+\t\treturn 0;\n+\tif (elt_sz > pg_sz)\n+\t\treturn 0;\n+\tif (RTE_PTR_ALIGN(obj, pg_sz) != RTE_PTR_ALIGN(obj + elt_sz - 1, pg_sz))\n+\t\treturn -1;\n+\treturn 0;\n }\n \n int\n-rte_mempool_op_populate_helper(struct rte_mempool *mp, unsigned int max_objs,\n-\t\t\tvoid *vaddr, rte_iova_t iova, size_t len,\n-\t\t\trte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)\n+rte_mempool_op_populate_helper(struct rte_mempool *mp, unsigned int flags,\n+\t\t\tunsigned int max_objs, void *vaddr, rte_iova_t iova,\n+\t\t\tsize_t len, rte_mempool_populate_obj_cb_t *obj_cb,\n+\t\t\tvoid *obj_cb_arg)\n {\n-\tsize_t total_elt_sz;\n+\tchar *va = vaddr;\n+\tsize_t total_elt_sz, pg_sz;\n \tsize_t off;\n \tunsigned int i;\n \tvoid *obj;\n+\tint ret;\n+\n+\tret = rte_mempool_get_page_size(mp, &pg_sz);\n+\tif (ret < 0)\n+\t\treturn ret;\n \n \ttotal_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;\n \n-\tfor (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) {\n+\tif (flags & RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ)\n+\t\toff = total_elt_sz - (((uintptr_t)(va - 1) % total_elt_sz) + 1);\n+\telse\n+\t\toff = 0;\n+\tfor (i = 0; i < max_objs; i++) {\n+\t\t/* avoid objects to cross page boundaries */\n+\t\tif (check_obj_bounds(va + off, pg_sz, total_elt_sz) < 0) {\n+\t\t\toff += RTE_PTR_ALIGN_CEIL(va + off, pg_sz) - (va + off);\n+\t\t\tif (flags & RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ)\n+\t\t\t\toff += total_elt_sz -\n+\t\t\t\t\t(((uintptr_t)(va + off - 1) %\n+\t\t\t\t\t\ttotal_elt_sz) + 1);\n+\t\t}\n+\n+\t\tif (off + total_elt_sz > 
len)\n+\t\t\tbreak;\n+\n \t\toff += mp->header_size;\n-\t\tobj = (char *)vaddr + off;\n+\t\tobj = va + off;\n \t\tobj_cb(mp, obj_cb_arg, obj,\n \t\t       (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));\n \t\trte_mempool_ops_enqueue_bulk(mp, &obj, 1);\n@@ -100,6 +140,6 @@ rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,\n \t\t\t\trte_mempool_populate_obj_cb_t *obj_cb,\n \t\t\t\tvoid *obj_cb_arg)\n {\n-\treturn rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova,\n+\treturn rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,\n \t\t\t\t\tlen, obj_cb, obj_cb_arg);\n }\n",
    "prefixes": [
        "v4",
        "6/7"
    ]
}
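
The payload above also exposes follow-up URLs such as "mbox" (the patch in mbox form) and "checks" (the individual CI results behind the aggregate "check": "fail"). A hedged sketch along the same lines, with both URLs copied verbatim from the fields above; the "context" and "state" keys of each check entry are assumed from Patchwork's checks listing:

import json
from urllib.request import Request, urlopen

# URLs taken from the "mbox" and "checks" fields of the response above.
MBOX_URL = "http://patchwork.dpdk.org/project/dpdk/patch/20191105153707.14645-7-olivier.matz@6wind.com/mbox/"
CHECKS_URL = "http://patchwork.dpdk.org/api/patches/62483/checks/"

# Save the patch as an mbox file, e.g. for applying with `git am`.
with urlopen(MBOX_URL) as resp, open("62483.mbox", "wb") as out:
    out.write(resp.read())

# List the individual checks behind the aggregate result.
req = Request(CHECKS_URL, headers={"Accept": "application/json"})
with urlopen(req) as resp:
    for check in json.load(resp):
        print(check["context"], check["state"])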