From patchwork Tue Jan 23 13:15:57 2018
X-Patchwork-Submitter: Andrew Rybchenko
X-Patchwork-Id: 34325
From: Andrew Rybchenko
CC: Olivier Matz
Date: Tue, 23 Jan 2018 13:15:57 +0000
Message-ID: <1516713372-10572-3-git-send-email-arybchenko@solarflare.com>
In-Reply-To: <1516713372-10572-1-git-send-email-arybchenko@solarflare.com>
References: <1511539591-20966-1-git-send-email-arybchenko@solarflare.com>
 <1516713372-10572-1-git-send-email-arybchenko@solarflare.com>
Subject: [dpdk-dev] [RFC v2 02/17] mempool: add op to calculate memory size
 to be allocated

The size of the memory chunk required to populate mempool objects depends
on how the objects are stored in memory. Different mempool drivers may have
different requirements, and the new operation makes it possible to calculate
the memory size in accordance with driver requirements and to advertise the
minimum memory chunk size and alignment requirements in a generic way.

Suggested-by: Olivier Matz
Signed-off-by: Andrew Rybchenko
---
 lib/librte_mempool/rte_mempool.c           | 95 ++++++++++++++++++++++--------
 lib/librte_mempool/rte_mempool.h           | 63 +++++++++++++++++++-
 lib/librte_mempool/rte_mempool_ops.c       | 18 ++++++
 lib/librte_mempool/rte_mempool_version.map |  8 +++
 4 files changed, 159 insertions(+), 25 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index e783b9a..1f54f95 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -233,13 +233,14 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	return sz->total_size;
 }
 
-
 /*
- * Calculate maximum amount of memory required to store given number of objects.
+ * Internal function to calculate required memory chunk size shared
+ * by default implementation of the corresponding callback and
+ * deprecated external function.
  */
-size_t
-rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
-		      unsigned int flags)
+static size_t
+rte_mempool_xmem_size_int(uint32_t elt_num, size_t total_elt_sz,
+			  uint32_t pg_shift, unsigned int flags)
 {
 	size_t obj_per_page, pg_num, pg_sz;
 	unsigned int mask;
@@ -264,6 +265,49 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
 	return pg_num << pg_shift;
 }
 
+ssize_t
+rte_mempool_calc_mem_size_def(const struct rte_mempool *mp,
+			      uint32_t obj_num, uint32_t pg_shift,
+			      size_t *min_chunk_size,
+			      __rte_unused size_t *align)
+{
+	unsigned int mp_flags;
+	int ret;
+	size_t total_elt_sz;
+	size_t mem_size;
+
+	/* Get mempool capabilities */
+	mp_flags = 0;
+	ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
+	if ((ret < 0) && (ret != -ENOTSUP))
+		return ret;
+
+	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+	mem_size = rte_mempool_xmem_size_int(obj_num, total_elt_sz, pg_shift,
+					     mp->flags | mp_flags);
+
+	if (mp_flags & MEMPOOL_F_CAPA_PHYS_CONTIG)
+		*min_chunk_size = mem_size;
+	else
+		*min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
+
+	/* No extra align requirements by default */
+
+	return mem_size;
+}
+
+/*
+ * Calculate maximum amount of memory required to store given number of objects.
+ */
+size_t
+rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
+		      unsigned int flags)
+{
+	return rte_mempool_xmem_size_int(elt_num, total_elt_sz, pg_shift,
+					 flags);
+}
+
 /*
  * Calculate how much memory would be actually required with the
  * given memory footprint to store required number of elements.
@@ -570,25 +614,16 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	const struct rte_memzone *mz;
-	size_t size, total_elt_sz, align, pg_sz, pg_shift;
+	ssize_t mem_size;
+	size_t align, pg_sz, pg_shift;
 	rte_iova_t iova;
 	unsigned mz_id, n;
-	unsigned int mp_flags;
 	int ret;
 
 	/* mempool must not be populated */
 	if (mp->nb_mem_chunks != 0)
 		return -EEXIST;
 
-	/* Get mempool capabilities */
-	mp_flags = 0;
-	ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
-	if ((ret < 0) && (ret != -ENOTSUP))
-		return ret;
-
-	/* update mempool capabilities */
-	mp->flags |= mp_flags;
-
 	if (rte_eal_has_hugepages()) {
 		pg_shift = 0; /* not needed, zone is physically contiguous */
 		pg_sz = 0;
@@ -599,10 +634,15 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		align = pg_sz;
 	}
 
-	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
-		size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift,
-						mp->flags);
+		size_t min_chunk_size;
+
+		mem_size = rte_mempool_ops_calc_mem_size(mp, n, pg_shift,
+				&min_chunk_size, &align);
+		if (mem_size < 0) {
+			ret = mem_size;
+			goto fail;
+		}
 
 		ret = snprintf(mz_name, sizeof(mz_name),
 			RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
@@ -611,7 +651,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 			goto fail;
 		}
 
-		mz = rte_memzone_reserve_aligned(mz_name, size,
+		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
 			mp->socket_id, mz_flags, align);
 		/* not enough memory, retry with the biggest zone we have */
 		if (mz == NULL)
@@ -622,6 +662,12 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 			goto fail;
 		}
 
+		if (mz->len < min_chunk_size) {
+			rte_memzone_free(mz);
+			ret = -ENOMEM;
+			goto fail;
+		}
+
 		if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
 			iova = RTE_BAD_IOVA;
 		else
@@ -654,13 +700,14 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 static size_t
 get_anon_size(const struct rte_mempool *mp)
 {
-	size_t size, total_elt_sz, pg_sz, pg_shift;
+	size_t size, pg_sz, pg_shift;
+	size_t min_chunk_size;
+	size_t align;
 
 	pg_sz = getpagesize();
 	pg_shift = rte_bsf32(pg_sz);
-	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
-	size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift,
-					mp->flags);
+	size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
+					&min_chunk_size, &align);
 
 	return size;
 }
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index e21026a..be8a371 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -428,6 +428,39 @@ typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
 typedef int (*rte_mempool_ops_register_memory_area_t)
 (const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
 
+/**
+ * Calculate memory size required to store specified number of objects.
+ *
+ * Note that if object size is bigger than page size, then it assumes
+ * that pages are grouped in subsets of physically continuous pages big
+ * enough to store at least one object.
+ *
+ * @param mp
+ *   Pointer to the memory pool.
+ * @param obj_num
+ *   Number of objects.
+ * @param pg_shift
+ *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
+ * @param min_chunk_size
+ *   Location for minimum size of the memory chunk which may be used to
+ *   store memory pool objects.
+ * @param align
+ *   Location with required memory chunk alignment.
+ * @return
+ *   Required memory size aligned at page boundary.
+ */
+typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
+		uint32_t obj_num, uint32_t pg_shift,
+		size_t *min_chunk_size, size_t *align);
+
+/**
+ * Default way to calculate memory size required to store specified
+ * number of objects.
+ */
+ssize_t rte_mempool_calc_mem_size_def(const struct rte_mempool *mp,
+				      uint32_t obj_num, uint32_t pg_shift,
+				      size_t *min_chunk_size, size_t *align);
+
 /** Structure defining mempool operations structure */
 struct rte_mempool_ops {
 	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
@@ -444,6 +477,11 @@ struct rte_mempool_ops {
 	 * Notify new memory area to mempool
 	 */
 	rte_mempool_ops_register_memory_area_t register_memory_area;
+	/**
+	 * Optional callback to calculate memory size required to
+	 * store specified number of objects.
+	 */
+	rte_mempool_calc_mem_size_t calc_mem_size;
 } __rte_cache_aligned;
 
 #define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */
@@ -593,6 +631,29 @@ rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
 		rte_iova_t iova, size_t len);
 
 /**
+ * @internal wrapper for mempool_ops calc_mem_size callback.
+ * API to calculate size of memory required to store specified number of
+ * objects.
+ *
+ * @param mp
+ *   Pointer to the memory pool.
+ * @param obj_num
+ *   Number of objects.
+ * @param pg_shift
+ *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
+ * @param min_chunk_size
+ *   Location for minimum size of the memory chunk which may be used to
+ *   store memory pool objects.
+ * @param align
+ *   Location with required memory chunk alignment.
+ * @return
+ *   Required memory size aligned at page boundary.
+ */
+ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+		uint32_t obj_num, uint32_t pg_shift,
+		size_t *min_chunk_size, size_t *align);
+
+/**
  * @internal wrapper for mempool_ops free callback.
  *
  * @param mp
@@ -1562,7 +1623,7 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
  * of objects. Assume that the memory buffer will be aligned at page
  * boundary.
  *
- * Note that if object size is bigger then page size, then it assumes
+ * Note that if object size is bigger than page size, then it assumes
  * that pages are grouped in subsets of physically continuous pages big
  * enough to store at least one object.
  *
diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
index 92b9f90..d048b37 100644
--- a/lib/librte_mempool/rte_mempool_ops.c
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -88,6 +88,7 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
 	ops->get_count = h->get_count;
 	ops->get_capabilities = h->get_capabilities;
 	ops->register_memory_area = h->register_memory_area;
+	ops->calc_mem_size = h->calc_mem_size;
 
 	rte_spinlock_unlock(&rte_mempool_ops_table.sl);
 
@@ -152,6 +153,23 @@ rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
 	return ops->register_memory_area(mp, vaddr, iova, len);
 }
 
+/* wrapper to calculate memory size required to store given number of objects */
+ssize_t
+rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+				uint32_t obj_num, uint32_t pg_shift,
+				size_t *min_chunk_size, size_t *align)
+{
+	struct rte_mempool_ops *ops;
+
+	ops = rte_mempool_get_ops(mp->ops_index);
+
+	if (ops->calc_mem_size == NULL)
+		return rte_mempool_calc_mem_size_def(mp, obj_num, pg_shift,
+						min_chunk_size, align);
+
+	return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
+}
+
 /* sets mempool ops previously registered by rte_mempool_register_ops. */
 int
 rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index 62b76f9..9fa7270 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -51,3 +51,11 @@ DPDK_17.11 {
 	rte_mempool_populate_iova_tab;
 
 } DPDK_16.07;
+
+DPDK_18.05 {
+	global:
+
+	rte_mempool_calc_mem_size_def;
+
+} DPDK_17.11;
+
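
For illustration only (not part of this patch): a minimal sketch of how a
mempool driver could supply its own calc_mem_size callback once this op is
available. The driver name "example" and its requirement that the whole pool
fit into a single physically contiguous chunk are assumptions made up for the
sketch; drivers that leave calc_mem_size set to NULL keep the previous
page-based sizing behaviour through rte_mempool_calc_mem_size_def().

#include <rte_common.h>
#include <rte_mempool.h>

static ssize_t
example_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
		      __rte_unused uint32_t pg_shift,
		      size_t *min_chunk_size, __rte_unused size_t *align)
{
	/* Objects are laid out back to back, including header and trailer. */
	size_t total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
	ssize_t mem_size = (ssize_t)total_elt_sz * obj_num;

	/* This driver needs the whole pool in one contiguous memory chunk. */
	*min_chunk_size = mem_size;

	/* No extra alignment requirement: leave *align as set by the caller. */

	return mem_size;
}

static struct rte_mempool_ops example_ops = {
	.name = "example",
	/*
	 * The mandatory callbacks (alloc, free, enqueue, dequeue, get_count)
	 * are omitted here for brevity; a real driver must provide them
	 * before registering the ops with MEMPOOL_REGISTER_OPS(example_ops).
	 */
	.calc_mem_size = example_calc_mem_size,
};

With such an ops struct, rte_mempool_ops_calc_mem_size() dispatches to
example_calc_mem_size() from rte_mempool_populate_default(), and the returned
min_chunk_size lets the populate code reject memzones that are too small.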