mempool: clarify default populate function
diff mbox series

Message ID 20191008093405.23533-1-olivier.matz@6wind.com
State Accepted, archived
Delegated to: David Marchand
Headers show
Series
  • mempool: clarify default populate function
Related show

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-compilation success Compile Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/Intel-compilation success Compilation OK
ci/iol-mellanox-Performance success Performance Testing PASS

Commit Message

Olivier Matz Oct. 8, 2019, 9:34 a.m. UTC
No functional change. Clarify the populate function to make
the next commit easier to understand.

Rename the variables:
- to avoid negation in the name
- to have more understandable names

Remove useless variable (no_pageshift is equivalent to pg_sz == 0).

Remove duplicate assignment of "external" variable.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
---

This patch comes from this series:
http://patchwork.dpdk.org/project/dpdk/list/?series=5624


 lib/librte_mempool/rte_mempool.c | 50 +++++++++++++++++---------------
 1 file changed, 26 insertions(+), 24 deletions(-)

Comments

David Marchand Oct. 17, 2019, 5 a.m. UTC | #1
On Tue, Oct 8, 2019 at 11:35 AM Olivier Matz <olivier.matz@6wind.com> wrote:
>
> No functional change. Clarify the populate function to make
> the next commit easier to understand.

After discussion offlist, changed this:
    Clarify the populate function to make future changes easier to
    understand.

>
> Rename the variables:
> - to avoid negation in the name
> - to have more understandable names
>
> Remove useless variable (no_pageshift is equivalent to pg_sz == 0).
>
> Remove duplicate assignment of "external" variable.
>
> Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>

Applied, thanks.



--
David Marchand

Patch
diff mbox series

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 7260ce0be..0f29e8712 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -429,24 +429,18 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 	rte_iova_t iova;
 	unsigned mz_id, n;
 	int ret;
-	bool no_contig, try_contig, no_pageshift, external;
+	bool need_iova_contig_obj;
+	bool try_iova_contig_mempool;
+	bool alloc_in_ext_mem;
 
 	ret = mempool_ops_alloc_once(mp);
 	if (ret != 0)
 		return ret;
 
-	/* check if we can retrieve a valid socket ID */
-	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
-	if (ret < 0)
-		return -EINVAL;
-	external = ret;
-
 	/* mempool must not be populated */
 	if (mp->nb_mem_chunks != 0)
 		return -EEXIST;
 
-	no_contig = mp->flags & MEMPOOL_F_NO_IOVA_CONTIG;
-
 	/*
 	 * the following section calculates page shift and page size values.
 	 *
@@ -496,16 +490,23 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 	 * to go for contiguous memory even if we're in no-huge mode, because
 	 * external memory may in fact be IOVA-contiguous.
 	 */
-	external = rte_malloc_heap_socket_is_external(mp->socket_id) == 1;
-	no_pageshift = no_contig ||
-			(!external && rte_eal_iova_mode() == RTE_IOVA_VA);
-	try_contig = !no_contig && !no_pageshift &&
-			(rte_eal_has_hugepages() || external);
 
-	if (no_pageshift) {
+	/* check if we can retrieve a valid socket ID */
+	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
+	if (ret < 0)
+		return -EINVAL;
+	alloc_in_ext_mem = (ret == 1);
+	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+	try_iova_contig_mempool = false;
+
+	if (!need_iova_contig_obj) {
+		pg_sz = 0;
+		pg_shift = 0;
+	} else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) {
 		pg_sz = 0;
 		pg_shift = 0;
-	} else if (try_contig) {
+	} else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {
+		try_iova_contig_mempool = true;
 		pg_sz = get_min_page_size(mp->socket_id);
 		pg_shift = rte_bsf32(pg_sz);
 	} else {
@@ -517,7 +518,7 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 		size_t min_chunk_size;
 		unsigned int flags;
 
-		if (try_contig || no_pageshift)
+		if (try_iova_contig_mempool || pg_sz == 0)
 			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
 					0, &min_chunk_size, &align);
 		else
@@ -541,7 +542,7 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 		/* if we're trying to reserve contiguous memory, add appropriate
 		 * memzone flag.
 		 */
-		if (try_contig)
+		if (try_iova_contig_mempool)
 			flags |= RTE_MEMZONE_IOVA_CONTIG;
 
 		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
@@ -551,8 +552,9 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 		 * minimum required contiguous chunk fits minimum page, adjust
 		 * memzone size to the page size, and try again.
 		 */
-		if (mz == NULL && try_contig && min_chunk_size <= pg_sz) {
-			try_contig = false;
+		if (mz == NULL && try_iova_contig_mempool &&
+				min_chunk_size <= pg_sz) {
+			try_iova_contig_mempool = false;
 			flags &= ~RTE_MEMZONE_IOVA_CONTIG;
 
 			mem_size = rte_mempool_ops_calc_mem_size(mp, n,
@@ -587,12 +589,12 @@  rte_mempool_populate_default(struct rte_mempool *mp)
 			goto fail;
 		}
 
-		if (no_contig)
-			iova = RTE_BAD_IOVA;
-		else
+		if (need_iova_contig_obj)
 			iova = mz->iova;
+		else
+			iova = RTE_BAD_IOVA;
 
-		if (no_pageshift || try_contig)
+		if (try_iova_contig_mempool || pg_sz == 0)
 			ret = rte_mempool_populate_iova(mp, mz->addr,
 				iova, mz->len,
 				rte_mempool_memchunk_mz_free,