@@ -72,8 +72,7 @@ Deprecation Notices
- removal of ``get_capabilities`` mempool ops and related flags.
- substitute ``register_memory_area`` with ``populate`` ops.
- - addition of new ops to customize required memory chunk calculation,
- customize objects population and allocate contiguous
+ - addition of new ops to customize objects population and allocate contiguous
block of objects if underlying driver supports it.
* mbuf: The control mbuf API will be removed in v18.05. The impacted
@@ -80,6 +80,11 @@ ABI Changes
Also, make sure to start the actual text at the margin.
=========================================================
+* **Changed rte_mempool_ops structure.**
+
+ A new callback ``calc_mem_size`` has been added to ``rte_mempool_ops``
+ to allow to customize required memory size calculation.
+
Removed Items
-------------
@@ -152,7 +157,7 @@ The libraries prepended with a plus sign were incremented in this version.
librte_latencystats.so.1
librte_lpm.so.2
librte_mbuf.so.3
- librte_mempool.so.3
+ + librte_mempool.so.4
+ librte_meter.so.2
librte_metrics.so.1
librte_net.so.1
@@ -11,11 +11,12 @@ LDLIBS += -lrte_eal -lrte_ring
EXPORT_MAP := rte_mempool_version.map
-LIBABIVER := 3
+LIBABIVER := 4
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ops_default.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-version = 3
-sources = files('rte_mempool.c', 'rte_mempool_ops.c')
+version = 4
+sources = files('rte_mempool.c', 'rte_mempool_ops.c',
+ 'rte_mempool_ops_default.c')
headers = files('rte_mempool.h')
deps += ['ring']
@@ -561,10 +561,10 @@ rte_mempool_populate_default(struct rte_mempool *mp)
unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
- size_t size, total_elt_sz, align, pg_sz, pg_shift;
+ ssize_t mem_size;
+ size_t align, pg_sz, pg_shift;
rte_iova_t iova;
unsigned mz_id, n;
- unsigned int mp_flags;
int ret;
ret = mempool_ops_alloc_once(mp);
@@ -575,29 +575,23 @@ rte_mempool_populate_default(struct rte_mempool *mp)
if (mp->nb_mem_chunks != 0)
return -EEXIST;
- /* Get mempool capabilities */
- mp_flags = 0;
- ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
- if ((ret < 0) && (ret != -ENOTSUP))
- return ret;
-
- /* update mempool capabilities */
- mp->flags |= mp_flags;
-
if (rte_eal_has_hugepages()) {
pg_shift = 0; /* not needed, zone is physically contiguous */
pg_sz = 0;
- align = RTE_CACHE_LINE_SIZE;
} else {
pg_sz = getpagesize();
pg_shift = rte_bsf32(pg_sz);
- align = pg_sz;
}
- total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
- size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift,
- mp->flags);
+ size_t min_chunk_size;
+
+ mem_size = rte_mempool_ops_calc_mem_size(mp, n, pg_shift,
+ &min_chunk_size, &align);
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto fail;
+ }
ret = snprintf(mz_name, sizeof(mz_name),
RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
@@ -606,7 +600,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
goto fail;
}
- mz = rte_memzone_reserve_aligned(mz_name, size,
+ mz = rte_memzone_reserve_aligned(mz_name, mem_size,
mp->socket_id, mz_flags, align);
/* not enough memory, retry with the biggest zone we have */
if (mz == NULL)
@@ -617,6 +611,12 @@ rte_mempool_populate_default(struct rte_mempool *mp)
goto fail;
}
+ if (mz->len < min_chunk_size) {
+ rte_memzone_free(mz);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
iova = RTE_BAD_IOVA;
else
@@ -649,13 +649,14 @@ rte_mempool_populate_default(struct rte_mempool *mp)
static size_t
get_anon_size(const struct rte_mempool *mp)
{
- size_t size, total_elt_sz, pg_sz, pg_shift;
+ size_t size, pg_sz, pg_shift;
+ size_t min_chunk_size;
+ size_t align;
pg_sz = getpagesize();
pg_shift = rte_bsf32(pg_sz);
- total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
- size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift,
- mp->flags);
+ size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
+ &min_chunk_size, &align);
return size;
}
@@ -400,6 +400,62 @@ typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
typedef int (*rte_mempool_ops_register_memory_area_t)
(const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
+/**
+ * Calculate memory size required to store given number of objects.
+ *
+ * If mempool objects are not required to be IOVA-contiguous
+ * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
+ * virtually contiguous chunk size. Otherwise, if mempool objects must
+ * be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG is clear),
+ * min_chunk_size defines IOVA-contiguous chunk size.
+ *
+ * @param[in] mp
+ * Pointer to the memory pool.
+ * @param[in] obj_num
+ * Number of objects.
+ * @param[in] pg_shift
+ * LOG2 of the physical page size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ * Location for minimum size of the memory chunk which may be used to
+ * store memory pool objects.
+ * @param[out] align
+ * Location for required memory chunk alignment.
+ * @return
+ * Required memory size aligned at page boundary.
+ */
+typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align);
+
+/**
+ * Default way to calculate memory size required to store given number of
+ * objects.
+ *
+ * If page boundaries may be ignored, it is just a product of total
+ * object size including header and trailer and number of objects.
+ * Otherwise, it is a number of pages required to store given number of
+ * objects without crossing page boundary.
+ *
+ * Note that if object size is bigger than page size, then it assumes
+ * that pages are grouped in subsets of physically continuous pages big
+ * enough to store at least one object.
+ *
+ * If mempool driver requires object addresses to be block size aligned
+ * (MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS), space for one extra element is
+ * reserved to be able to meet the requirement.
+ *
+ * Minimum size of memory chunk is either all required space, if
+ * capabilities say that whole memory area must be physically contiguous
+ * (MEMPOOL_F_CAPA_PHYS_CONTIG), or a maximum of the page size and total
+ * element size.
+ *
+ * Required memory chunk alignment is a maximum of page size and cache
+ * line size.
+ */
+ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align);
+
/** Structure defining mempool operations structure */
struct rte_mempool_ops {
char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
@@ -416,6 +472,11 @@ struct rte_mempool_ops {
* Notify new memory area to mempool
*/
rte_mempool_ops_register_memory_area_t register_memory_area;
+ /**
+ * Optional callback to calculate memory size required to
+ * store specified number of objects.
+ */
+ rte_mempool_calc_mem_size_t calc_mem_size;
} __rte_cache_aligned;
#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
@@ -565,6 +626,29 @@ rte_mempool_ops_register_memory_area(const struct rte_mempool *mp,
char *vaddr, rte_iova_t iova, size_t len);
/**
+ * @internal wrapper for mempool_ops calc_mem_size callback.
+ * API to calculate size of memory required to store specified number of
+ * objects.
+ *
+ * @param[in] mp
+ * Pointer to the memory pool.
+ * @param[in] obj_num
+ * Number of objects.
+ * @param[in] pg_shift
+ * LOG2 of the physical page size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ * Location for minimum size of the memory chunk which may be used to
+ * store memory pool objects.
+ * @param[out] align
+ * Location for required memory chunk alignment.
+ * @return
+ * Required memory size aligned at page boundary.
+ */
+ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align);
+
+/**
* @internal wrapper for mempool_ops free callback.
*
* @param mp
@@ -1534,7 +1618,7 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
* of objects. Assume that the memory buffer will be aligned at page
* boundary.
*
- * Note that if object size is bigger then page size, then it assumes
+ * Note that if object size is bigger than page size, then it assumes
* that pages are grouped in subsets of physically continuous pages big
* enough to store at least one object.
*
@@ -59,6 +59,7 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
ops->get_count = h->get_count;
ops->get_capabilities = h->get_capabilities;
ops->register_memory_area = h->register_memory_area;
+ ops->calc_mem_size = h->calc_mem_size;
rte_spinlock_unlock(&rte_mempool_ops_table.sl);
@@ -123,6 +124,23 @@ rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
return ops->register_memory_area(mp, vaddr, iova, len);
}
+/* wrapper to calculate the memory size required to store given number of objects */
+ssize_t
+rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+
+ if (ops->calc_mem_size == NULL)
+ return rte_mempool_op_calc_mem_size_default(mp, obj_num,
+ pg_shift, min_chunk_size, align);
+
+ return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
+}
+
/* sets mempool ops previously registered by rte_mempool_register_ops. */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
new file mode 100644
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
+ * Copyright(c) 2018 Solarflare Communications Inc.
+ */
+
+#include <rte_mempool.h>
+
+ssize_t
+rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
+{
+ unsigned int mp_flags;
+ int ret;
+ size_t total_elt_sz;
+ size_t mem_size;
+
+ /* Get mempool capabilities */
+ mp_flags = 0;
+ ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
+ if ((ret < 0) && (ret != -ENOTSUP))
+ return ret;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ mem_size = rte_mempool_xmem_size(obj_num, total_elt_sz, pg_shift,
+ mp->flags | mp_flags);
+
+ if (mp_flags & MEMPOOL_F_CAPA_PHYS_CONTIG)
+ *min_chunk_size = mem_size;
+ else
+ *min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
+
+ *align = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, (size_t)1 << pg_shift);
+
+ return mem_size;
+}
@@ -51,3 +51,10 @@ DPDK_17.11 {
rte_mempool_populate_iova_tab;
} DPDK_16.07;
+
+DPDK_18.05 {
+ global:
+
+ rte_mempool_op_calc_mem_size_default;
+
+} DPDK_17.11;