@@ -1024,21 +1024,13 @@ idpf_tx_singleq_free_bufs_avx512(struct idpf_tx_queue *txq)
rte_lcore_id());
void **cache_objs;
- if (cache == NULL || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
+ if (!cache || unlikely(n + cache->len > cache->size)) {
+ rte_mempool_generic_put(mp, (void *)txep, n, cache);
goto done;
}
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it crosses the
- * cache flush threshold) is flushed to the ring.
- */
+ cache_objs = &cache->objs[cache->len];
+
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
@@ -1056,16 +1048,13 @@ idpf_tx_singleq_free_bufs_avx512(struct idpf_tx_queue *txq)
}
cache->len += n;
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp,
- &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
+ /* Increment stats. */
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
goto done;
}
-normal:
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
@@ -1335,21 +1324,13 @@ idpf_tx_splitq_free_bufs_avx512(struct idpf_tx_queue *txq)
rte_lcore_id());
void **cache_objs;
- if (!cache || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
+ if (!cache || unlikely(n + cache->len > cache->size)) {
+ rte_mempool_generic_put(mp, (void *)txep, n, cache);
goto done;
}
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it crosses the
- * cache flush threshold) is flushed to the ring.
- */
+ cache_objs = &cache->objs[cache->len];
+
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
@@ -1367,16 +1348,13 @@ idpf_tx_splitq_free_bufs_avx512(struct idpf_tx_queue *txq)
}
cache->len += n;
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp,
- &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
+ /* Increment stats. */
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
goto done;
}
-normal:
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m)) {
free[0] = m;
@@ -51,8 +51,6 @@ dpaa_mbuf_create_pool(struct rte_mempool *mp)
struct bman_pool_params params = {
.flags = BMAN_POOL_FLAG_DYNAMIC_BPID
};
- unsigned int lcore_id;
- struct rte_mempool_cache *cache;
MEMPOOL_INIT_FUNC_TRACE();
@@ -120,18 +118,6 @@ dpaa_mbuf_create_pool(struct rte_mempool *mp)
rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
sizeof(struct dpaa_bp_info));
mp->pool_data = (void *)bp_info;
- /* Update per core mempool cache threshold to optimal value which is
- * number of buffers that can be released to HW buffer pool in
- * a single API call.
- */
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- cache = &mp->local_cache[lcore_id];
- DPAA_MEMPOOL_DEBUG("lCore %d: cache->flushthresh %d -> %d",
- lcore_id, cache->flushthresh,
- (uint32_t)(cache->size + DPAA_MBUF_MAX_ACQ_REL));
- if (cache->flushthresh)
- cache->flushthresh = cache->size + DPAA_MBUF_MAX_ACQ_REL;
- }
DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid);
return 0;
@@ -234,7 +220,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
count, bp_info->bpid);
- if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
+ if (unlikely(count >= RTE_MEMPOOL_CACHE_MAX_SIZE)) {
DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
count);
return -1;
@@ -44,8 +44,6 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
struct dpaa2_bp_info *bp_info;
struct dpbp_attr dpbp_attr;
uint32_t bpid;
- unsigned int lcore_id;
- struct rte_mempool_cache *cache;
int ret;
avail_dpbp = dpaa2_alloc_dpbp_dev();
@@ -134,18 +132,6 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);
h_bp_list = bp_list;
- /* Update per core mempool cache threshold to optimal value which is
- * number of buffers that can be released to HW buffer pool in
- * a single API call.
- */
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- cache = &mp->local_cache[lcore_id];
- DPAA2_MEMPOOL_DEBUG("lCore %d: cache->flushthresh %d -> %d",
- lcore_id, cache->flushthresh,
- (uint32_t)(cache->size + DPAA2_MBUF_MAX_ACQ_REL));
- if (cache->flushthresh)
- cache->flushthresh = cache->size + DPAA2_MBUF_MAX_ACQ_REL;
- }
return 0;
err3:
@@ -783,18 +783,13 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
rte_lcore_id());
- if (!cache || n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+ if (!cache || unlikely(n + cache->len > cache->size)) {
rte_mempool_generic_put(mp, (void *)txep, n, cache);
goto done;
}
cache_objs = &cache->objs[cache->len];
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it
- * crosses the cache flush threshold) is flushed to the ring.
- */
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
@@ -812,12 +807,10 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
}
cache->len += n;
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk
- (mp, &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
+ /* Increment stats. */
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
goto done;
}
@@ -1873,21 +1873,13 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
rte_lcore_id());
void **cache_objs;
- if (!cache || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
+ if (!cache || unlikely(n + cache->len > cache->size)) {
+ rte_mempool_generic_put(mp, (void *)txep, n, cache);
goto done;
}
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it crosses the
- * cache flush threshold) is flushed to the ring.
- */
+ cache_objs = &cache->objs[cache->len];
+
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
@@ -1905,16 +1897,13 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
}
cache->len += n;
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp,
- &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
+ /* Increment stats. */
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
goto done;
}
-normal:
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m)) {
free[0] = m;
@@ -888,21 +888,13 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
rte_lcore_id());
- if (!cache || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
+ if (!cache || unlikely(n + cache->len > cache->size)) {
+ rte_mempool_generic_put(mp, (void *)txep, n, cache);
goto done;
}
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it
- * crosses the cache flush threshold) is flushed to the ring.
- */
+ cache_objs = &cache->objs[cache->len];
+
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
@@ -920,16 +912,13 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
}
cache->len += n;
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk
- (mp, &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
+ /* Increment stats. */
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
goto done;
}
-normal:
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m)) {
free[0] = m;
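
The idpf, i40e, iavf and ice AVX-512 TX free hunks above all switch to the same put pattern: if the freed mbufs do not fit in the remaining per-lcore cache space, fall back to rte_mempool_generic_put(); otherwise copy them straight into the cache and bump the cache statistics, with no flush-threshold handling left in the driver. Below is a minimal sketch of that pattern, stripped of the drivers' 32-entry unrolled copy loops; demo_tx_put_bulk() and the plain void * object table are illustrative names only, not part of the patch.

#include <stdint.h>
#include <string.h>

#include <rte_branch_prediction.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

static inline void
demo_tx_put_bulk(struct rte_mempool *mp, void * const *objs, uint32_t n)
{
	struct rte_mempool_cache *cache =
		rte_mempool_default_cache(mp, rte_lcore_id());

	if (!cache || unlikely(n + cache->len > cache->size)) {
		/* Not enough room in the cache: let the generic put handle it. */
		rte_mempool_generic_put(mp, objs, n, cache);
		return;
	}

	/* Enough room: append to the cache and update the cache stats. */
	memcpy(&cache->objs[cache->len], objs, sizeof(void *) * n);
	cache->len += n;
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
}
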
@@ -112,7 +112,6 @@ RTE_TRACE_POINT(
rte_trace_point_emit_i32(socket_id);
rte_trace_point_emit_ptr(cache);
rte_trace_point_emit_u32(cache->len);
- rte_trace_point_emit_u32(cache->flushthresh);
)
RTE_TRACE_POINT(
@@ -50,11 +50,6 @@ static void
mempool_event_callback_invoke(enum rte_mempool_event event,
struct rte_mempool *mp);
-/* Note: avoid using floating point since that compiler
- * may not think that is constant.
- */
-#define CALC_CACHE_FLUSHTHRESH(c) (((c) * 3) / 2)
-
#if defined(RTE_ARCH_X86)
/*
* return the greatest common divisor between a and b (fast algorithm)
@@ -746,13 +741,12 @@ rte_mempool_free(struct rte_mempool *mp)
static void
mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
{
- /* Check that cache have enough space for flush threshold */
- RTE_BUILD_BUG_ON(CALC_CACHE_FLUSHTHRESH(RTE_MEMPOOL_CACHE_MAX_SIZE) >
+ /* Check that the cache has enough room for the maximum cache size */
+ RTE_BUILD_BUG_ON(RTE_MEMPOOL_CACHE_MAX_SIZE >
RTE_SIZEOF_FIELD(struct rte_mempool_cache, objs) /
RTE_SIZEOF_FIELD(struct rte_mempool_cache, objs[0]));
cache->size = size;
- cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
cache->len = 0;
}
@@ -836,7 +830,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
/* asked cache too big */
if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
- CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
+ cache_size > n) {
rte_errno = EINVAL;
return NULL;
}
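
With CALC_CACHE_FLUSHTHRESH() gone, the only limits on cache_size are RTE_MEMPOOL_CACHE_MAX_SIZE and n. As an illustration (made-up pool parameters, not taken from the patch), a pool like the following was previously rejected with EINVAL because its flush threshold exceeded n, but is accepted after this change.

#include <rte_memory.h>
#include <rte_mempool.h>

static struct rte_mempool *
demo_create_pool(void)
{
	const unsigned int n = 128;          /* elements in the pool */
	const unsigned int cache_size = 128; /* <= n, <= RTE_MEMPOOL_CACHE_MAX_SIZE */

	/*
	 * Old check: CALC_CACHE_FLUSHTHRESH(128) == 192 > 128 -> EINVAL.
	 * New check: 128 <= 128 -> accepted.
	 */
	return rte_mempool_create("demo_pool", n, 64 /* elt_size */,
			cache_size, 0 /* private_data_size */,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0 /* flags */);
}
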
@@ -89,10 +89,8 @@ struct __rte_cache_aligned rte_mempool_debug_stats {
*/
struct __rte_cache_aligned rte_mempool_cache {
uint32_t size; /**< Size of the cache */
- uint32_t flushthresh; /**< Threshold before we flush excess elements */
uint32_t len; /**< Current cache count */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
- uint32_t unused;
/*
* Alternative location for the most frequently updated mempool statistics (per-lcore),
* providing faster update access when using a mempool cache.
@@ -1030,7 +1028,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
* If cache_size is non-zero, the rte_mempool library will try to
* limit the accesses to the common lockless pool, by maintaining a
* per-lcore object cache. This argument must be lower or equal to
- * RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
+ * RTE_MEMPOOL_CACHE_MAX_SIZE and n. It is advised to choose
* cache_size to have "n modulo cache_size == 0": if this is
* not the case, some elements will always stay in the pool and will
* never be used. The access to the per-lcore table is of course
@@ -1376,52 +1374,51 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache,
*/
static __rte_always_inline void
rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
- unsigned int n, struct rte_mempool_cache *cache)
+ unsigned int n, struct rte_mempool_cache * const cache)
{
void **cache_objs;
- /* No cache provided */
- if (unlikely(cache == NULL))
+ /* No cache provided? */
+ if (unlikely(cache == NULL)) {
+ /* Increment stats now; adding to the mempool always succeeds. */
+ RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);
+
goto driver_enqueue;
+ }
- /* increment stat now, adding in mempool always success */
+ /* Increment stats now; adding to the mempool always succeeds. */
RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
- /* The request itself is too big for the cache */
- if (unlikely(n > cache->flushthresh))
- goto driver_enqueue_stats_incremented;
-
- /*
- * The cache follows the following algorithm:
- * 1. If the objects cannot be added to the cache without crossing
- * the flush threshold, flush the cache to the backend.
- * 2. Add the objects to the cache.
- */
-
- if (cache->len + n <= cache->flushthresh) {
- cache_objs = &cache->objs[cache->len];
- cache->len += n;
- } else {
- cache_objs = &cache->objs[0];
- rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
- cache->len = n;
- }
+ /* The request itself is too big for cache storage? */
+ if (unlikely(n >= RTE_MEMPOOL_CACHE_MAX_SIZE))
+ goto driver_enqueue;
/* Add the objects to the cache. */
+ cache_objs = &cache->objs[cache->len];
+ cache->len += n;
rte_memcpy(cache_objs, obj_table, sizeof(void *) * n);
- return;
-
-driver_enqueue:
+ /* Cache size not exceeded? */
+ if (likely(cache->len <= cache->size))
+ return;
- /* increment stat now, adding in mempool always success */
- RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
- RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);
+ /*
+ * Cache size exceeded.
+ * Flush as many objects as we can to the backend, in a bulk that is
+ * a multiple of the CPU cache line size (or of the mempool cache size).
+ */
+ if (likely(RTE_CACHE_LINE_SIZE / sizeof(void *) <= cache->size))
+ n = cache->len & ~(RTE_CACHE_LINE_SIZE / sizeof(void *) - 1);
+ else
+ n = cache->len - cache->len % cache->size;
+ cache->len -= n;
+ obj_table = &cache->objs[cache->len];
-driver_enqueue_stats_incremented:
+driver_enqueue:
- /* push objects to the backend */
+ /* Push the objects to the backend. */
rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
}
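
A worked example of the new aligned flush in rte_mempool_do_generic_put(), assuming a 64-bit target with 64-byte cache lines (the concrete numbers are made up): RTE_CACHE_LINE_SIZE / sizeof(void *) is 8, so the flush amount is cache->len rounded down to a multiple of 8 objects, and the few leftover objects stay packed at the start of the cache.

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const uint32_t ptrs_per_line = 64 / sizeof(void *); /* 8 with 8-byte pointers */
	uint32_t cache_len = 282; /* e.g. cache->size 256, len 250 plus a put of 32 */
	uint32_t flush;

	flush = cache_len & ~(ptrs_per_line - 1); /* 282 & ~7 == 280 */
	cache_len -= flush;                       /* 2 objects remain in the cache */

	assert(flush == 280 && cache_len == 2);
	return 0;
}
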
@@ -1490,135 +1487,185 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
}
/**
- * @internal Get several objects from the mempool; used internally.
+ * @internal Get several objects from the mempool; used internally when
+ * the number of objects exceeds what is available in the mempool cache.
* @param mp
* A pointer to the mempool structure.
* @param obj_table
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to get, must be strictly positive.
+ *   Must be more than what is available in the mempool cache, i.e.:
+ * n > cache->len
* @param cache
- * A pointer to a mempool cache structure. May be NULL if not needed.
+ * A pointer to a mempool cache structure. Not NULL.
* @return
* - 0: Success.
* - <0: Error; code of driver dequeue function.
*/
-static __rte_always_inline int
-rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
- unsigned int n, struct rte_mempool_cache *cache)
+static int
+rte_mempool_do_generic_get_split(struct rte_mempool *mp, void **obj_table,
+ unsigned int n, struct rte_mempool_cache * const cache)
{
int ret;
unsigned int remaining;
uint32_t index, len;
void **cache_objs;
+ const uint32_t cache_size = cache->size;
- /* No cache provided */
- if (unlikely(cache == NULL)) {
- remaining = n;
- goto driver_dequeue;
- }
-
- /* The cache is a stack, so copy will be in reverse order. */
+ /* Serve the first part of the request from the cache to return hot objects first. */
cache_objs = &cache->objs[cache->len];
+ len = cache->len;
+ remaining = n - len;
+ for (index = 0; index < len; index++)
+ *obj_table++ = *--cache_objs;
+
+ /* At this point, the cache is empty. */
- if (__rte_constant(n) && n <= cache->len) {
+ /* More than can be served from a full cache? */
+ if (unlikely(remaining >= cache_size)) {
/*
- * The request size is known at build time, and
- * the entire request can be satisfied from the cache,
- * so let the compiler unroll the fixed length copy loop.
+ * Serve the following part of the request directly from the backend
+ * in multiples of the CPU cache line (or mempool cache) size, as much as we can.
*/
- cache->len -= n;
- for (index = 0; index < n; index++)
- *obj_table++ = *--cache_objs;
-
- RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
- RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
-
- return 0;
- }
+ if (likely(RTE_CACHE_LINE_SIZE / sizeof(void *) <= cache_size))
+ len = remaining & ~(RTE_CACHE_LINE_SIZE / sizeof(void *) - 1);
+ else
+ len = remaining - remaining % cache_size;
+ ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, len);
+ if (unlikely(ret < 0)) {
+ /*
+ * No further action is required to roll back the request,
+ * as objects in the cache are intact, and no objects have
+ * been dequeued from the backend.
+ */
- /*
- * Use the cache as much as we have to return hot objects first.
- * If the request size 'n' is known at build time, the above comparison
- * ensures that n > cache->len here, so omit RTE_MIN().
- */
- len = __rte_constant(n) ? cache->len : RTE_MIN(n, cache->len);
- cache->len -= len;
- remaining = n - len;
- for (index = 0; index < len; index++)
- *obj_table++ = *--cache_objs;
+ goto fail;
+ }
- /*
- * If the request size 'n' is known at build time, the case
- * where the entire request can be satisfied from the cache
- * has already been handled above, so omit handling it here.
- */
- if (!__rte_constant(n) && remaining == 0) {
- /* The entire request is satisfied from the cache. */
+ remaining -= len;
+ obj_table += len;
- RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
- RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
+ if (remaining == 0) {
+ /* Update the state of the cache before returning. */
+ cache->len = 0;
- return 0;
+ goto success;
+ }
}
- /* if dequeue below would overflow mem allocated for cache */
- if (unlikely(remaining > RTE_MEMPOOL_CACHE_MAX_SIZE))
- goto driver_dequeue;
-
- /* Fill the cache from the backend; fetch size + remaining objects. */
- ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
- cache->size + remaining);
+ /* Fill the entire cache from the backend. */
+ ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs, cache_size);
if (unlikely(ret < 0)) {
/*
- * We are buffer constrained, and not able to allocate
- * cache + remaining.
- * Do not fill the cache, just satisfy the remaining part of
- * the request directly from the backend.
+ * Last resort: Try only the remaining part of the request,
+ * served directly from the backend.
*/
- goto driver_dequeue;
+ ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);
+ if (unlikely(ret == 0)) {
+ /* Update the state of the cache before returning. */
+ cache->len = 0;
+
+ goto success;
+ }
+
+ /* Roll back. */
+ if (cache->len + remaining == n) {
+ /*
+ * No further action is required to roll back the request,
+ * as objects in the cache are intact, and no objects have
+ * been dequeued from the backend.
+ */
+ } else {
+ /* Update the state of the cache before putting back the objects. */
+ cache->len = 0;
+
+ len = n - remaining;
+ obj_table -= len;
+ rte_mempool_do_generic_put(mp, obj_table, len, cache);
+ }
+
+ goto fail;
}
- /* Satisfy the remaining part of the request from the filled cache. */
- cache_objs = &cache->objs[cache->size + remaining];
+ /* Serve the remaining part of the request from the filled cache. */
+ cache_objs = &cache->objs[cache_size];
for (index = 0; index < remaining; index++)
*obj_table++ = *--cache_objs;
- cache->len = cache->size;
+ cache->len = cache_size - remaining;
+
+success:
RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
return 0;
-driver_dequeue:
+fail:
- /* Get remaining objects directly from the backend. */
- ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
- if (ret < 0) {
- if (likely(cache != NULL)) {
- cache->len = n - remaining;
- /*
- * No further action is required to roll the first part
- * of the request back into the cache, as objects in
- * the cache are intact.
- */
- }
+ return ret;
+}
+
+/**
+ * @internal Get several objects from the mempool; used internally.
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to get, must be strictly positive.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @return
+ * - 0: Success.
+ * - <0: Error; code of driver dequeue function.
+ */
+static __rte_always_inline int
+rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
+ unsigned int n, struct rte_mempool_cache * const cache)
+{
+ /* Cache provided? */
+ if (likely(cache != NULL)) {
+ /* The request can be served entirely from the cache? */
+ if (likely(n <= cache->len)) {
+ unsigned int index;
+ void **cache_objs;
- RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
- RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
- } else {
- if (likely(cache != NULL)) {
RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
+
+ /*
+ * The cache is a stack, so copy will be in reverse order.
+ * If the request size is known at build time,
+ * the compiler will unroll the fixed length copy loop.
+ */
+ cache_objs = &cache->objs[cache->len];
+ cache->len -= n;
+ for (index = 0; index < n; index++)
+ *obj_table++ = *--cache_objs;
+
+ return 0;
+ } else
+ return rte_mempool_do_generic_get_split(mp, obj_table, n, cache);
+ } else {
+ int ret;
+
+ /* Get the objects directly from the backend. */
+ ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);
+ if (unlikely(ret < 0)) {
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
} else {
RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
}
- }
- return ret;
+ return ret;
+ }
}
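
A short usage sketch of the resulting get path (demo_get_burst() and the sizes in the comment are illustrative, not part of the patch): a request that fits in the current cache content is served entirely by the inlined fast path above, while a larger one drops into rte_mempool_do_generic_get_split().

#include <rte_lcore.h>
#include <rte_mempool.h>

static int
demo_get_burst(struct rte_mempool *mp, void **objs, unsigned int n)
{
	struct rte_mempool_cache *cache =
		rte_mempool_default_cache(mp, rte_lcore_id());

	/*
	 * With e.g. cache->len == 8 and n == 32, the first 8 objects are
	 * copied out of the cache and the remaining 24 come from the backend
	 * (refilling the cache along the way); with n <= 8 the whole request
	 * is served from the cache without touching the backend.
	 */
	return rte_mempool_generic_get(mp, objs, n, cache);
}
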
/**