@@ -1024,21 +1024,13 @@ idpf_tx_singleq_free_bufs_avx512(struct idpf_tx_queue *txq)
rte_lcore_id());
void **cache_objs;
- if (cache == NULL || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
+ if (!cache || unlikely(n + cache->len > cache->size)) {
+ rte_mempool_generic_put(mp, (void *)txep, n, cache);
goto done;
}
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it crosses the
- * cache flush threshold) is flushed to the ring.
- */
+ cache_objs = &cache->objs[cache->len];
+
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
@@ -1056,16 +1048,13 @@ idpf_tx_singleq_free_bufs_avx512(struct idpf_tx_queue *txq)
}
cache->len += n;
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp,
- &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
+ /* Increment stats. */
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
goto done;
}
-normal:
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
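
For reference, the driver-side fast path used in the hunks above can be sketched in isolation as follows. This is an illustrative sketch, not part of the patch; the helper name and the plain mbuf-array parameter are assumptions, but the structure mirrors the idpf/i40e/iavf/ice changes: put the burst through the per-lcore cache when it fits, otherwise fall back to rte_mempool_generic_put(), which also copes with a missing cache.

#include <stdint.h>
#include <rte_branch_prediction.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>

/*
 * Illustrative sketch (not part of the patch): return a burst of n mbuf
 * pointers through the per-lcore mempool cache when the burst fits,
 * otherwise let rte_mempool_generic_put() handle it (including cache == NULL).
 * The function name and parameters are hypothetical.
 */
static inline void
tx_put_burst_sketch(struct rte_mempool *mp, struct rte_mbuf **mbufs, uint32_t n)
{
	struct rte_mempool_cache *cache =
		rte_mempool_default_cache(mp, rte_lcore_id());

	if (!cache || unlikely(n + cache->len > cache->size)) {
		/* Burst does not fit in the cache; use the generic put. */
		rte_mempool_generic_put(mp, (void * const *)mbufs, n, cache);
		return;
	}

	/* Burst fits; copy the pointers straight into the cache. */
	rte_memcpy(&cache->objs[cache->len], mbufs, n * sizeof(void *));
	cache->len += n;

	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
}
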
@@ -1335,21 +1324,13 @@ idpf_tx_splitq_free_bufs_avx512(struct idpf_tx_queue *txq)
rte_lcore_id());
void **cache_objs;
- if (!cache || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
+ if (!cache || unlikely(n + cache->len > cache->size)) {
+ rte_mempool_generic_put(mp, (void *)txep, n, cache);
goto done;
}
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it crosses the
- * cache flush threshold) is flushed to the ring.
- */
+ cache_objs = &cache->objs[cache->len];
+
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
@@ -1367,16 +1348,13 @@ idpf_tx_splitq_free_bufs_avx512(struct idpf_tx_queue *txq)
}
cache->len += n;
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp,
- &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
+ /* Increment stats. */
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
goto done;
}
-normal:
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m)) {
free[0] = m;
@@ -51,8 +51,6 @@ dpaa_mbuf_create_pool(struct rte_mempool *mp)
struct bman_pool_params params = {
.flags = BMAN_POOL_FLAG_DYNAMIC_BPID
};
- unsigned int lcore_id;
- struct rte_mempool_cache *cache;
MEMPOOL_INIT_FUNC_TRACE();
@@ -120,18 +118,6 @@ dpaa_mbuf_create_pool(struct rte_mempool *mp)
rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
sizeof(struct dpaa_bp_info));
mp->pool_data = (void *)bp_info;
- /* Update per core mempool cache threshold to optimal value which is
- * number of buffers that can be released to HW buffer pool in
- * a single API call.
- */
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- cache = &mp->local_cache[lcore_id];
- DPAA_MEMPOOL_DEBUG("lCore %d: cache->flushthresh %d -> %d",
- lcore_id, cache->flushthresh,
- (uint32_t)(cache->size + DPAA_MBUF_MAX_ACQ_REL));
- if (cache->flushthresh)
- cache->flushthresh = cache->size + DPAA_MBUF_MAX_ACQ_REL;
- }
DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid);
return 0;
@@ -234,7 +220,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
count, bp_info->bpid);
- if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
+ if (unlikely(count >= RTE_MEMPOOL_CACHE_MAX_SIZE)) {
DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
count);
return -1;
@@ -44,8 +44,6 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
struct dpaa2_bp_info *bp_info;
struct dpbp_attr dpbp_attr;
uint32_t bpid;
- unsigned int lcore_id;
- struct rte_mempool_cache *cache;
int ret;
avail_dpbp = dpaa2_alloc_dpbp_dev();
@@ -134,18 +132,6 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);
h_bp_list = bp_list;
- /* Update per core mempool cache threshold to optimal value which is
- * number of buffers that can be released to HW buffer pool in
- * a single API call.
- */
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- cache = &mp->local_cache[lcore_id];
- DPAA2_MEMPOOL_DEBUG("lCore %d: cache->flushthresh %d -> %d",
- lcore_id, cache->flushthresh,
- (uint32_t)(cache->size + DPAA2_MBUF_MAX_ACQ_REL));
- if (cache->flushthresh)
- cache->flushthresh = cache->size + DPAA2_MBUF_MAX_ACQ_REL;
- }
return 0;
err3:
@@ -783,18 +783,13 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
rte_lcore_id());
- if (!cache || n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+ if (!cache || unlikely(n + cache->len > cache->size)) {
rte_mempool_generic_put(mp, (void *)txep, n, cache);
goto done;
}
cache_objs = &cache->objs[cache->len];
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it
- * crosses the cache flush threshold) is flushed to the ring.
- */
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
@@ -812,12 +807,10 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
}
cache->len += n;
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk
- (mp, &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
+ /* Increment stats. */
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
goto done;
}
@@ -1873,21 +1873,13 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
rte_lcore_id());
void **cache_objs;
- if (!cache || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
+ if (!cache || unlikely(n + cache->len > cache->size)) {
+ rte_mempool_generic_put(mp, (void *)txep, n, cache);
goto done;
}
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it crosses the
- * cache flush threshold) is flushed to the ring.
- */
+ cache_objs = &cache->objs[cache->len];
+
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
@@ -1905,16 +1897,13 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
}
cache->len += n;
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp,
- &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
+ /* Increment stats. */
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
goto done;
}
-normal:
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m)) {
free[0] = m;
@@ -888,21 +888,13 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
rte_lcore_id());
- if (!cache || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
+ if (!cache || unlikely(n + cache->len > cache->size)) {
+ rte_mempool_generic_put(mp, (void *)txep, n, cache);
goto done;
}
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it
- * crosses the cache flush threshold) is flushed to the ring.
- */
+ cache_objs = &cache->objs[cache->len];
+
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
@@ -920,16 +912,13 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
}
cache->len += n;
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk
- (mp, &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
+ /* Increment stats. */
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
+ RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+
goto done;
}
-normal:
m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m)) {
free[0] = m;
@@ -112,7 +112,6 @@ RTE_TRACE_POINT(
rte_trace_point_emit_i32(socket_id);
rte_trace_point_emit_ptr(cache);
rte_trace_point_emit_u32(cache->len);
- rte_trace_point_emit_u32(cache->flushthresh);
)
RTE_TRACE_POINT(
@@ -50,11 +50,6 @@ static void
mempool_event_callback_invoke(enum rte_mempool_event event,
struct rte_mempool *mp);
-/* Note: avoid using floating point since that compiler
- * may not think that is constant.
- */
-#define CALC_CACHE_FLUSHTHRESH(c) (((c) * 3) / 2)
-
#if defined(RTE_ARCH_X86)
/*
* return the greatest common divisor between a and b (fast algorithm)
@@ -746,13 +741,12 @@ rte_mempool_free(struct rte_mempool *mp)
static void
mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
{
- /* Check that cache have enough space for flush threshold */
- RTE_BUILD_BUG_ON(CALC_CACHE_FLUSHTHRESH(RTE_MEMPOOL_CACHE_MAX_SIZE) >
+ /* Check that the cache object array has room for the maximum cache size */
+ RTE_BUILD_BUG_ON(RTE_MEMPOOL_CACHE_MAX_SIZE >
RTE_SIZEOF_FIELD(struct rte_mempool_cache, objs) /
RTE_SIZEOF_FIELD(struct rte_mempool_cache, objs[0]));
cache->size = size;
- cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
cache->len = 0;
}
@@ -836,7 +830,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
/* asked cache too big */
if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
- CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
+ cache_size > n) {
rte_errno = EINVAL;
return NULL;
}
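
With the flush threshold gone, the only upper bounds on cache_size are RTE_MEMPOOL_CACHE_MAX_SIZE and n, so a cache as large as the pool itself is now accepted. A minimal sketch, not part of the patch, with a hypothetical pool name and sizes (and assuming the default RTE_MEMPOOL_CACHE_MAX_SIZE of 512); the previous n / 1.5 rule would have rejected this cache_size:

#include <rte_memory.h>
#include <rte_mempool.h>

/*
 * Minimal sketch (not part of the patch) of the relaxed constraint:
 * cache_size <= RTE_MEMPOOL_CACHE_MAX_SIZE and cache_size <= n.
 * Pool name, element count and element size are hypothetical.
 */
static struct rte_mempool *
create_example_pool(void)
{
	const unsigned int n = 512;          /* number of elements in the pool */
	const unsigned int elt_size = 2048;  /* element size in bytes */
	const unsigned int cache_size = 512; /* == n; rejected by the old n / 1.5 rule */

	return rte_mempool_create_empty("sketch_pool", n, elt_size,
			cache_size, 0, SOCKET_ID_ANY, 0);
}
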
@@ -1046,8 +1040,9 @@ rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
cache_count = mp->local_cache[lcore_id].len;
- fprintf(f, " cache_count[%u]=%"PRIu32"\n",
- lcore_id, cache_count);
+ if (cache_count > 0)
+ fprintf(f, " cache_count[%u]=%"PRIu32"\n",
+ lcore_id, cache_count);
count += cache_count;
}
fprintf(f, " total_cache_count=%u\n", count);
@@ -89,10 +89,8 @@ struct __rte_cache_aligned rte_mempool_debug_stats {
*/
struct __rte_cache_aligned rte_mempool_cache {
uint32_t size; /**< Size of the cache */
- uint32_t flushthresh; /**< Threshold before we flush excess elements */
uint32_t len; /**< Current cache count */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
- uint32_t unused;
/*
* Alternative location for the most frequently updated mempool statistics (per-lcore),
* providing faster update access when using a mempool cache.
@@ -1030,7 +1028,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
* If cache_size is non-zero, the rte_mempool library will try to
* limit the accesses to the common lockless pool, by maintaining a
* per-lcore object cache. This argument must be lower or equal to
- * RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
+ * RTE_MEMPOOL_CACHE_MAX_SIZE and n. It is advised to choose
* cache_size to have "n modulo cache_size == 0": if this is
* not the case, some elements will always stay in the pool and will
* never be used. The access to the per-lcore table is of course
@@ -1376,52 +1374,55 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache,
*/
static __rte_always_inline void
rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
- unsigned int n, struct rte_mempool_cache *cache)
+ unsigned int n, struct rte_mempool_cache * const cache)
{
void **cache_objs;
- /* No cache provided */
- if (unlikely(cache == NULL))
+ /* No cache provided? */
+ if (unlikely(cache == NULL)) {
+ /* Increment stats now; adding objects to the mempool always succeeds. */
+ RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);
+
goto driver_enqueue;
+ }
- /* increment stat now, adding in mempool always success */
+ /* Increment stats now; adding objects to the mempool always succeeds. */
RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
- /* The request itself is too big for the cache */
- if (unlikely(n > cache->flushthresh))
- goto driver_enqueue_stats_incremented;
-
- /*
- * The cache follows the following algorithm:
- * 1. If the objects cannot be added to the cache without crossing
- * the flush threshold, flush the cache to the backend.
- * 2. Add the objects to the cache.
- */
+ /* The request itself is too big for cache storage? */
+ if (unlikely(n >= RTE_MEMPOOL_CACHE_MAX_SIZE))
+ goto driver_enqueue;
- if (cache->len + n <= cache->flushthresh) {
- cache_objs = &cache->objs[cache->len];
- cache->len += n;
- } else {
- cache_objs = &cache->objs[0];
- rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
- cache->len = n;
- }
+ /* The request itself warrants bypassing the cache? */
+ if (unlikely(n >= cache->size))
+ goto driver_enqueue;
/* Add the objects to the cache. */
+ cache_objs = &cache->objs[cache->len];
+ cache->len += n;
rte_memcpy(cache_objs, obj_table, sizeof(void *) * n);
- return;
-
-driver_enqueue:
+ /* Cache size not exceeded? */
+ if (likely(cache->len <= cache->size))
+ return;
- /* increment stat now, adding in mempool always success */
- RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
- RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);
+ /*
+ * Cache size exceeded.
+ * Flush as many objects as possible to the backend, in a CPU cache line
+ * size aligned bulk if the mempool cache size allows it.
+ */
+ if (likely(cache->size >= RTE_CACHE_LINE_SIZE / sizeof(void *)))
+ n = RTE_ALIGN_FLOOR(cache->len, RTE_CACHE_LINE_SIZE / sizeof(void *));
+ else
+ n = cache->len;
+ cache->len -= n;
+ obj_table = &cache->objs[cache->len];
-driver_enqueue_stats_incremented:
+driver_enqueue:
- /* push objects to the backend */
+ /* Push the objects to the backend. */
rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
}
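
To make the flush sizing above concrete: assuming 64-byte cache lines and 8-byte pointers (8 object pointers per line), and hypothetical cache values, the arithmetic works out as in this sketch, which is not part of the patch:

#include <assert.h>
#include <stdint.h>
#include <rte_common.h>

/*
 * Worked sketch (not part of the patch) of the flush sizing in
 * rte_mempool_do_generic_put(). Assumes RTE_CACHE_LINE_SIZE == 64 and
 * 8-byte pointers, i.e. 8 object pointers per cache line; the cache
 * size and fill level are hypothetical.
 */
static void
put_flush_sizing_example(void)
{
	const uint32_t objs_per_line = RTE_CACHE_LINE_SIZE / sizeof(void *);
	const uint32_t cache_size = 256;
	uint32_t cache_len = cache_size + 20; /* cache->len after copying a 20-object put */
	uint32_t flush;

	/* cache_len > cache_size: flush as much as possible, in an aligned bulk. */
	flush = RTE_ALIGN_FLOOR(cache_len, objs_per_line);

	assert(flush == 272);            /* 34 cache lines worth of pointers */
	assert(cache_len - flush == 4);  /* these few objects stay in the cache */
}
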
@@ -1505,86 +1506,89 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
*/
static __rte_always_inline int
rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
- unsigned int n, struct rte_mempool_cache *cache)
+ unsigned int n, struct rte_mempool_cache * const cache)
{
int ret;
unsigned int remaining;
uint32_t index, len;
void **cache_objs;
- /* No cache provided */
- if (unlikely(cache == NULL)) {
- remaining = n;
+ /* No cache provided? */
+ if (unlikely(cache == NULL))
+ goto driver_dequeue;
+
+ /* The request itself is too big for cache storage? */
+ if (unlikely(n >= RTE_MEMPOOL_CACHE_MAX_SIZE))
+ goto driver_dequeue;
+
+ /* The request itself warrants bypassing the cache? */
+ if (unlikely(n >= cache->size))
goto driver_dequeue;
- }
/* The cache is a stack, so copy will be in reverse order. */
- cache_objs = &cache->objs[cache->len];
+ len = cache->len;
- if (__rte_constant(n) && n <= cache->len) {
- /*
- * The request size is known at build time, and
- * the entire request can be satisfied from the cache,
- * so let the compiler unroll the fixed length copy loop.
- */
- cache->len -= n;
- for (index = 0; index < n; index++)
- *obj_table++ = *--cache_objs;
+ /* The entire request can be served from the cache? */
+ if (n <= len) {
+ if (__rte_constant(n)) {
+ /*
+ * The request size 'n' is known at build time,
+ * so let the compiler unroll the fixed length copy loop.
+ */
+ cache_objs = &cache->objs[len];
+ cache->len = len - n;
+ for (index = 0; index < n; index++)
+ *obj_table++ = *--cache_objs;
- RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
- RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
+ goto cache_success;
+ } else {
+ remaining = n;
- return 0;
+ goto cache_dequeue;
+ }
}
- /*
- * Use the cache as much as we have to return hot objects first.
- * If the request size 'n' is known at build time, the above comparison
- * ensures that n > cache->len here, so omit RTE_MIN().
- */
- len = __rte_constant(n) ? cache->len : RTE_MIN(n, cache->len);
- cache->len -= len;
+ /* Serve the first part of the request from the cache to return hot objects first. */
+ cache_objs = &cache->objs[len];
remaining = n - len;
for (index = 0; index < len; index++)
*obj_table++ = *--cache_objs;
+ /* At this point, the cache is empty. */
+
/*
- * If the request size 'n' is known at build time, the case
- * where the entire request can be satisfied from the cache
- * has already been handled above, so omit handling it here.
+ * Fill the cache from the backend; fetch cache size + remaining objects.
+ * Round down to a CPU cache line size aligned bulk, if mempool cache size allows.
*/
- if (!__rte_constant(n) && remaining == 0) {
- /* The entire request is satisfied from the cache. */
-
- RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
- RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
-
- return 0;
- }
-
- /* if dequeue below would overflow mem allocated for cache */
- if (unlikely(remaining > RTE_MEMPOOL_CACHE_MAX_SIZE))
- goto driver_dequeue;
-
- /* Fill the cache from the backend; fetch size + remaining objects. */
- ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
- cache->size + remaining);
+ if (likely(cache->size >= RTE_CACHE_LINE_SIZE / sizeof(void *)))
+ len = RTE_ALIGN_FLOOR(cache->size + remaining,
+ RTE_CACHE_LINE_SIZE / sizeof(void *));
+ else
+ len = cache->size + remaining;
+ ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs, len);
if (unlikely(ret < 0)) {
/*
- * We are buffer constrained, and not able to allocate
- * cache + remaining.
- * Do not fill the cache, just satisfy the remaining part of
- * the request directly from the backend.
+ * Retry; fetch only the remaining objects.
+ * Round up to a CPU cache line size aligned bulk, if mempool cache size allows.
*/
- goto driver_dequeue;
+ if (likely(cache->size >= RTE_CACHE_LINE_SIZE / sizeof(void *)))
+ len = RTE_ALIGN_CEIL(remaining, RTE_CACHE_LINE_SIZE / sizeof(void *));
+ else
+ len = remaining;
+ ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs, len);
+ if (unlikely(ret < 0))
+ goto fail;
}
- /* Satisfy the remaining part of the request from the filled cache. */
- cache_objs = &cache->objs[cache->size + remaining];
+cache_dequeue:
+
+ /* Serve the remaining part of the request from the cache. */
+ cache_objs = &cache->objs[len];
+ cache->len = len - remaining;
for (index = 0; index < remaining; index++)
*obj_table++ = *--cache_objs;
- cache->len = cache->size;
+cache_success:
RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
@@ -1593,31 +1597,31 @@ rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
driver_dequeue:
- /* Get remaining objects directly from the backend. */
- ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);
+ /* Get the objects directly from the backend. */
+ ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);
- if (ret < 0) {
- if (likely(cache != NULL)) {
- cache->len = n - remaining;
- /*
- * No further action is required to roll the first part
- * of the request back into the cache, as objects in
- * the cache are intact.
- */
- }
-
- RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
- RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
- } else {
- if (likely(cache != NULL)) {
- RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
- RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
- } else {
+ if (likely(ret == 0)) {
+ if (unlikely(cache == NULL)) {
RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
+
+ return 0;
}
+
+ goto cache_success;
}
+fail:
+
+ /*
+ * No further action is required to roll back the request,
+ * as objects in the cache are intact, and no objects have
+ * been dequeued from the backend.
+ */
+
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
+
return ret;
}
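
Similarly, the refill sizing in the get path can be made concrete under the same assumptions (64-byte cache lines, 8-byte pointers); all values are hypothetical and the sketch is not part of the patch:

#include <stdint.h>
#include <rte_common.h>

/*
 * Worked sketch (not part of the patch) of the refill sizing in
 * rte_mempool_do_generic_get(), under the same assumptions as above
 * (8 object pointers per CPU cache line); all values are hypothetical.
 */
static void
get_refill_sizing_example(void)
{
	const uint32_t objs_per_line = RTE_CACHE_LINE_SIZE / sizeof(void *);
	const uint32_t cache_size = 256;
	const uint32_t remaining = 21; /* part of the request the emptied cache could not serve */
	uint32_t len;

	/* First attempt: refill the cache plus the remainder in one aligned dequeue. */
	len = RTE_ALIGN_FLOOR(cache_size + remaining, objs_per_line); /* 277 -> 272 */

	/* If the backend cannot deliver that much, retry with just the remainder,
	 * rounded up to an aligned bulk.
	 */
	len = RTE_ALIGN_CEIL(remaining, objs_per_line); /* 21 -> 24 */

	/* After serving the remaining 21 objects, cache->len would be len - 21. */
	(void)len;
}
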