These handlers allow an application to create a mempool based on the
lock-free ring, with any combination of single/multi producer/consumer.
Also add a note to the "Known Issues" section of the programmer's guide.
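
For illustration, an application can select one of the new handlers (here
"ring_mp_mc_lf") with the standard mempool ops API. The pool name, sizes,
and the create_lf_mempool() wrapper below are placeholders, not part of
this patch:

    #include <rte_lcore.h>
    #include <rte_mempool.h>

    /* Illustrative only: pool name and sizing are placeholders. */
    static struct rte_mempool *
    create_lf_mempool(void)
    {
            struct rte_mempool *mp;

            /* 4096 objects of 2048 bytes, 256-object per-lcore cache. */
            mp = rte_mempool_create_empty("lf_pool", 4096, 2048, 256, 0,
                                          rte_socket_id(), 0);
            if (mp == NULL)
                    return NULL;

            /* The ops must be selected before the pool is populated. */
            if (rte_mempool_set_ops_byname(mp, "ring_mp_mc_lf", NULL) < 0 ||
                rte_mempool_populate_default(mp) < 0) {
                    rte_mempool_free(mp);
                    return NULL;
            }

            return mp;
    }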
Signed-off-by: Gage Eads <gage.eads@intel.com>
Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
---
doc/guides/prog_guide/env_abstraction_layer.rst | 10 +++++
drivers/mempool/ring/Makefile | 1 +
drivers/mempool/ring/meson.build | 2 +
drivers/mempool/ring/rte_mempool_ring.c | 58 +++++++++++++++++++++++--
4 files changed, 68 insertions(+), 3 deletions(-)
@@ -541,6 +541,16 @@ Known Issues
5. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.
+ Alternatively, 32-bit and x86_64 applications can use the lock-free ring
+ mempool handler. When considering it, note that:
+
+ - Among 64-bit architectures it is currently limited to the x86_64 platform,
+   because it relies on a 16-byte compare-and-swap operation that is not yet
+   available on other platforms.
+ - It has worse average-case performance than the non-preemptive rte_ring, but
+ software caching (e.g. the mempool cache) can mitigate this by reducing the
+ number of handler operations.
+
+ rte_timer
Running ``rte_timer_manage()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
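
The mempool-cache mitigation mentioned in the second bullet above works
because a pool created with a nonzero cache size serves most get/put calls
from a per-lcore cache and only falls through to the ring handler when the
cache over- or underflows. A minimal sketch, assuming "mp" was created with
one of the *_lf handlers and a nonzero cache size; the burst size and the
burst_alloc_free() wrapper are illustrative:

    #include <rte_mempool.h>

    /* Most of these get/put calls hit the per-lcore cache; the
     * lock-free ring handler is invoked only in bursts, which
     * amortizes its higher per-operation cost.
     */
    static int
    burst_alloc_free(struct rte_mempool *mp)
    {
            void *objs[32];

            if (rte_mempool_get_bulk(mp, objs, 32) < 0)
                    return -1; /* pool exhausted */

            /* ... use the objects ... */

            rte_mempool_put_bulk(mp, objs, 32);
            return 0;
    }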
@@ -10,6 +10,7 @@ LIB = librte_mempool_ring.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
EXPORT_MAP := rte_mempool_ring_version.map
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+allow_experimental_apis = true
+
sources = files('rte_mempool_ring.c')
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
+ * Copyright(c) 2010-2019 Intel Corporation
*/
#include <stdio.h>
@@ -47,11 +47,11 @@ common_ring_get_count(const struct rte_mempool *mp)
static int
-common_ring_alloc(struct rte_mempool *mp)
+__common_ring_alloc(struct rte_mempool *mp, int rg_flags)
{
- int rg_flags = 0, ret;
char rg_name[RTE_RING_NAMESIZE];
struct rte_ring *r;
+ int ret;
ret = snprintf(rg_name, sizeof(rg_name),
RTE_MEMPOOL_MZ_FORMAT, mp->name);
@@ -82,6 +82,18 @@ common_ring_alloc(struct rte_mempool *mp)
return 0;
}
+static int
+common_ring_alloc(struct rte_mempool *mp)
+{
+ return __common_ring_alloc(mp, 0);
+}
+
+static int
+common_ring_alloc_lf(struct rte_mempool *mp)
+{
+ return __common_ring_alloc(mp, RING_F_LF);
+}
+
static void
common_ring_free(struct rte_mempool *mp)
{
@@ -130,7 +142,47 @@ static const struct rte_mempool_ops ops_sp_mc = {
.get_count = common_ring_get_count,
};
+static const struct rte_mempool_ops ops_mp_mc_lf = {
+ .name = "ring_mp_mc_lf",
+ .alloc = common_ring_alloc_lf,
+ .free = common_ring_free,
+ .enqueue = common_ring_mp_enqueue,
+ .dequeue = common_ring_mc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_sp_sc_lf = {
+ .name = "ring_sp_sc_lf",
+ .alloc = common_ring_alloc_lf,
+ .free = common_ring_free,
+ .enqueue = common_ring_sp_enqueue,
+ .dequeue = common_ring_sc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_mp_sc_lf = {
+ .name = "ring_mp_sc_lf",
+ .alloc = common_ring_alloc_lf,
+ .free = common_ring_free,
+ .enqueue = common_ring_mp_enqueue,
+ .dequeue = common_ring_sc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_sp_mc_lf = {
+ .name = "ring_sp_mc_lf",
+ .alloc = common_ring_alloc_lf,
+ .free = common_ring_free,
+ .enqueue = common_ring_sp_enqueue,
+ .dequeue = common_ring_mc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
MEMPOOL_REGISTER_OPS(ops_mp_mc);
MEMPOOL_REGISTER_OPS(ops_sp_sc);
MEMPOOL_REGISTER_OPS(ops_mp_sc);
MEMPOOL_REGISTER_OPS(ops_sp_mc);
+MEMPOOL_REGISTER_OPS(ops_mp_mc_lf);
+MEMPOOL_REGISTER_OPS(ops_sp_sc_lf);
+MEMPOOL_REGISTER_OPS(ops_mp_sc_lf);
+MEMPOOL_REGISTER_OPS(ops_sp_mc_lf);