[v4,7/9] eal: add lcore init callbacks

Message ID: 20200626144736.11011-8-david.marchand@redhat.com (mailing list archive)
State: Superseded, archived
Delegated to: Thomas Monjalon
Series: Register non-EAL threads as lcore

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  success  Compilation OK

Commit Message

David Marchand June 26, 2020, 2:47 p.m. UTC
DPDK components and applications can have their say when a new lcore is
initialized. For this, they can register a callback for initializing and
releasing their private data.
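
A minimal usage sketch of this mechanism, assuming a component that keeps one
heap buffer per lcore (the my_* names and the buffer size are illustrative,
not part of the patch):

#include <stdlib.h>

#include <rte_common.h>
#include <rte_lcore.h>

static void *my_lcore_data[RTE_MAX_LCORE];
static void *my_callback_handle;

static int
my_lcore_init(unsigned int lcore_id, void *arg __rte_unused)
{
	/* Returning -1 refuses the lcore for the whole component. */
	my_lcore_data[lcore_id] = calloc(1, 1024);
	return my_lcore_data[lcore_id] != NULL ? 0 : -1;
}

static void
my_lcore_uninit(unsigned int lcore_id, void *arg __rte_unused)
{
	free(my_lcore_data[lcore_id]);
	my_lcore_data[lcore_id] = NULL;
}

static int
my_component_init(void)
{
	/* init is invoked right away for all known lcores; on any refusal,
	 * uninit is called back for the already initialized ones and NULL
	 * is returned.
	 */
	my_callback_handle = rte_lcore_callback_register("my-component",
		my_lcore_init, my_lcore_uninit, NULL);
	return my_callback_handle != NULL ? 0 : -1;
}

static void
my_component_fini(void)
{
	if (my_callback_handle == NULL)
		return;
	/* uninit is invoked for all known lcores, then the callbacks are
	 * removed from the list.
	 */
	rte_lcore_callback_unregister(my_callback_handle);
	my_callback_handle = NULL;
}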

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
Changes since v2:
- added missing test,
- fixed rollback on lcore register,

Changes since v1:
- added unit test (since missing some coverage, for v3),
- preferred callback and removed mention of notification,

---
 app/test/test_lcores.c                   | 230 +++++++++++++++++++++++
 lib/librte_eal/common/eal_common_lcore.c | 138 +++++++++++++-
 lib/librte_eal/common/eal_private.h      |   3 +-
 lib/librte_eal/include/rte_lcore.h       |  68 +++++++
 lib/librte_eal/rte_eal_version.map       |   2 +
 5 files changed, 435 insertions(+), 6 deletions(-)
  

Comments

Ananyev, Konstantin June 29, 2020, 12:46 p.m. UTC | #1
> diff --git a/lib/librte_eal/common/eal_common_lcore.c b/lib/librte_eal/common/eal_common_lcore.c
> index a61824a779..52c46a4cea 100644
> --- a/lib/librte_eal/common/eal_common_lcore.c
> +++ b/lib/librte_eal/common/eal_common_lcore.c
> @@ -224,11 +224,114 @@ rte_socket_id_by_idx(unsigned int idx)
>  }
> 
>  static rte_spinlock_t lcore_lock = RTE_SPINLOCK_INITIALIZER;
> +struct lcore_callback {
> +	TAILQ_ENTRY(lcore_callback) next;
> +	char *name;
> +	rte_lcore_init_cb init;
> +	rte_lcore_uninit_cb uninit;
> +	void *arg;
> +};
> +static TAILQ_HEAD(lcore_callbacks_head, lcore_callback) lcore_callbacks =
> +	TAILQ_HEAD_INITIALIZER(lcore_callbacks);
> +
> +static int
> +callback_init(struct lcore_callback *callback, unsigned int lcore_id)
> +{
> +	if (callback->init == NULL)
> +		return 0;
> +	RTE_LOG(DEBUG, EAL, "Call init for lcore callback %s, lcore_id %u\n",
> +		callback->name, lcore_id);
> +	return callback->init(lcore_id, callback->arg);
> +}
> +
> +static void
> +callback_uninit(struct lcore_callback *callback, unsigned int lcore_id)
> +{
> +	if (callback->uninit == NULL)
> +		return;
> +	RTE_LOG(DEBUG, EAL, "Call uninit for lcore callback %s, lcore_id %u\n",
> +		callback->name, lcore_id);
> +	callback->uninit(lcore_id, callback->arg);
> +}
> +
> +void *
> +rte_lcore_callback_register(const char *name, rte_lcore_init_cb init,
> +	rte_lcore_uninit_cb uninit, void *arg)
> +{
> +	struct rte_config *cfg = rte_eal_get_configuration();
> +	struct lcore_callback *callback;
> +	unsigned int lcore_id;
> +
> +	callback = calloc(1, sizeof(*callback));
> +	if (callback == NULL)
> +		return NULL;
> +	if (asprintf(&callback->name, "%s-%p", name, arg) == -1) {

If name is null, I presume asprintf() would segfault, right?
As this is a public (and control-plane) API, I think it is worth adding
formal checks for the input parameters.
Same for the other new public functions.
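
For illustration, the kind of check meant here could be as small as this
sketch, placed at the top of rte_lcore_callback_register():

	if (name == NULL)
		return NULL;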

> +		free(callback);
> +		return NULL;
> +	}
> +	callback->init = init;
> +	callback->uninit = uninit;
> +	callback->arg = arg;
> +	rte_spinlock_lock(&lcore_lock);
> +	if (callback->init == NULL)
> +		goto no_init;
> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
> +			continue;
> +		if (callback_init(callback, lcore_id) == 0)
> +			continue;
> +		/* Callback refused init for this lcore, uninitialize all
> +		 * previous lcore.
> +		 */
> +		for (; lcore_id != 0; lcore_id--) {
> +			if (cfg->lcore_role[lcore_id - 1] == ROLE_OFF)
> +				continue;
> +			callback_uninit(callback, lcore_id - 1);
> +		}

Just as a nit, you could do here instead:
while (lcore_id-- != 0) { if (cfg->lcore_role[lcore_id] == ROLE_OFF) ... }
to avoid 'lcore_id - 1' inside the loop body. Might look a bit nicer.
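
Spelled out against the loop body above, that would give:

		while (lcore_id-- != 0) {
			if (cfg->lcore_role[lcore_id] == ROLE_OFF)
				continue;
			callback_uninit(callback, lcore_id);
		}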

> +		free(callback);

I think you forgot:
free(callback->name);
here.
It might be worth having a separate helper function around these two frees.
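
For instance (the helper name is only illustrative):

static void
free_callback(struct lcore_callback *callback)
{
	free(callback->name);
	free(callback);
}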

> +		callback = NULL;
> +		goto out;
> +	}
> +no_init:
> +	TAILQ_INSERT_TAIL(&lcore_callbacks, callback, next);
> +	RTE_LOG(DEBUG, EAL, "Registered new lcore callback %s (%sinit, %suninit).\n",
> +		callback->name, callback->init == NULL ? "NO " : "",
> +		callback->uninit == NULL ? "NO " : "");
> +out:
> +	rte_spinlock_unlock(&lcore_lock);
> +	return callback;
> +}
> +
> +void
> +rte_lcore_callback_unregister(void *handle)
> +{
> +	struct rte_config *cfg = rte_eal_get_configuration();
> +	struct lcore_callback *callback = handle;
> +	unsigned int lcore_id;
> +
> +	rte_spinlock_lock(&lcore_lock);
> +	if (callback->uninit == NULL)
> +		goto no_uninit;
> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
> +			continue;
> +		callback_uninit(callback, lcore_id);
> +	}
> +no_uninit:
> +	TAILQ_REMOVE(&lcore_callbacks, callback, next);
> +	rte_spinlock_unlock(&lcore_lock);
> +	RTE_LOG(DEBUG, EAL, "Unregistered lcore callback %s-%p.\n",
> +		callback->name, callback->arg);
> +	free(callback->name);
> +	free(callback);
> +}
> 
>  unsigned int
>  eal_lcore_non_eal_allocate(void)
>  {
>  	struct rte_config *cfg = rte_eal_get_configuration();
> +	struct lcore_callback *callback;
> +	struct lcore_callback *prev;
>  	unsigned int lcore_id;
> 
>  	if (cfg->process_type == RTE_PROC_SECONDARY ||
> @@ -244,8 +347,29 @@ eal_lcore_non_eal_allocate(void)
>  		cfg->lcore_count++;
>  		break;
>  	}
> -	if (lcore_id == RTE_MAX_LCORE)
> +	if (lcore_id == RTE_MAX_LCORE) {
>  		RTE_LOG(DEBUG, EAL, "No lcore available.\n");
> +		goto out;
> +	}
> +	TAILQ_FOREACH(callback, &lcore_callbacks, next) {
> +		if (callback_init(callback, lcore_id) == 0)
> +			continue;
> +		/* Callback refused init for this lcore, call uninit for all
> +		 * previous callbacks.
> +		 */
> +		prev = TAILQ_PREV(callback, lcore_callbacks_head, next);
> +		while (prev != NULL) {
> +			callback_uninit(prev, lcore_id);
> +			prev = TAILQ_PREV(prev, lcore_callbacks_head, next);
> +		}
> +		RTE_LOG(DEBUG, EAL, "Initialization refused for lcore %u.\n",
> +			lcore_id);
> +		cfg->lcore_role[lcore_id] = ROLE_OFF;
> +		cfg->lcore_count--;
> +		lcore_id = RTE_MAX_LCORE;
> +		goto out;
> +	}
> +out:
>  	rte_spinlock_unlock(&lcore_lock);
>  	return lcore_id;
>  }
> @@ -254,11 +378,15 @@ void
>  eal_lcore_non_eal_release(unsigned int lcore_id)
>  {
>  	struct rte_config *cfg = rte_eal_get_configuration();
> +	struct lcore_callback *callback;
> 
>  	rte_spinlock_lock(&lcore_lock);
> -	if (cfg->lcore_role[lcore_id] == ROLE_NON_EAL) {
> -		cfg->lcore_role[lcore_id] = ROLE_OFF;
> -		cfg->lcore_count--;
> -	}
> +	if (cfg->lcore_role[lcore_id] != ROLE_NON_EAL)
> +		goto out;
> +	TAILQ_FOREACH(callback, &lcore_callbacks, next)
> +		callback_uninit(callback, lcore_id);
> +	cfg->lcore_role[lcore_id] = ROLE_OFF;
> +	cfg->lcore_count--;
> +out:
>  	rte_spinlock_unlock(&lcore_lock);
>  }
  
Olivier Matz June 30, 2020, 10:09 a.m. UTC | #2
On Fri, Jun 26, 2020 at 04:47:34PM +0200, David Marchand wrote:
> DPDK components and applications can have their say when a new lcore is
> initialized. For this, they can register a callback for initializing and
> releasing their private data.
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>

[...]

> +/**
> + * Register callbacks invoked when initializing and uninitializing a lcore.
> + *
> + * This function calls the init callback with all initialized lcores.
> + * Any error reported by the init callback triggers a rollback calling the
> + * uninit callback for each lcore.
> + * If this step succeeds, the callbacks are put in the lcore callbacks list
> + * that will get called for each lcore allocation/release.
> + *
> + * Note: callbacks execution is serialised under a lock protecting the lcores
> + * and callbacks list.
> + *
> + * @param name
> + *   A name serving as a small description for this callback.
> + * @param init
> + *   The callback invoked when a lcore_id is initialized.
> + * @param uninit
> + *   The callback invoked when a lcore_id is uninitialized.

nit: it could be highlighted that init or uninit can be NULL (see the
suggested wording after the quoted prototype below).

> + * @param arg
> + *   An optional argument that gets passed to the callback when it gets
> + *   invoked.
> + * @return
> + *   On success, returns an opaque pointer for the registered object.
> + *   On failure (either memory allocation issue in the function itself or an
> + *   error is returned by the init callback itself), returns NULL.
> + */
> +__rte_experimental
> +void *
> +rte_lcore_callback_register(const char *name, rte_lcore_init_cb init,
> +	rte_lcore_uninit_cb uninit, void *arg);
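
A possible wording for the NULL nit above (suggestion only, not part of the
patch):

 * @param init
 *   The callback invoked when a lcore_id is initialized. Can be NULL.
 * @param uninit
 *   The callback invoked when a lcore_id is uninitialized. Can be NULL.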
  
Olivier Matz June 30, 2020, 10:15 a.m. UTC | #3
On Fri, Jun 26, 2020 at 04:47:34PM +0200, David Marchand wrote:
> DPDK components and applications can have their say when a new lcore is
> initialized. For this, they can register a callback for initializing and
> releasing their private data.
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>

2 more minor comments.

> diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c
> index 864bcbade7..e36dceedf9 100644
> --- a/app/test/test_lcores.c
> +++ b/app/test/test_lcores.c
> @@ -5,6 +5,7 @@
>  #include <pthread.h>
>  #include <string.h>
>  
> +#include <rte_common.h>
>  #include <rte_lcore.h>
>  
>  #include "test.h"
> @@ -113,6 +114,229 @@ test_non_eal_lcores(unsigned int eal_threads_count)
>  	return ret;
>  }
>  
> +struct limit_lcore_context {
> +	unsigned int init;
> +	unsigned int max;
> +	unsigned int uninit;
> +};
> +static int
> +limit_lcores_init(unsigned int lcore_id __rte_unused, void *arg)
> +{
> +	struct limit_lcore_context *l = arg;
> +
> +	l->init++;
> +	if (l->init > l->max)
> +		return -1;
> +	return 0;
> +}
> +static void
> +limit_lcores_uninit(unsigned int lcore_id __rte_unused, void *arg)
> +{
> +	struct limit_lcore_context *l = arg;
> +
> +	l->uninit++;
> +}

missing empty lines

[...]

> +static int
> +test_non_eal_lcores_callback(unsigned int eal_threads_count)
> +{
> +	struct thread_context thread_contexts[2];
> +	unsigned int non_eal_threads_count;
> +	struct limit_lcore_context l[2];
> +	unsigned int registered_count;
> +	struct thread_context *t;
> +	void *handle[2];
> +	unsigned int i;
> +	int ret;
> +
> +	memset(l, 0, sizeof(l));
> +	handle[0] = handle[1] = NULL;
> +	non_eal_threads_count = 0;
> +	registered_count = 0;
> +

what about initializing them at declaration?

	struct thread_context thread_contexts[2] = {};
	struct limit_lcore_context l[2] = {};
	void *handle[2] = {};
  

Patch

diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c
index 864bcbade7..e36dceedf9 100644
--- a/app/test/test_lcores.c
+++ b/app/test/test_lcores.c
@@ -5,6 +5,7 @@ 
 #include <pthread.h>
 #include <string.h>
 
+#include <rte_common.h>
 #include <rte_lcore.h>
 
 #include "test.h"
@@ -113,6 +114,229 @@  test_non_eal_lcores(unsigned int eal_threads_count)
 	return ret;
 }
 
+struct limit_lcore_context {
+	unsigned int init;
+	unsigned int max;
+	unsigned int uninit;
+};
+static int
+limit_lcores_init(unsigned int lcore_id __rte_unused, void *arg)
+{
+	struct limit_lcore_context *l = arg;
+
+	l->init++;
+	if (l->init > l->max)
+		return -1;
+	return 0;
+}
+static void
+limit_lcores_uninit(unsigned int lcore_id __rte_unused, void *arg)
+{
+	struct limit_lcore_context *l = arg;
+
+	l->uninit++;
+}
+
+static int
+test_lcores_callback(unsigned int eal_threads_count)
+{
+	struct limit_lcore_context l;
+	void *handle;
+
+	/* Refuse last lcore => callback register error. */
+	memset(&l, 0, sizeof(l));
+	l.max = eal_threads_count - 1;
+	handle = rte_lcore_callback_register("limit", limit_lcores_init,
+		limit_lcores_uninit, &l);
+	if (handle != NULL) {
+		printf("lcore callback register should have failed\n");
+		goto error;
+	}
+	/* Refusal happens at the n th call to the init callback.
+	 * Besides, n - 1 were accepted, so we expect as many uninit calls when
+	 * the rollback happens.
+	 */
+	if (l.init != eal_threads_count) {
+		printf("lcore callback register failed but incorrect init calls, expected %u, got %u\n",
+			eal_threads_count, l.init);
+		goto error;
+	}
+	if (l.uninit != eal_threads_count - 1) {
+		printf("lcore callback register failed but incorrect uninit calls, expected %u, got %u\n",
+			eal_threads_count - 1, l.uninit);
+		goto error;
+	}
+
+	/* Accept all lcore and unregister. */
+	memset(&l, 0, sizeof(l));
+	l.max = eal_threads_count;
+	handle = rte_lcore_callback_register("limit", limit_lcores_init,
+		limit_lcores_uninit, &l);
+	if (handle == NULL) {
+		printf("lcore callback register failed\n");
+		goto error;
+	}
+	if (l.uninit != 0) {
+		printf("lcore callback register succeeded but incorrect uninit calls, expected 0, got %u\n",
+			l.uninit);
+		goto error;
+	}
+	rte_lcore_callback_unregister(handle);
+	handle = NULL;
+	if (l.init != eal_threads_count) {
+		printf("lcore callback unregister done but incorrect init calls, expected %u, got %u\n",
+			eal_threads_count, l.init);
+		goto error;
+	}
+	if (l.uninit != eal_threads_count) {
+		printf("lcore callback unregister done but incorrect uninit calls, expected %u, got %u\n",
+			eal_threads_count, l.uninit);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	if (handle != NULL)
+		rte_lcore_callback_unregister(handle);
+
+	return -1;
+}
+
+static int
+test_non_eal_lcores_callback(unsigned int eal_threads_count)
+{
+	struct thread_context thread_contexts[2];
+	unsigned int non_eal_threads_count;
+	struct limit_lcore_context l[2];
+	unsigned int registered_count;
+	struct thread_context *t;
+	void *handle[2];
+	unsigned int i;
+	int ret;
+
+	memset(l, 0, sizeof(l));
+	handle[0] = handle[1] = NULL;
+	non_eal_threads_count = 0;
+	registered_count = 0;
+
+	/* This test requires two empty slots to be sure lcore init refusal is
+	 * because of callback execution.
+	 */
+	if (eal_threads_count + 2 >= RTE_MAX_LCORE)
+		return 0;
+
+	/* Register two callbacks:
+	 * - first one accepts any lcore,
+	 * - second one accepts all EAL lcore + one more for the first non-EAL
+	 *   thread, then refuses the next lcore.
+	 */
+	l[0].max = UINT_MAX;
+	handle[0] = rte_lcore_callback_register("no_limit", limit_lcores_init,
+		limit_lcores_uninit, &l[0]);
+	if (handle[0] == NULL) {
+		printf("lcore callback [0] register failed\n");
+		goto error;
+	}
+	l[1].max = eal_threads_count + 1;
+	handle[1] = rte_lcore_callback_register("limit", limit_lcores_init,
+		limit_lcores_uninit, &l[1]);
+	if (handle[1] == NULL) {
+		printf("lcore callback [1] register failed\n");
+		goto error;
+	}
+	if (l[0].init != eal_threads_count || l[1].init != eal_threads_count) {
+		printf("lcore callbacks register succeeded but incorrect init calls, expected %u, %u, got %u, %u\n",
+			eal_threads_count, eal_threads_count,
+			l[0].init, l[1].init);
+		goto error;
+	}
+	if (l[0].uninit != 0 || l[1].uninit != 0) {
+		printf("lcore callbacks register succeeded but incorrect uninit calls, expected 0, 1, got %u, %u\n",
+			l[0].uninit, l[1].uninit);
+		goto error;
+	}
+	/* First thread that expects a valid lcore id. */
+	t = &thread_contexts[0];
+	t->state = INIT;
+	t->registered_count = &registered_count;
+	t->lcore_id_any = false;
+	if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
+		goto cleanup_threads;
+	non_eal_threads_count++;
+	while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+			non_eal_threads_count)
+		;
+	if (l[0].init != eal_threads_count + 1 ||
+			l[1].init != eal_threads_count + 1) {
+		printf("Incorrect init calls, expected %u, %u, got %u, %u\n",
+			eal_threads_count + 1, eal_threads_count + 1,
+			l[0].init, l[1].init);
+		goto cleanup_threads;
+	}
+	if (l[0].uninit != 0 || l[1].uninit != 0) {
+		printf("Incorrect uninit calls, expected 0, 0, got %u, %u\n",
+			l[0].uninit, l[1].uninit);
+		goto cleanup_threads;
+	}
+	/* Second thread, that expects LCORE_ID_ANY because of init refusal. */
+	t = &thread_contexts[1];
+	t->state = INIT;
+	t->registered_count = &registered_count;
+	t->lcore_id_any = true;
+	if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
+		goto cleanup_threads;
+	non_eal_threads_count++;
+	while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+			non_eal_threads_count)
+		;
+	if (l[0].init != eal_threads_count + 2 ||
+			l[1].init != eal_threads_count + 2) {
+		printf("Incorrect init calls, expected %u, %u, got %u, %u\n",
+			eal_threads_count + 2, eal_threads_count + 2,
+			l[0].init, l[1].init);
+		goto cleanup_threads;
+	}
+	if (l[0].uninit != 1 || l[1].uninit != 0) {
+		printf("Incorrect uninit calls, expected 1, 0, got %u, %u\n",
+			l[0].uninit, l[1].uninit);
+		goto cleanup_threads;
+	}
+	/* Release all threads, and check their states. */
+	__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+	ret = 0;
+	for (i = 0; i < non_eal_threads_count; i++) {
+		t = &thread_contexts[i];
+		pthread_join(t->id, NULL);
+		if (t->state != DONE)
+			ret = -1;
+	}
+	if (ret < 0)
+		goto error;
+	if (l[0].uninit != 2 || l[1].uninit != 1) {
+		printf("Threads reported having successfully registered and unregistered, but incorrect uninit calls, expected 2, 1, got %u, %u\n",
+			l[0].uninit, l[1].uninit);
+		goto error;
+	}
+	rte_lcore_callback_unregister(handle[0]);
+	rte_lcore_callback_unregister(handle[1]);
+	return 0;
+
+cleanup_threads:
+	/* Release all threads */
+	__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+	for (i = 0; i < non_eal_threads_count; i++) {
+		t = &thread_contexts[i];
+		pthread_join(t->id, NULL);
+	}
+error:
+	if (handle[1] != NULL)
+		rte_lcore_callback_unregister(handle[1]);
+	if (handle[0] != NULL)
+		rte_lcore_callback_unregister(handle[0]);
+	return -1;
+}
+
 static int
 test_lcores(void)
 {
@@ -133,6 +357,12 @@  test_lcores(void)
 	if (test_non_eal_lcores(eal_threads_count) < 0)
 		return TEST_FAILED;
 
+	if (test_lcores_callback(eal_threads_count) < 0)
+		return TEST_FAILED;
+
+	if (test_non_eal_lcores_callback(eal_threads_count) < 0)
+		return TEST_FAILED;
+
 	return TEST_SUCCESS;
 }
 
diff --git a/lib/librte_eal/common/eal_common_lcore.c b/lib/librte_eal/common/eal_common_lcore.c
index a61824a779..52c46a4cea 100644
--- a/lib/librte_eal/common/eal_common_lcore.c
+++ b/lib/librte_eal/common/eal_common_lcore.c
@@ -224,11 +224,114 @@  rte_socket_id_by_idx(unsigned int idx)
 }
 
 static rte_spinlock_t lcore_lock = RTE_SPINLOCK_INITIALIZER;
+struct lcore_callback {
+	TAILQ_ENTRY(lcore_callback) next;
+	char *name;
+	rte_lcore_init_cb init;
+	rte_lcore_uninit_cb uninit;
+	void *arg;
+};
+static TAILQ_HEAD(lcore_callbacks_head, lcore_callback) lcore_callbacks =
+	TAILQ_HEAD_INITIALIZER(lcore_callbacks);
+
+static int
+callback_init(struct lcore_callback *callback, unsigned int lcore_id)
+{
+	if (callback->init == NULL)
+		return 0;
+	RTE_LOG(DEBUG, EAL, "Call init for lcore callback %s, lcore_id %u\n",
+		callback->name, lcore_id);
+	return callback->init(lcore_id, callback->arg);
+}
+
+static void
+callback_uninit(struct lcore_callback *callback, unsigned int lcore_id)
+{
+	if (callback->uninit == NULL)
+		return;
+	RTE_LOG(DEBUG, EAL, "Call uninit for lcore callback %s, lcore_id %u\n",
+		callback->name, lcore_id);
+	callback->uninit(lcore_id, callback->arg);
+}
+
+void *
+rte_lcore_callback_register(const char *name, rte_lcore_init_cb init,
+	rte_lcore_uninit_cb uninit, void *arg)
+{
+	struct rte_config *cfg = rte_eal_get_configuration();
+	struct lcore_callback *callback;
+	unsigned int lcore_id;
+
+	callback = calloc(1, sizeof(*callback));
+	if (callback == NULL)
+		return NULL;
+	if (asprintf(&callback->name, "%s-%p", name, arg) == -1) {
+		free(callback);
+		return NULL;
+	}
+	callback->init = init;
+	callback->uninit = uninit;
+	callback->arg = arg;
+	rte_spinlock_lock(&lcore_lock);
+	if (callback->init == NULL)
+		goto no_init;
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
+			continue;
+		if (callback_init(callback, lcore_id) == 0)
+			continue;
+		/* Callback refused init for this lcore, uninitialize all
+		 * previous lcore.
+		 */
+		for (; lcore_id != 0; lcore_id--) {
+			if (cfg->lcore_role[lcore_id - 1] == ROLE_OFF)
+				continue;
+			callback_uninit(callback, lcore_id - 1);
+		}
+		free(callback);
+		callback = NULL;
+		goto out;
+	}
+no_init:
+	TAILQ_INSERT_TAIL(&lcore_callbacks, callback, next);
+	RTE_LOG(DEBUG, EAL, "Registered new lcore callback %s (%sinit, %suninit).\n",
+		callback->name, callback->init == NULL ? "NO " : "",
+		callback->uninit == NULL ? "NO " : "");
+out:
+	rte_spinlock_unlock(&lcore_lock);
+	return callback;
+}
+
+void
+rte_lcore_callback_unregister(void *handle)
+{
+	struct rte_config *cfg = rte_eal_get_configuration();
+	struct lcore_callback *callback = handle;
+	unsigned int lcore_id;
+
+	rte_spinlock_lock(&lcore_lock);
+	if (callback->uninit == NULL)
+		goto no_uninit;
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
+			continue;
+		callback_uninit(callback, lcore_id);
+	}
+no_uninit:
+	TAILQ_REMOVE(&lcore_callbacks, callback, next);
+	rte_spinlock_unlock(&lcore_lock);
+	RTE_LOG(DEBUG, EAL, "Unregistered lcore callback %s-%p.\n",
+		callback->name, callback->arg);
+	free(callback->name);
+	free(callback);
+}
 
 unsigned int
 eal_lcore_non_eal_allocate(void)
 {
 	struct rte_config *cfg = rte_eal_get_configuration();
+	struct lcore_callback *callback;
+	struct lcore_callback *prev;
 	unsigned int lcore_id;
 
 	if (cfg->process_type == RTE_PROC_SECONDARY ||
@@ -244,8 +347,29 @@  eal_lcore_non_eal_allocate(void)
 		cfg->lcore_count++;
 		break;
 	}
-	if (lcore_id == RTE_MAX_LCORE)
+	if (lcore_id == RTE_MAX_LCORE) {
 		RTE_LOG(DEBUG, EAL, "No lcore available.\n");
+		goto out;
+	}
+	TAILQ_FOREACH(callback, &lcore_callbacks, next) {
+		if (callback_init(callback, lcore_id) == 0)
+			continue;
+		/* Callback refused init for this lcore, call uninit for all
+		 * previous callbacks.
+		 */
+		prev = TAILQ_PREV(callback, lcore_callbacks_head, next);
+		while (prev != NULL) {
+			callback_uninit(prev, lcore_id);
+			prev = TAILQ_PREV(prev, lcore_callbacks_head, next);
+		}
+		RTE_LOG(DEBUG, EAL, "Initialization refused for lcore %u.\n",
+			lcore_id);
+		cfg->lcore_role[lcore_id] = ROLE_OFF;
+		cfg->lcore_count--;
+		lcore_id = RTE_MAX_LCORE;
+		goto out;
+	}
+out:
 	rte_spinlock_unlock(&lcore_lock);
 	return lcore_id;
 }
@@ -254,11 +378,15 @@  void
 eal_lcore_non_eal_release(unsigned int lcore_id)
 {
 	struct rte_config *cfg = rte_eal_get_configuration();
+	struct lcore_callback *callback;
 
 	rte_spinlock_lock(&lcore_lock);
-	if (cfg->lcore_role[lcore_id] == ROLE_NON_EAL) {
-		cfg->lcore_role[lcore_id] = ROLE_OFF;
-		cfg->lcore_count--;
-	}
+	if (cfg->lcore_role[lcore_id] != ROLE_NON_EAL)
+		goto out;
+	TAILQ_FOREACH(callback, &lcore_callbacks, next)
+		callback_uninit(callback, lcore_id);
+	cfg->lcore_role[lcore_id] = ROLE_OFF;
+	cfg->lcore_count--;
+out:
 	rte_spinlock_unlock(&lcore_lock);
 }
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index 73238ff157..fc79564111 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -401,7 +401,8 @@  uint64_t get_tsc_freq_arch(void);
  *
  * @return
  *   - the id of a lcore with role ROLE_NON_EAL on success.
- *   - RTE_MAX_LCORE if none was available.
+ *   - RTE_MAX_LCORE if none was available or initializing was refused (see
+ *     rte_lcore_callback_register).
  */
 unsigned int eal_lcore_non_eal_allocate(void);
 
diff --git a/lib/librte_eal/include/rte_lcore.h b/lib/librte_eal/include/rte_lcore.h
index 43747e88df..5a2d6ca7af 100644
--- a/lib/librte_eal/include/rte_lcore.h
+++ b/lib/librte_eal/include/rte_lcore.h
@@ -229,6 +229,74 @@  unsigned int rte_get_next_lcore(unsigned int i, int skip_master, int wrap);
 	     i<RTE_MAX_LCORE;						\
 	     i = rte_get_next_lcore(i, 1, 0))
 
+/**
+ * Callback prototype for initializing lcores.
+ *
+ * @param lcore_id
+ *   The lcore to consider.
+ * @param arg
+ *   An opaque pointer passed at callback registration.
+ * @return
+ *   - -1 when refusing this operation,
+ *   - 0 otherwise.
+ */
+typedef int (*rte_lcore_init_cb)(unsigned int lcore_id, void *arg);
+
+/**
+ * Callback prototype for uninitializing lcores.
+ *
+ * @param lcore_id
+ *   The lcore to consider.
+ * @param arg
+ *   An opaque pointer passed at callback registration.
+ */
+typedef void (*rte_lcore_uninit_cb)(unsigned int lcore_id, void *arg);
+
+/**
+ * Register callbacks invoked when initializing and uninitializing a lcore.
+ *
+ * This function calls the init callback with all initialized lcores.
+ * Any error reported by the init callback triggers a rollback calling the
+ * uninit callback for each lcore.
+ * If this step succeeds, the callbacks are put in the lcore callbacks list
+ * that will get called for each lcore allocation/release.
+ *
+ * Note: callbacks execution is serialised under a lock protecting the lcores
+ * and callbacks list.
+ *
+ * @param name
+ *   A name serving as a small description for this callback.
+ * @param init
+ *   The callback invoked when a lcore_id is initialized.
+ * @param uninit
+ *   The callback invoked when a lcore_id is uninitialized.
+ * @param arg
+ *   An optional argument that gets passed to the callback when it gets
+ *   invoked.
+ * @return
+ *   On success, returns an opaque pointer for the registered object.
+ *   On failure (either memory allocation issue in the function itself or an
+ *   error is returned by the init callback itself), returns NULL.
+ */
+__rte_experimental
+void *
+rte_lcore_callback_register(const char *name, rte_lcore_init_cb init,
+	rte_lcore_uninit_cb uninit, void *arg);
+
+/**
+ * Unregister callbacks previously registered with rte_lcore_callback_register.
+ *
+ * This function calls the uninit callback with all initialized lcores.
+ * The callbacks are then removed from the lcore callbacks list.
+ *
+ * @param handle
+ *   The handle pointer returned by a former successful call to
+ *   rte_lcore_callback_register.
+ */
+__rte_experimental
+void
+rte_lcore_callback_unregister(void *handle);
+
 /**
  * Set core affinity of the current thread.
  * Support both EAL and non-EAL thread and update TLS.
diff --git a/lib/librte_eal/rte_eal_version.map b/lib/librte_eal/rte_eal_version.map
index 39c41d445d..aeee7cf431 100644
--- a/lib/librte_eal/rte_eal_version.map
+++ b/lib/librte_eal/rte_eal_version.map
@@ -396,6 +396,8 @@  EXPERIMENTAL {
 
 	# added in 20.08
 	__rte_trace_mem_per_thread_free;
+	rte_lcore_callback_register;
+	rte_lcore_callback_unregister;
 	rte_thread_register;
 	rte_thread_unregister;
 };