[v3,4/9] eal: introduce thread uninit helper
Checks
Commit Message
This is a preparation step for dynamically unregistering threads.
Since we explicitly allocate a per-thread trace buffer in
rte_thread_init, add an internal helper to free this buffer.
Signed-off-by: David Marchand <david.marchand@redhat.com>
---
Note: I preferred renaming the current internal function that frees all
threads' trace buffers (new name trace_mem_free()) and reusing the
previous name (trace_mem_per_thread_free()) when freeing this buffer for
a given thread.
Changes since v2:
- added missing stub for Windows tracing support,
- moved free symbol to exported (experimental) ABI as a counterpart of
the alloc symbol we already had,
Changes since v1:
- rebased on master, removed the Windows workaround regarding traces support,
---
lib/librte_eal/common/eal_common_thread.c | 9 ++++
lib/librte_eal/common/eal_common_trace.c | 51 +++++++++++++++++++----
lib/librte_eal/common/eal_thread.h | 5 +++
lib/librte_eal/common/eal_trace.h | 2 +-
lib/librte_eal/include/rte_trace_point.h | 9 ++++
lib/librte_eal/rte_eal_version.map | 3 ++
lib/librte_eal/windows/eal.c | 5 +++
7 files changed, 75 insertions(+), 9 deletions(-)
@@ -20,6 +20,7 @@
#include "eal_internal_cfg.h"
#include "eal_private.h"
#include "eal_thread.h"
+#include "eal_trace.h"
RTE_DEFINE_PER_LCORE(unsigned int, _lcore_id) = LCORE_ID_ANY;
RTE_DEFINE_PER_LCORE(int, _thread_id) = -1;
@@ -161,6 +162,14 @@ rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset)
__rte_trace_mem_per_thread_alloc();
}
+void
+rte_thread_uninit(void)
+{
+ __rte_trace_mem_per_thread_free();
+
+ RTE_PER_LCORE(_lcore_id) = LCORE_ID_ANY;
+}
+
struct rte_thread_ctrl_params {
void *(*start_routine)(void *);
void *arg;
@@ -101,7 +101,7 @@ eal_trace_fini(void)
{
if (!rte_trace_is_enabled())
return;
- trace_mem_per_thread_free();
+ trace_mem_free();
trace_metadata_destroy();
eal_trace_args_free();
}
@@ -370,24 +370,59 @@ __rte_trace_mem_per_thread_alloc(void)
rte_spinlock_unlock(&trace->lock);
}
+static void
+trace_mem_per_thread_free_unlocked(struct thread_mem_meta *meta)
+{
+ if (meta->area == TRACE_AREA_HUGEPAGE)
+ eal_free_no_trace(meta->mem);
+ else if (meta->area == TRACE_AREA_HEAP)
+ free(meta->mem);
+}
+
+void
+__rte_trace_mem_per_thread_free(void)
+{
+ struct trace *trace = trace_obj_get();
+ struct __rte_trace_header *header;
+ uint32_t count;
+
+ if (RTE_PER_LCORE(trace_mem) == NULL)
+ return;
+
+ header = RTE_PER_LCORE(trace_mem);
+ rte_spinlock_lock(&trace->lock);
+ for (count = 0; count < trace->nb_trace_mem_list; count++) {
+ if (trace->lcore_meta[count].mem == header)
+ break;
+ }
+ if (count != trace->nb_trace_mem_list) {
+ struct thread_mem_meta *meta = &trace->lcore_meta[count];
+
+ trace_mem_per_thread_free_unlocked(meta);
+ if (count != trace->nb_trace_mem_list - 1) {
+ memmove(meta, meta + 1,
+ sizeof(*meta) *
+ (trace->nb_trace_mem_list - count - 1));
+ }
+ trace->nb_trace_mem_list--;
+ }
+ rte_spinlock_unlock(&trace->lock);
+}
+
void
-trace_mem_per_thread_free(void)
+trace_mem_free(void)
{
struct trace *trace = trace_obj_get();
uint32_t count;
- void *mem;
if (!rte_trace_is_enabled())
return;
rte_spinlock_lock(&trace->lock);
for (count = 0; count < trace->nb_trace_mem_list; count++) {
- mem = trace->lcore_meta[count].mem;
- if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
- eal_free_no_trace(mem);
- else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
- free(mem);
+ trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]);
}
+ trace->nb_trace_mem_list = 0;
rte_spinlock_unlock(&trace->lock);
}
@@ -25,6 +25,11 @@ __rte_noreturn void *eal_thread_loop(void *arg);
*/
void rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset);
+/**
+ * Uninitialize per-lcore info for current thread.
+ */
+void rte_thread_uninit(void);
+
/**
* Get the NUMA socket id from cpu id.
* This function is private to EAL.
@@ -106,7 +106,7 @@ int trace_metadata_create(void);
void trace_metadata_destroy(void);
int trace_mkdir(void);
int trace_epoch_time_save(void);
-void trace_mem_per_thread_free(void);
+void trace_mem_free(void);
/* EAL interface */
int eal_trace_init(void);
@@ -230,6 +230,15 @@ __rte_trace_point_fp_is_enabled(void)
__rte_experimental
void __rte_trace_mem_per_thread_alloc(void);
+/**
+ * @internal
+ *
+ * Free trace memory buffer per thread.
+ *
+ */
+__rte_experimental
+void __rte_trace_mem_per_thread_free(void);
+
/**
* @internal
*
@@ -393,6 +393,9 @@ EXPERIMENTAL {
rte_trace_point_lookup;
rte_trace_regexp;
rte_trace_save;
+
+ # added in 20.08
+ __rte_trace_mem_per_thread_free;
};
INTERNAL {
@@ -255,6 +255,11 @@ __rte_trace_mem_per_thread_alloc(void)
{
}
+void
+__rte_trace_mem_per_thread_free(void)
+{
+}
+
void
__rte_trace_point_emit_field(size_t sz, const char *field,
const char *type)