[dpdk-dev,v2,2/3] timer: handle timers installed from non-EAL threads
This commit adds support for timers installed from non-EAL threads;
timers from all such threads are mapped to lcore id RTE_MAX_LCORE and
placed in the corresponding skiplist of the target lcore's pending
lists.
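As an illustration, below is a minimal sketch of arming a timer from a
non-EAL pthread under this change. The thread function, callback, and
variable names (non_eal_thread, my_cb, my_tim, tim_lcore) are
hypothetical; the mapping to index RTE_MAX_LCORE happens inside the
library, not in application code.

    #include <pthread.h>
    #include <rte_timer.h>
    #include <rte_cycles.h>
    #include <rte_lcore.h>

    static struct rte_timer my_tim;   /* hypothetical application timer */

    static void
    my_cb(struct rte_timer *tim, void *arg)
    {
        /* executes on the lcore passed as tim_lcore below */
    }

    static void *
    non_eal_thread(void *arg)
    {
        unsigned int tim_lcore = *(unsigned int *)arg; /* lcore to run the callback */

        /* Here rte_lcore_id() returns LCORE_ID_ANY, so with this patch
         * the timer is inserted into pending_lists[RTE_MAX_LCORE] of
         * the target lcore's priv_timer. */
        rte_timer_init(&my_tim);
        rte_timer_reset(&my_tim, rte_get_timer_hz(), SINGLE, tim_lcore,
                        my_cb, NULL);
        return NULL;
    }

The callback still runs on tim_lcore via rte_timer_manage(); only the
bookkeeping of which thread installed the timer changes.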
Signed-off-by: Gabriel Carrillo <erik.g.carrillo@intel.com>
---
v2:
* Address checkpatch warnings
lib/librte_timer/rte_timer.c | 48 ++++++++++++++++++++++++++++++--------------
1 file changed, 33 insertions(+), 15 deletions(-)
@@ -64,8 +64,8 @@ struct skiplist {
} __rte_cache_aligned;
struct priv_timer {
- /** one pending list per enabled lcore */
- struct skiplist pending_lists[RTE_MAX_LCORE];
+ /** one pending list per lcore, plus one for non-EAL threads */
+ struct skiplist pending_lists[RTE_MAX_LCORE + 1];
/** per-core variable that true if a timer was updated on this
* core since last reset of the variable */
@@ -85,7 +85,7 @@ struct priv_timer {
static struct priv_timer priv_timer[RTE_MAX_LCORE];
/** cache of IDs of enabled lcores */
-static unsigned int enabled_lcores[RTE_MAX_LCORE];
+static unsigned int enabled_lcores[RTE_MAX_LCORE + 1];
static int n_enabled_lcores;
/* when debug is enabled, store some statistics */
@@ -103,23 +103,33 @@ static int n_enabled_lcores;
void
rte_timer_subsystem_init(void)
{
- unsigned int lcore_id1, lcore_id2;
+ unsigned int target_lcore, installer_lcore;
struct skiplist *list;
+ struct priv_timer *priv_tim;
int i, j;
- RTE_LCORE_FOREACH(lcore_id1)
- enabled_lcores[n_enabled_lcores++] = lcore_id1;
+ RTE_LCORE_FOREACH(target_lcore)
+ enabled_lcores[n_enabled_lcores++] = target_lcore;
+
+ /* To handle timers coming from non-EAL threads */
+ enabled_lcores[n_enabled_lcores++] = RTE_MAX_LCORE;
/* since priv_timer is static, it's zeroed by default, so only init some
* fields.
*/
- for (i = 0, lcore_id1 = enabled_lcores[i]; i < n_enabled_lcores;
- lcore_id1 = enabled_lcores[++i]) {
- priv_timer[lcore_id1].prev_lcore = lcore_id1;
+ for (i = 0, target_lcore = enabled_lcores[i]; i < n_enabled_lcores;
+ target_lcore = enabled_lcores[++i]) {
+ /* Don't use this value to index the priv_timer array */
+ if (target_lcore == RTE_MAX_LCORE)
+ continue;
- for (j = 0, lcore_id2 = enabled_lcores[j]; j < n_enabled_lcores;
- lcore_id2 = enabled_lcores[++j]) {
- list = &priv_timer[lcore_id1].pending_lists[lcore_id2];
+ priv_tim = &priv_timer[target_lcore];
+ priv_tim->prev_lcore = target_lcore;
+
+ for (j = 0, installer_lcore = enabled_lcores[j];
+ j < n_enabled_lcores;
+ installer_lcore = enabled_lcores[++j]) {
+ list = &priv_tim->pending_lists[installer_lcore];
rte_spinlock_init(&list->lock);
}
}
@@ -300,10 +310,16 @@ timer_get_prev_entries_for_node(struct rte_timer *tim, struct skiplist *list,
static void
timer_add(struct rte_timer *tim, unsigned tim_lcore, int local_is_locked)
{
- unsigned lcore_id = rte_lcore_id();
+ unsigned int installer_lcore, lcore_id = rte_lcore_id();
unsigned lvl;
struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
- struct skiplist *list = &priv_timer[tim_lcore].pending_lists[lcore_id];
+ struct skiplist *list;
+
+ /* Check if timer being installed from non-EAL thread */
+ installer_lcore = (lcore_id == LCORE_ID_ANY) ? RTE_MAX_LCORE :
+ lcore_id;
+
+ list = &priv_timer[tim_lcore].pending_lists[installer_lcore];
/* if timer needs to be scheduled on another core, we need to
* lock the list; if it is on local core, we need to lock if
@@ -439,7 +455,9 @@ __rte_timer_reset(struct rte_timer *tim, uint64_t expire,
* the state so we don't need to use cmpset() here */
rte_wmb();
status.state = RTE_TIMER_PENDING;
- status.installer = lcore_id;
+ /* Check if installer is non-EAL thread */
+ status.installer = (lcore_id == LCORE_ID_ANY) ? RTE_MAX_LCORE :
+ lcore_id;
status.owner = tim_lcore;
tim->status.u32 = status.u32;
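Since the LCORE_ID_ANY to RTE_MAX_LCORE mapping now appears in both
timer_add() and __rte_timer_reset(), one way to keep the two sites
consistent would be a small static helper; the name
timer_installer_lcore() below is hypothetical and not part of this
patch, shown only as a sketch of the mapping.

    /* Hypothetical helper: map the calling thread to a pending-list
     * index.  EAL threads use their lcore id; non-EAL threads
     * (rte_lcore_id() == LCORE_ID_ANY) share the extra slot at index
     * RTE_MAX_LCORE. */
    static inline unsigned int
    timer_installer_lcore(void)
    {
        unsigned int lcore_id = rte_lcore_id();

        return (lcore_id == LCORE_ID_ANY) ? RTE_MAX_LCORE : lcore_id;
    }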