@@ -13,6 +13,9 @@ static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx)
if (mlx5dr_pat_init_pattern_cache(&ctx->pattern_cache))
return rte_errno;
+ if (mlx5dr_definer_init_cache(&ctx->definer_cache))
+ goto uninit_pat_cache;
+
/* Create an STC pool per FT type */
pool_attr.pool_type = MLX5DR_POOL_TYPE_STC;
pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_STC_POOL;
@@ -35,8 +38,10 @@ static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx)
if (ctx->stc_pool[i])
mlx5dr_pool_destroy(ctx->stc_pool[i]);
- mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
+ mlx5dr_definer_uninit_cache(ctx->definer_cache);
+uninit_pat_cache:
+ mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
return rte_errno;
}
@@ -44,12 +49,13 @@ static void mlx5dr_context_pools_uninit(struct mlx5dr_context *ctx)
{
int i;
- mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
-
for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
if (ctx->stc_pool[i])
mlx5dr_pool_destroy(ctx->stc_pool[i]);
}
+
+ mlx5dr_definer_uninit_cache(ctx->definer_cache);
+ mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
}
static int mlx5dr_context_init_pd(struct mlx5dr_context *ctx,
@@ -39,6 +39,7 @@ struct mlx5dr_context {
struct mlx5dr_context_common_res common_res[MLX5DR_TABLE_TYPE_MAX];
struct mlx5dr_context_shared_gvmi_res gvmi_res[MLX5DR_TABLE_TYPE_MAX];
struct mlx5dr_pattern_cache *pattern_cache;
+ struct mlx5dr_definer_cache *definer_cache;
pthread_spinlock_t ctrl_lock;
enum mlx5dr_context_flags flags;
struct mlx5dr_send_engine *send_queue;
@@ -2061,6 +2061,7 @@ mlx5dr_definer_compare(struct mlx5dr_definer *definer_a,
{
int i;
+ /* Future: optimize by comparing only the selectors covered by the valid mask */
for (i = 0; i < BYTE_SELECTORS; i++)
if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
return 1;
@@ -2133,15 +2134,106 @@ mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher,
return rte_errno;
}
+int mlx5dr_definer_init_cache(struct mlx5dr_definer_cache **cache)
+{
+ struct mlx5dr_definer_cache *new_cache;
+
+ new_cache = simple_calloc(1, sizeof(*new_cache));
+ if (!new_cache) {
+ rte_errno = ENOMEM;
+ return rte_errno;
+ }
+ LIST_INIT(&new_cache->head);
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5dr_definer_uninit_cache(struct mlx5dr_definer_cache *cache)
+{
+ simple_free(cache);
+}
+
+static struct mlx5dr_devx_obj *
+mlx5dr_definer_get_obj(struct mlx5dr_context *ctx,
+ struct mlx5dr_definer *definer)
+{
+ struct mlx5dr_definer_cache *cache = ctx->definer_cache;
+ struct mlx5dr_cmd_definer_create_attr def_attr = {0};
+ struct mlx5dr_definer_cache_item *cached_definer;
+ struct mlx5dr_devx_obj *obj;
+
+ /* Search definer cache for requested definer */
+ LIST_FOREACH(cached_definer, &cache->head, next) {
+ if (mlx5dr_definer_compare(&cached_definer->definer, definer))
+ continue;
+
+ /* Reuse definer and update LRU order (move to the head of the list) */
+ LIST_REMOVE(cached_definer, next);
+ LIST_INSERT_HEAD(&cache->head, cached_definer, next);
+ cached_definer->refcount++;
+ return cached_definer->definer.obj;
+ }
+
+ /* Allocate and create definer based on the bitmask tag */
+ def_attr.match_mask = definer->mask.jumbo;
+ def_attr.dw_selector = definer->dw_selector;
+ def_attr.byte_selector = definer->byte_selector;
+
+ obj = mlx5dr_cmd_definer_create(ctx->ibv_ctx, &def_attr);
+ if (!obj)
+ return NULL;
+
+ cached_definer = simple_calloc(1, sizeof(*cached_definer));
+ if (!cached_definer) {
+ rte_errno = ENOMEM;
+ goto free_definer_obj;
+ }
+
+ memcpy(&cached_definer->definer, definer, sizeof(*definer));
+ cached_definer->definer.obj = obj;
+ cached_definer->refcount = 1;
+ LIST_INSERT_HEAD(&cache->head, cached_definer, next);
+
+ return obj;
+
+free_definer_obj:
+ mlx5dr_cmd_destroy_obj(obj);
+ return NULL;
+}
+
+static void
+mlx5dr_definer_put_obj(struct mlx5dr_context *ctx,
+ struct mlx5dr_devx_obj *obj)
+{
+ struct mlx5dr_definer_cache_item *cached_definer;
+
+ LIST_FOREACH(cached_definer, &ctx->definer_cache->head, next) {
+ if (cached_definer->definer.obj != obj)
+ continue;
+
+ /* Object found */
+ if (--cached_definer->refcount)
+ return;
+
+ LIST_REMOVE(cached_definer, next);
+ mlx5dr_cmd_destroy_obj(cached_definer->definer.obj);
+ simple_free(cached_definer);
+ return;
+ }
+
+ /* Programming error, object must be part of cache */
+ assert(false);
+}
+
static struct mlx5dr_definer *
-mlx5dr_definer_alloc(struct ibv_context *ibv_ctx,
+mlx5dr_definer_alloc(struct mlx5dr_context *ctx,
struct mlx5dr_definer_fc *fc,
int fc_sz,
struct rte_flow_item *items,
struct mlx5dr_definer *layout,
bool bind_fc)
{
- struct mlx5dr_cmd_definer_create_attr def_attr = {0};
struct mlx5dr_definer *definer;
int ret;
@@ -2166,12 +2258,7 @@ mlx5dr_definer_alloc(struct ibv_context *ibv_ctx,
/* Create the tag mask used for definer creation */
mlx5dr_definer_create_tag_mask(items, fc, fc_sz, definer->mask.jumbo);
- /* Create definer based on the bitmask tag */
- def_attr.match_mask = definer->mask.jumbo;
- def_attr.dw_selector = layout->dw_selector;
- def_attr.byte_selector = layout->byte_selector;
-
- definer->obj = mlx5dr_cmd_definer_create(ibv_ctx, &def_attr);
+ definer->obj = mlx5dr_definer_get_obj(ctx, definer);
if (!definer->obj)
goto free_definer;
@@ -2183,9 +2270,10 @@ mlx5dr_definer_alloc(struct ibv_context *ibv_ctx,
}
static void
-mlx5dr_definer_free(struct mlx5dr_definer *definer)
+mlx5dr_definer_free(struct mlx5dr_context *ctx,
+ struct mlx5dr_definer *definer)
{
- mlx5dr_cmd_destroy_obj(definer->obj);
+ mlx5dr_definer_put_obj(ctx, definer->obj);
simple_free(definer);
}
@@ -2199,7 +2287,7 @@ mlx5dr_definer_matcher_match_init(struct mlx5dr_context *ctx,
/* Create mendatory match definer */
for (i = 0; i < matcher->num_of_mt; i++) {
- mt[i].definer = mlx5dr_definer_alloc(ctx->ibv_ctx,
+ mt[i].definer = mlx5dr_definer_alloc(ctx,
mt[i].fc,
mt[i].fc_sz,
mt[i].items,
@@ -2214,7 +2302,7 @@ mlx5dr_definer_matcher_match_init(struct mlx5dr_context *ctx,
free_definers:
while (i--)
- mlx5dr_definer_free(mt[i].definer);
+ mlx5dr_definer_free(ctx, mt[i].definer);
return rte_errno;
}
@@ -2222,10 +2310,11 @@ mlx5dr_definer_matcher_match_init(struct mlx5dr_context *ctx,
static void
mlx5dr_definer_matcher_match_uninit(struct mlx5dr_matcher *matcher)
{
+ struct mlx5dr_context *ctx = matcher->tbl->ctx;
int i;
for (i = 0; i < matcher->num_of_mt; i++)
- mlx5dr_definer_free(matcher->mt[i].definer);
+ mlx5dr_definer_free(ctx, matcher->mt[i].definer);
}
static int
@@ -2249,7 +2338,7 @@ mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx,
matcher->flags |= MLX5DR_MATCHER_FLAGS_RANGE_DEFINER;
/* Create definer without fcr binding, already binded */
- mt[i].range_definer = mlx5dr_definer_alloc(ctx->ibv_ctx,
+ mt[i].range_definer = mlx5dr_definer_alloc(ctx,
mt[i].fcr,
mt[i].fcr_sz,
mt[i].items,
@@ -2265,7 +2354,7 @@ mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx,
free_definers:
while (i--)
if (mt[i].range_definer)
- mlx5dr_definer_free(mt[i].range_definer);
+ mlx5dr_definer_free(ctx, mt[i].range_definer);
return rte_errno;
}
@@ -2273,11 +2362,12 @@ mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx,
static void
mlx5dr_definer_matcher_range_uninit(struct mlx5dr_matcher *matcher)
{
+ struct mlx5dr_context *ctx = matcher->tbl->ctx;
int i;
for (i = 0; i < matcher->num_of_mt; i++)
if (matcher->mt[i].range_definer)
- mlx5dr_definer_free(matcher->mt[i].range_definer);
+ mlx5dr_definer_free(ctx, matcher->mt[i].range_definer);
}
static int
@@ -569,6 +569,16 @@ struct mlx5dr_definer {
struct mlx5dr_devx_obj *obj;
};
+struct mlx5dr_definer_cache {
+ LIST_HEAD(definer_head, mlx5dr_definer_cache_item) head;
+};
+
+struct mlx5dr_definer_cache_item {
+ struct mlx5dr_definer definer;
+ uint32_t refcount;
+ LIST_ENTRY(mlx5dr_definer_cache_item) next;
+};
+
static inline bool
mlx5dr_definer_is_jumbo(struct mlx5dr_definer *definer)
{
@@ -592,4 +602,8 @@ int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
void mlx5dr_definer_matcher_uninit(struct mlx5dr_matcher *matcher);
+int mlx5dr_definer_init_cache(struct mlx5dr_definer_cache **cache);
+
+void mlx5dr_definer_uninit_cache(struct mlx5dr_definer_cache *cache);
+
#endif