@@ -405,6 +405,7 @@ CONFIG_RTE_TABLE_HASH_EXT_STATS_COLLECT=n
CONFIG_RTE_TABLE_HASH_KEY16_STATS_COLLECT=n
CONFIG_RTE_TABLE_HASH_KEY32_STATS_COLLECT=n
CONFIG_RTE_TABLE_HASH_KEY8_STATS_COLLECT=n
+CONFIG_RTE_TABLE_HASH_LRU_STATS_COLLECT=n
#
# Compile librte_pipeline
@@ -412,6 +412,7 @@ CONFIG_RTE_TABLE_HASH_EXT_STATS_COLLECT=n
CONFIG_RTE_TABLE_HASH_KEY16_STATS_COLLECT=n
CONFIG_RTE_TABLE_HASH_KEY32_STATS_COLLECT=n
CONFIG_RTE_TABLE_HASH_KEY8_STATS_COLLECT=n
+CONFIG_RTE_TABLE_HASH_LRU_STATS_COLLECT=n
#
# Compile librte_pipeline
@@ -45,6 +45,20 @@
#define KEYS_PER_BUCKET 4
+#ifdef RTE_TABLE_HASH_LRU_STATS_COLLECT
+/* Per-table counters; compiled out (empty macros) when stats collection is off. */
+#define RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(table, val) \
+	(table)->stats.n_pkts_in += (val)
+#define RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(table, val) \
+	(table)->stats.n_pkts_lookup_miss += (val)
+
+#else
+
+#define RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(table, val)
+#define RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(table, val)
+
+#endif
+
struct bucket {
union {
struct bucket *next;
@@ -63,6 +77,8 @@ struct grinder {
};
struct rte_table_hash {
+ struct rte_table_stats stats;
+
/* Input parameters */
uint32_t key_size;
uint32_t entry_size;
@@ -368,6 +384,9 @@ static int rte_table_hash_lru_lookup_unoptimized(
struct rte_table_hash *t = (struct rte_table_hash *) table;
uint64_t pkts_mask_out = 0;
+ __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
+ RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(t, n_pkts_in);
+
for ( ; pkts_mask; ) {
struct bucket *bkt;
struct rte_mbuf *pkt;
@@ -412,6 +431,7 @@ static int rte_table_hash_lru_lookup_unoptimized(
}
*lookup_hit_mask = pkts_mask_out;
+ RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - __builtin_popcountll(pkts_mask_out));
return 0;
}
@@ -804,6 +824,9 @@ static int rte_table_hash_lru_lookup(
uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
int status = 0;
+ __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
+ RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(t, n_pkts_in); /* NOTE(review): the unoptimized fallback below runs PKTS_IN_ADD too, so the <7-packet path looks double-counted -- verify */
+
/* Cannot run the pipeline with less than 7 packets */
if (__builtin_popcountll(pkts_mask) < 7)
return rte_table_hash_lru_lookup_unoptimized(table, pkts,
@@ -916,6 +939,7 @@ static int rte_table_hash_lru_lookup(
}
*lookup_hit_mask = pkts_mask_out;
+ RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - __builtin_popcountll(pkts_mask_out));
return status;
}
@@ -933,6 +957,9 @@ static int rte_table_hash_lru_lookup_dosig(
uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
int status = 0;
+ __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
+ RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(t, n_pkts_in); /* NOTE(review): the unoptimized fallback below runs PKTS_IN_ADD too, so the <7-packet path looks double-counted -- verify */
+
/* Cannot run the pipeline with less than 7 packets */
if (__builtin_popcountll(pkts_mask) < 7)
return rte_table_hash_lru_lookup_unoptimized(table, pkts,
@@ -1045,15 +1072,31 @@ static int rte_table_hash_lru_lookup_dosig(
}
*lookup_hit_mask = pkts_mask_out;
+ RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - __builtin_popcountll(pkts_mask_out));
return status;
}
+static int
+rte_table_hash_lru_stats_read(void *table, struct rte_table_stats *stats, int clear)
+{
+	/* Snapshot the per-table counters (if requested), then optionally reset them. */
+	struct rte_table_hash *hash = (struct rte_table_hash *) table;
+
+	if (stats != NULL)
+		*stats = hash->stats;
+	if (clear)
+		memset(&hash->stats, 0, sizeof(hash->stats));
+
+	return 0;
+}
+
struct rte_table_ops rte_table_hash_lru_ops = {
.f_create = rte_table_hash_lru_create,
.f_free = rte_table_hash_lru_free,
.f_add = rte_table_hash_lru_entry_add,
.f_delete = rte_table_hash_lru_entry_delete,
.f_lookup = rte_table_hash_lru_lookup,
+ .f_stats = rte_table_hash_lru_stats_read,
};
struct rte_table_ops rte_table_hash_lru_dosig_ops = {
@@ -1062,4 +1105,5 @@ struct rte_table_ops rte_table_hash_lru_dosig_ops = {
.f_add = rte_table_hash_lru_entry_add,
.f_delete = rte_table_hash_lru_entry_delete,
.f_lookup = rte_table_hash_lru_lookup_dosig,
+ .f_stats = rte_table_hash_lru_stats_read,
};