[dpdk-dev,v4,3/4] Add autotests for RIB library

Message ID 1524780214-23196-4-git-send-email-medvedkinv@gmail.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon

Checks

Context               Check     Description
ci/checkpatch         warning   coding style issues
ci/Intel-compilation  fail      apply patch file failure

Commit Message

Vladimir Medvedkin April 26, 2018, 10:03 p.m. UTC
  Signed-off-by: Medvedkin Vladimir <medvedkinv@gmail.com>
---
 test/test/Makefile               |   5 +
 test/test/meson.build            |   8 +
 test/test/test_rib.c             | 308 +++++++++++++++++++++++++++++++++++++++
 test/test/test_rib_generate_rt.c | 297 +++++++++++++++++++++++++++++++++++++
 test/test/test_rib_generate_rt.h |  38 +++++
 test/test/test_rib_lpm_comp.c    | 189 ++++++++++++++++++++++++
 test/test/test_rib_perf.c        | 145 ++++++++++++++++++
 7 files changed, 990 insertions(+)
 create mode 100644 test/test/test_rib.c
 create mode 100644 test/test/test_rib_generate_rt.c
 create mode 100644 test/test/test_rib_generate_rt.h
 create mode 100644 test/test/test_rib_lpm_comp.c
 create mode 100644 test/test/test_rib_perf.c
  

Comments

Bruce Richardson June 29, 2018, 2:13 p.m. UTC | #1
On Fri, Apr 27, 2018 at 01:03:33AM +0300, Medvedkin Vladimir wrote:
> Signed-off-by: Medvedkin Vladimir <medvedkinv@gmail.com>
> ---
>  test/test/Makefile               |   5 +
>  test/test/meson.build            |   8 +
>  test/test/test_rib.c             | 308 +++++++++++++++++++++++++++++++++++++++
>  test/test/test_rib_generate_rt.c | 297 +++++++++++++++++++++++++++++++++++++
>  test/test/test_rib_generate_rt.h |  38 +++++
>  test/test/test_rib_lpm_comp.c    | 189 ++++++++++++++++++++++++
>  test/test/test_rib_perf.c        | 145 ++++++++++++++++++
>  7 files changed, 990 insertions(+)
>  create mode 100644 test/test/test_rib.c
>  create mode 100644 test/test/test_rib_generate_rt.c
>  create mode 100644 test/test/test_rib_generate_rt.h
>  create mode 100644 test/test/test_rib_lpm_comp.c
>  create mode 100644 test/test/test_rib_perf.c
> 
Clang is giving some errors after this patch, nothing too serious, but they
need to be fixed in the next version:

../test/test/test_rib_perf.c:87:12: error: equality comparison with extraneous parentheses [-Werror,-Wparentheses-equality]
                if ((ret == 0))
                     ~~~~^~~~
../test/test/test_rib_perf.c:87:12: note: remove extraneous parentheses around the comparison to silence this warning
                if ((ret == 0))
                    ~    ^   ~
../test/test/test_rib_perf.c:87:12: note: use '=' to turn this equality comparison into an assignment
                if ((ret == 0))
                         ^~
                         =
1 error generated.
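
The fix is just to drop the extra parentheses (assuming, of course, that a
comparison rather than an assignment is what is intended here):

	if (ret == 0)
		status++;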
  
Bruce Richardson June 29, 2018, 3:07 p.m. UTC | #2
On Fri, Apr 27, 2018 at 01:03:33AM +0300, Medvedkin Vladimir wrote:
> Signed-off-by: Medvedkin Vladimir <medvedkinv@gmail.com>
> ---
>  test/test/Makefile               |   5 +
>  test/test/meson.build            |   8 +
>  test/test/test_rib.c             | 308 +++++++++++++++++++++++++++++++++++++++
>  test/test/test_rib_generate_rt.c | 297 +++++++++++++++++++++++++++++++++++++
>  test/test/test_rib_generate_rt.h |  38 +++++
>  test/test/test_rib_lpm_comp.c    | 189 ++++++++++++++++++++++++
>  test/test/test_rib_perf.c        | 145 ++++++++++++++++++
>  7 files changed, 990 insertions(+)
>  create mode 100644 test/test/test_rib.c
>  create mode 100644 test/test/test_rib_generate_rt.c
>  create mode 100644 test/test/test_rib_generate_rt.h
>  create mode 100644 test/test/test_rib_lpm_comp.c
>  create mode 100644 test/test/test_rib_perf.c
> 

<snip>

> diff --git a/test/test/test_rib_lpm_comp.c b/test/test/test_rib_lpm_comp.c
> new file mode 100644
> index 0000000..ef48c8c
> --- /dev/null
> +++ b/test/test/test_rib_lpm_comp.c
> @@ -0,0 +1,189 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
> + */
> +
> +#include <stdio.h>
> +#include <stdint.h>
> +#include <stdlib.h>
> +
> +#include <rte_random.h>
> +#include <rte_cycles.h>
> +#include <rte_branch_prediction.h>
> +#include <rte_ip.h>
> +#include <rte_malloc.h>
> +#include <rte_lpm.h>
> +#include <rte_rib.h>
> +
> +#include "test.h"
> +#include "test_xmmt_ops.h"
> +#include "test_rib_generate_rt.h"
> +
> +#define TEST_RIB_ASSERT(cond) do {				\
> +	if (!(cond)) {						\
> +		printf("Error at line %d:\n", __LINE__);	\
> +		return -1;					\
> +	}							\
> +} while (0)
> +
> +#define ITERATIONS (1 << 25)
> +#define BATCH_SIZE (1 << 7)
> +#define BULK_SIZE 32
> +#define LPM_NH_MASK	((1 << 24) - 1)
> +
> +static uint64_t default_nh;
> +
> +static int
> +test_lookup(struct rte_rib *rib, struct rte_lpm *lpm)

It should be fairly obvious, but put in a comment explaining what the
function does and how.
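
Something along these lines (my wording only, adjust as needed) would do:

	/*
	 * Walk the whole IPv4 address space linearly in batches and, for each
	 * batch, do a bulk lookup in both the RIB and the LPM table. LPM
	 * misses are mapped to the default next hop so the two results can be
	 * compared entry by entry; any mismatch fails the test.
	 */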

> +{
> +	static uint32_t ip_batch[BATCH_SIZE];
> +	uint64_t rib_next_hops[BULK_SIZE];
> +	uint32_t lpm_next_hops[BULK_SIZE];
> +	int i, j, k;
> +
> +	for (i = 0; i < ITERATIONS; i++) {
> +		for (j = 0; j < BATCH_SIZE; j++)
> +			ip_batch[j] = (i << 7) + j;
> +
> +		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
> +			rte_rib_fib_lookup_bulk(rib, &ip_batch[j],
> +				rib_next_hops, BULK_SIZE);
> +			rte_lpm_lookup_bulk(lpm, &ip_batch[j],
> +				lpm_next_hops, BULK_SIZE);
> +			for (k = 0; k < BULK_SIZE; k++) {
> +				if (likely(lpm_next_hops[k] &
> +					RTE_LPM_LOOKUP_SUCCESS))
> +					lpm_next_hops[k] &= LPM_NH_MASK;
> +				else
> +					lpm_next_hops[k] = default_nh;
> +			}
> +			for (k = 0; k < BULK_SIZE; k++)
> +				TEST_RIB_ASSERT(rib_next_hops[k] ==
> +						lpm_next_hops[k]);
> +		}
> +	}
> +	return 0;
> +}

This looks like a good unit test for comparisons. Although it's scanning
linearly, I wonder if it may be worthwhile to rework the loops so you do
all lookups for a batch for lpm first then for fib, and track the cycles
for each. Then at the end you can print out the lookup perf comparison.
Alternatively, an additional batch at the end with random lookups could be
done. [Yes, I know the info can be got by running the perf tests for lpm
and rib separately, but it would be nice to have it as part of a comparison
autotest]
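
Roughly what I have in mind (untested sketch, reusing the existing batch
variables plus two new cycle counters) is:

	uint64_t start, rib_cycles = 0, lpm_cycles = 0;
	...
	for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
		start = rte_rdtsc();
		rte_rib_fib_lookup_bulk(rib, &ip_batch[j],
			rib_next_hops, BULK_SIZE);
		rib_cycles += rte_rdtsc() - start;

		start = rte_rdtsc();
		rte_lpm_lookup_bulk(lpm, &ip_batch[j],
			lpm_next_hops, BULK_SIZE);
		lpm_cycles += rte_rdtsc() - start;

		/* existing success-mask handling and per-entry compare here */
	}

and then at the end print rib_cycles and lpm_cycles divided by the total
number of lookups to get a per-lookup comparison.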

> +
> +static int
> +test_rib_lpm_comp(void)
> +{
> +	struct rte_rib *rib = NULL;
> +	struct rte_lpm *lpm = NULL;
> +	struct route_rule *rt = NULL;
> +	unsigned int i;
> +	int rib_add = 0, lpm_add = 0;
> +	int ret, nh_bits, nr_tbl8;
> +	uint32_t num_routes;
> +	struct rte_rib_conf conf;
> +	struct rte_lpm_config config;
> +
> +	rte_srand(rte_rdtsc());
> +	default_nh = 17;
> +
> +	conf.max_nodes = 3000000;
> +	conf.node_sz = sizeof(struct rte_rib_node);
> +	conf.type = RTE_RIB_DIR24_8;
> +	conf.fib_conf.dir24_8.def_nh = default_nh;
> +	conf.fib_conf.dir24_8.nh_sz = RTE_DIR24_8_8B;
> +
> +	nh_bits = RTE_MIN(((1 << (3 + conf.fib_conf.dir24_8.nh_sz)) - 1), 24);
> +	nr_tbl8 = RTE_MIN(((1 << nh_bits) - 1), 65535);

These two lines need a comment explaining them - especially the former one.
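
If I read the dir24_8 code correctly, something like this would do (please
correct the wording if I have the reasoning wrong):

	/*
	 * Usable next-hop width: a dir24_8 entry is (1 << (3 + nh_sz)) bits
	 * wide, one bit of which is reserved internally, and LPM next hops
	 * are limited to 24 bits.
	 */
	nh_bits = RTE_MIN(((1 << (3 + conf.fib_conf.dir24_8.nh_sz)) - 1), 24);
	/* as many tbl8 groups as a next hop can index, capped for LPM */
	nr_tbl8 = RTE_MIN(((1 << nh_bits) - 1), 65535);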

> +	config.number_tbl8s = nr_tbl8;
> +	conf.fib_conf.dir24_8.num_tbl8 = nr_tbl8;
> +	config.max_rules = 2000000;
> +	config.flags = 0;
> +
> +	num_routes = 1200000;
> +
> +	rt = rte_zmalloc("struct route_rule *", sizeof(struct route_rule) *
> +		num_routes + 5, 0);
> +	TEST_RIB_ASSERT(rt != NULL);
> +
> +	num_routes = generate_large_route_rule_table(num_routes, rt);
> +	TEST_RIB_ASSERT(num_routes != 0);
> +	printf("No. routes = %u\n", (unsigned int) num_routes);
> +
> +	shuffle_rt(rt, num_routes);
> +
> +	print_route_distribution(rt, (uint32_t) num_routes);
> +
> +	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &conf);
> +	TEST_RIB_ASSERT(rib != NULL);
> +
> +	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
> +	TEST_RIB_ASSERT(lpm != NULL);
> +
> +	for (i = 0; i < num_routes; i++)
> +		rt[i].nh = rte_rand() & ((1ULL << nh_bits) - 1);
> +

Put comment here explaining this next block.
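
Something like:

	/*
	 * Insert each route into both the RIB and the LPM table, keeping the
	 * two in sync: if the LPM add fails, roll back the corresponding RIB
	 * entry so both tables end up holding exactly the same set of routes.
	 */
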
> +	for (i = 0; i < num_routes; i++) {
> +		ret = rte_rib_add(rib, rt[i].ip, rt[i].depth, rt[i].nh);
> +		if (ret == 0)
> +			rib_add++;
> +		else
> +			continue;
> +
> +		ret = rte_lpm_add(lpm, rt[i].ip, rt[i].depth, rt[i].nh);
> +		if (ret == 0)
> +			lpm_add++;
> +		else {
> +			rte_rib_delete(rib, rt[i].ip, rt[i].depth);
> +			rib_add--;
> +		}
> +	}
> +	TEST_RIB_ASSERT(rib_add == lpm_add);
> +
> +	ret = test_lookup(rib, lpm);
> +	if (ret != 0)
> +		return ret;
> +
> +	for (i = 0; i < num_routes; i++) {
> +		if ((i % 3) == 0) {
I assume the intention here is that, after filling the table and doing the
lookup tests, we drop 1/3 of the entries and retest. Put in a comment
explaining why.
Rather than putting in the if statement, why not just change the loop to
be:

	for (i = 0; i < num_routes; i += 3)

> +			ret = rte_rib_delete(rib, rt[i].ip, rt[i].depth);
> +			if (ret == 0)
> +				rib_add--;
> +			else
> +				continue;
> +
> +			ret = rte_lpm_delete(lpm, rt[i].ip, rt[i].depth);
> +			if (ret == 0)
> +				lpm_add--;
> +		}
> +	}
> +	TEST_RIB_ASSERT(rib_add == lpm_add);
> +
> +	ret = test_lookup(rib, lpm);
> +	if (ret != 0)
> +		return ret;
> +
> +	for (i = 0; i < num_routes; i++) {
> +		if ((i % 6) == 0) {

As above, put in a comment, and consider removing the if statement. It
helps to have the reduced indentation.

> +			ret = rte_rib_add(rib, rt[i].ip, rt[i].depth, rt[i].nh);
> +			if (ret == 0)
> +				rib_add++;
> +			else
> +				continue;
> +
> +			ret = rte_lpm_add(lpm, rt[i].ip, rt[i].depth, rt[i].nh);
> +			if (ret == 0)
> +				lpm_add++;
> +			else {
> +				rte_rib_delete(rib, rt[i].ip, rt[i].depth);
> +				rib_add--;
> +			}
> +		}
> +	}
> +	TEST_RIB_ASSERT(rib_add == lpm_add);
> +
> +	ret = test_lookup(rib, lpm);
> +	if (ret != 0)
> +		return ret;
> +
> +	rte_rib_free(rib);
> +	rte_lpm_free(lpm);
> +	rte_free(rt);
> +
> +	return 0;
> +}

Looks like a really good test. The only issue I have is that it sits for a long time
doing the checks in the background without any output. I would therefore
suggest:
* At the end of each stage (add, lookup, delete, etc.) print out a message
  stating what was done and with how many entries, e.g. X adds, Y lookups, etc.
* For any of those individual items that takes a long time on its own,
  consider printing an update every e.g. 100,000 items, and/or printing a dot
  every 10,000 items, so the user can see progress (see the sketch below).
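
Even something as simple as this (sketch only) inside the long-running loops
would be enough:

	if ((i % 100000) == 0) {
		printf(".");
		fflush(stdout);
	}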

> +
> +REGISTER_TEST_COMMAND(rib_lpm_comp_autotest, test_rib_lpm_comp);

<snip>
  
Bruce Richardson June 29, 2018, 3:31 p.m. UTC | #3
On Fri, Apr 27, 2018 at 01:03:33AM +0300, Medvedkin Vladimir wrote:
> Signed-off-by: Medvedkin Vladimir <medvedkinv@gmail.com>
> ---
>  test/test/Makefile               |   5 +
>  test/test/meson.build            |   8 +
>  test/test/test_rib.c             | 308 +++++++++++++++++++++++++++++++++++++++
>  test/test/test_rib_generate_rt.c | 297 +++++++++++++++++++++++++++++++++++++
>  test/test/test_rib_generate_rt.h |  38 +++++
>  test/test/test_rib_lpm_comp.c    | 189 ++++++++++++++++++++++++
>  test/test/test_rib_perf.c        | 145 ++++++++++++++++++
>  7 files changed, 990 insertions(+)
>  create mode 100644 test/test/test_rib.c
>  create mode 100644 test/test/test_rib_generate_rt.c
>  create mode 100644 test/test/test_rib_generate_rt.h
>  create mode 100644 test/test/test_rib_lpm_comp.c
>  create mode 100644 test/test/test_rib_perf.c
> 

<snip>

> diff --git a/test/test/test_rib_perf.c b/test/test/test_rib_perf.c
> new file mode 100644
> index 0000000..42fbd1e
> --- /dev/null
> +++ b/test/test/test_rib_perf.c
> @@ -0,0 +1,145 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
> + */
> +
> +#include <stdio.h>
> +#include <stdint.h>
> +#include <stdlib.h>
> +
> +#include <rte_cycles.h>
> +#include <rte_random.h>
> +#include <rte_branch_prediction.h>
> +#include <rte_ip.h>
> +#include <rte_malloc.h>
> +#include <rte_rib.h>
> +#include <rte_dir24_8.h>
> +
> +#include "test.h"
> +#include "test_xmmt_ops.h"
> +#include "test_rib_generate_rt.h"
> +
> +#define TEST_RIB_ASSERT(cond) do {				\
> +	if (!(cond)) {						\
> +		printf("Error at line %d:\n", __LINE__);	\
> +		return -1;					\
> +	}							\
> +} while (0)
> +
> +#define ITERATIONS (1 << 15)
> +#define BATCH_SIZE (1 << 12)
> +#define BULK_SIZE 32
> +
> +#define NH_MSK(nh_sz)	((1ULL << ((1 << (3 + nh_sz)) - 1)) - 1)
> +
> +static int
> +test_rib_perf(void)
> +{
> +	struct rte_rib *rib = NULL;
> +	struct rte_rib_conf conf;
> +	struct route_rule *rt;
> +	uint64_t begin, total_time;
> +	uint64_t next_hop_add;
> +	uint64_t default_nh = 0;
> +	int64_t count = 0;
> +	unsigned int i, j;
> +	int status = 0;
> +	int ret, nh_bits, nr_tbl8;
> +	uint32_t num_routes;
> +
> +	conf.max_nodes = 3000000;
> +	conf.node_sz = sizeof(struct rte_rib_node);
> +	conf.type = RTE_RIB_DIR24_8;
> +	conf.fib_conf.dir24_8.def_nh = default_nh;
> +	conf.fib_conf.dir24_8.nh_sz = RTE_DIR24_8_8B;

It's good that you are taking the worst-case to show the perf, but it means
that the library comes out a bit slower than LPM in the autotest.
How about running the same test cases for multiple data sizes, 8, 4, 2?
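
For example, wrapping the whole measurement sequence in a loop over the entry
sizes, roughly (assuming a 2B enum value exists alongside the 4B/8B ones used
elsewhere in the tests):

	int nh_sz;

	for (nh_sz = RTE_DIR24_8_2B; nh_sz <= RTE_DIR24_8_8B; nh_sz++) {
		conf.fib_conf.dir24_8.nh_sz = nh_sz;
		/* create the rib, run the add/lookup/delete measurements,
		 * print the results tagged with the entry size, free the rib
		 */
	}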

> +
> +	rte_srand(rte_rdtsc());
> +
> +	nh_bits = RTE_MIN(((1 << (3 + conf.fib_conf.dir24_8.nh_sz)) - 1), 24);
> +	nr_tbl8 = RTE_MIN(((1 << nh_bits) - 1), 131071);
> +	conf.fib_conf.dir24_8.num_tbl8 = nr_tbl8;
> +	num_routes = 1200000;
> +
> +	rt = rte_zmalloc("struct route_rule *", sizeof(struct route_rule) *
> +		num_routes, 0);
> +	TEST_RIB_ASSERT(rt != NULL);
> +
> +	num_routes = generate_large_route_rule_table(num_routes, rt);
> +	TEST_RIB_ASSERT(num_routes != 0);
> +
> +	printf("No. routes = %u\n", (unsigned int) num_routes);
> +
> +	shuffle_rt(rt, num_routes);
> +
> +	print_route_distribution(rt, (uint32_t) num_routes);
> +
> +	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &conf);
> +	TEST_RIB_ASSERT(rib != NULL);
> +
> +	/* Measure add. */
> +	begin = rte_rdtsc();
> +
> +	for (i = 0; i < num_routes; i++) {
> +		do {
> +			next_hop_add = rte_rand() & NH_MSK(conf.fib_conf.dir24_8.nh_sz);
> +		} while (next_hop_add == default_nh);
> +
> +		ret = rte_rib_add(rib, rt[i].ip, rt[i].depth, next_hop_add);
> +		if ((ret == 0))
> +			status++;
> +	}
> +
> +	total_time = rte_rdtsc() - begin;
> +
> +	printf("Unique added entries = %d\n", status);
> +	printf("Average RIB Add: %g cycles\n",
> +			(double)total_time / num_routes);
> +
> +	/* Measure bulk Lookup */
> +	total_time = 0;
> +	count = 0;
> +	for (i = 0; i < ITERATIONS; i++) {
> +		static uint32_t ip_batch[BATCH_SIZE];
> +		uint64_t next_hops[BULK_SIZE];
> +
> +		/* Create array of random IP addresses */
> +		for (j = 0; j < BATCH_SIZE; j++)
> +			ip_batch[j] = rte_rand();
> +
> +		/* Lookup per batch */
> +		begin = rte_rdtsc();
> +		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE)
> +			rte_rib_fib_lookup_bulk(rib, &ip_batch[j], next_hops,
> +				BULK_SIZE);
> +
> +		total_time += rte_rdtsc() - begin;
> +		for (j = 0; j < BULK_SIZE; j++) {
> +			if (next_hops[j] == default_nh)
> +				count++;
> +		}
> +	}
> +	printf("BULK RIB Lookup: %.1f cycles (fails = %.1f%%)\n",
> +			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
> +			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
> +
> +	/* Delete */
> +	status = 0;
> +	begin = rte_rdtsc();
> +
> +	for (i = 0; i < num_routes; i++) {
> +		ret = rte_rib_delete(rib, rt[i].ip, rt[i].depth);
> +		if (ret == 0)
> +			status++;
> +	}
> +
> +	total_time = rte_rdtsc() - begin;
> +
> +	printf("Average RIB Delete: %g cycles\n",
> +			(double)total_time / num_routes);
> +
> +	rte_rib_free(rib);
> +	rte_free(rt);
> +
> +	return 0;
> +}
> +
> +REGISTER_TEST_COMMAND(rib_perf_autotest, test_rib_perf);
> -- 
> 1.8.3.1
>
  

Patch

diff --git a/test/test/Makefile b/test/test/Makefile
index 2630ab4..b5f4fb3 100644
--- a/test/test/Makefile
+++ b/test/test/Makefile
@@ -119,6 +119,11 @@  SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm_perf.c
 SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm6.c
 SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm6_perf.c
 
+SRCS-$(CONFIG_RTE_LIBRTE_RIB) += test_rib.c
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_rib_generate_rt.c
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_rib_perf.c
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_rib_lpm_comp.c
+
 SRCS-y += test_debug.c
 SRCS-y += test_errno.c
 SRCS-y += test_tailq.c
diff --git a/test/test/meson.build b/test/test/meson.build
index ad0a650..f9abc3d 100644
--- a/test/test/meson.build
+++ b/test/test/meson.build
@@ -74,6 +74,10 @@  test_sources = files('commands.c',
 	'test_reciprocal_division_perf.c',
 	'test_red.c',
 	'test_reorder.c',
+	'test_rib.c',
+	'test_rib_generate_rt.c',
+	'test_rib_perf.c',
+	'test_rib_lpm_comp.c',
 	'test_ring.c',
 	'test_ring_perf.c',
 	'test_rwlock.c',
@@ -111,6 +115,7 @@  test_deps = ['acl',
 	'pipeline',
 	'port',
 	'reorder',
+	'rib',
 	'ring',
 	'timer'
 ]
@@ -192,6 +197,9 @@  test_names = [
 	'red_autotest',
 	'red_perf',
 	'reorder_autotest',
+	'rib_autotest',
+	'rib_perf_autotest',
+	'rib_lpm_comp_autotest',
 	'ring_autotest',
 	'ring_perf_autotest',
 	'ring_pmd_autotest',
diff --git a/test/test/test_rib.c b/test/test/test_rib.c
new file mode 100644
index 0000000..c5d7509
--- /dev/null
+++ b/test/test/test_rib.c
@@ -0,0 +1,308 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <rte_ip.h>
+#include <rte_rib.h>
+
+#include "test.h"
+#include "test_xmmt_ops.h"
+#include <rte_dir24_8.h>
+
+
+#define TEST_RIB_ASSERT(cond) do {				\
+	if (!(cond)) {						\
+		printf("Error at line %d:\n", __LINE__);	\
+		return -1;					\
+	}							\
+} while (0)
+
+typedef int32_t (*rte_rib_test)(void);
+
+static int32_t test0(void);
+static int32_t test1(void);
+static int32_t test2(void);
+static int32_t test3(void);
+static int32_t test4(void);
+static int32_t test5(void);
+
+static rte_rib_test tests[] = {
+/* Test Cases */
+	test0,
+	test1,
+	test2,
+	test3,
+	test4,
+	test5
+};
+
+#define NUM_RIB_TESTS (sizeof(tests)/sizeof(tests[0]))
+#define MAX_DEPTH 32
+#define MAX_RULES (1 << 22)
+#define NUMBER_TBL8S 4096
+#define PASS 0
+
+/*
+ * Check that rte_rib_create fails gracefully for incorrect user input
+ * arguments
+ */
+int32_t
+test0(void)
+{
+	struct rte_rib *rib = NULL;
+	struct rte_rib_conf config;
+
+	config.type = RTE_RIB_DIR24_8;
+	config.max_nodes = MAX_RULES;
+	config.node_sz = sizeof(struct rte_rib_node);
+	config.fib_conf.dir24_8.nh_sz = RTE_DIR24_8_4B;
+	config.fib_conf.dir24_8.num_tbl8 = NUMBER_TBL8S;
+	config.fib_conf.dir24_8.def_nh = 0;
+
+	/* rte_rib_create: rib name == NULL */
+	rib = rte_rib_create(NULL, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(rib == NULL);
+
+	/* rte_rib_create: config == NULL */
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, NULL);
+	TEST_RIB_ASSERT(rib == NULL);
+
+	/* socket_id < -1 is invalid */
+	rib = rte_rib_create(__func__, -2, &config);
+	TEST_RIB_ASSERT(rib == NULL);
+
+	/* rte_rib_create: max_nodes = 0 */
+	config.max_nodes = 0;
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(rib == NULL);
+	config.max_nodes = MAX_RULES;
+
+	/* rte_rib_create: node_sz = 0 */
+	config.node_sz = 0;
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(rib == NULL);
+	config.node_sz = sizeof(struct rte_rib_node);
+
+	/* rte_rib_create: invalid type */
+	config.type = RTE_RIB_TYPE_MAX;
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(rib == NULL);
+	config.type = RTE_RIB_DIR24_8;
+
+	/* rte_rib_create: invalid fib type */
+	config.fib_conf.dir24_8.nh_sz = 10;
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(rib == NULL);
+	config.fib_conf.dir24_8.nh_sz = RTE_DIR24_8_4B;
+
+	/* rte_rib_create: invalid default next hop */
+	config.fib_conf.dir24_8.def_nh = UINT32_MAX;
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(rib == NULL);
+	config.fib_conf.dir24_8.def_nh = 0;
+
+	config.fib_conf.dir24_8.num_tbl8 = UINT32_MAX;
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(rib == NULL);
+
+	return PASS;
+}
+
+/*
+ * Create then free a rib table several times, using each nh_sz value and a
+ * slightly different max_nodes each time
+ */
+int32_t
+test1(void)
+{
+	struct rte_rib *rib = NULL;
+	struct rte_rib_conf config;
+	int32_t i, j;
+
+	config.node_sz = sizeof(struct rte_rib_node);
+	config.type = RTE_RIB_DIR24_8;
+	config.fib_conf.dir24_8.def_nh = 0;
+	config.fib_conf.dir24_8.num_tbl8 = 127;
+
+	for (j = 0; j < 4; j++) {
+		config.fib_conf.dir24_8.nh_sz = j;
+		/* create and free tables of slightly different sizes */
+		for (i = 0; i < 2; i++) {
+			config.max_nodes = MAX_RULES - i;
+			rib = rte_rib_create(__func__, SOCKET_ID_ANY, &config);
+			TEST_RIB_ASSERT(rib != NULL);
+			rte_rib_free(rib);
+		}
+	}
+	/* Cannot test free, so return success */
+	return PASS;
+}
+
+/*
+ * Call rte_rib_free for NULL pointer user input. Note: free has no return,
+ * so it is impossible to check for failure, but this test is added to
+ * increase function coverage metrics and to validate that freeing NULL does
+ * not crash.
+ */
+int32_t
+test2(void)
+{
+	struct rte_rib *rib = NULL;
+	struct rte_rib_conf config;
+
+	config.type = RTE_RIB_DIR24_8;
+	config.max_nodes = MAX_RULES;
+	config.node_sz = sizeof(struct rte_rib_node);
+	config.fib_conf.dir24_8.nh_sz = RTE_DIR24_8_4B;
+	config.fib_conf.dir24_8.num_tbl8 = NUMBER_TBL8S;
+	config.fib_conf.dir24_8.def_nh = 0;
+
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(rib != NULL);
+
+	rte_rib_free(rib);
+	rte_rib_free(NULL);
+	return PASS;
+}
+
+/*
+ * Check that rte_rib_add fails gracefully for incorrect user input arguments
+ */
+int32_t
+test3(void)
+{
+	struct rte_rib *rib = NULL;
+	struct rte_rib_conf config;
+	uint32_t ip = IPv4(0, 0, 0, 0);
+	uint64_t next_hop = 100;
+	uint8_t depth = 24;
+	int32_t status = 0;
+
+	config.type = RTE_RIB_DIR24_8;
+	config.max_nodes = MAX_RULES;
+	config.node_sz = sizeof(struct rte_rib_node);
+	config.fib_conf.dir24_8.nh_sz = RTE_DIR24_8_4B;
+	config.fib_conf.dir24_8.num_tbl8 = NUMBER_TBL8S;
+	config.fib_conf.dir24_8.def_nh = 0;
+
+	/* rte_rib_add: rib == NULL */
+	status = rte_rib_add(NULL, ip, depth, next_hop);
+	TEST_RIB_ASSERT(status < 0);
+
+	/* Create valid rib to use in rest of test. */
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(rib != NULL);
+
+	/* rte_rib_add: depth > MAX_DEPTH */
+	status = rte_rib_add(rib, ip, (MAX_DEPTH + 1), next_hop);
+	TEST_RIB_ASSERT(status < 0);
+
+	rte_rib_free(rib);
+
+	return PASS;
+}
+
+/*
+ * Check that rte_rib_delete fails gracefully for incorrect user input
+ * arguments
+ */
+int32_t
+test4(void)
+{
+	struct rte_rib *rib = NULL;
+	struct rte_rib_conf config;
+	uint32_t ip = IPv4(0, 0, 0, 0);
+	uint8_t depth = 24;
+	int32_t status = 0;
+
+	config.type = RTE_RIB_DIR24_8;
+	config.max_nodes = MAX_RULES;
+	config.node_sz = sizeof(struct rte_rib_node);
+	config.fib_conf.dir24_8.nh_sz = RTE_DIR24_8_4B;
+	config.fib_conf.dir24_8.num_tbl8 = NUMBER_TBL8S;
+	config.fib_conf.dir24_8.def_nh = 0;
+
+	/* rte_rib_delete: rib == NULL */
+	status = rte_rib_delete(NULL, ip, depth);
+	TEST_RIB_ASSERT(status < 0);
+
+	/* Create valid rib to use in rest of test. */
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(rib != NULL);
+
+	/* rte_rib_delete: depth > MAX_DEPTH */
+	status = rte_rib_delete(rib, ip, (MAX_DEPTH + 1));
+	TEST_RIB_ASSERT(status < 0);
+
+	rte_rib_free(rib);
+
+	return PASS;
+}
+
+/*
+ * Call add, lookup and delete for a single rule with depth <= 24
+ */
+int32_t
+test5(void)
+{
+	struct rte_rib *rib = NULL;
+	struct rte_rib_conf config;
+
+	uint32_t ip = IPv4(190, 2, 0, 0);
+	uint64_t next_hop_add = 10;
+	uint64_t next_hop_return = 20;
+	uint64_t next_hop_default = 14;
+	uint8_t depth = 24;
+	uint32_t status = 0;
+
+	config.type = RTE_RIB_DIR24_8;
+	config.max_nodes = MAX_RULES;
+	config.node_sz = sizeof(struct rte_rib_node);
+	config.fib_conf.dir24_8.nh_sz = RTE_DIR24_8_4B;
+	config.fib_conf.dir24_8.num_tbl8 = NUMBER_TBL8S;
+	config.fib_conf.dir24_8.def_nh = next_hop_default;
+
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(rib != NULL);
+
+	status = rte_rib_add(rib, ip, depth, next_hop_add);
+	TEST_RIB_ASSERT(status == 0);
+
+	status = rte_rib_fib_lookup_bulk(rib, &ip, &next_hop_return, 1);
+	TEST_RIB_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	status = rte_rib_delete(rib, ip, depth);
+	TEST_RIB_ASSERT(status == 0);
+	status = rte_rib_fib_lookup_bulk(rib, &ip, &next_hop_return, 1);
+	TEST_RIB_ASSERT(next_hop_return == next_hop_default);
+
+	rte_rib_free(rib);
+
+	return PASS;
+}
+
+/*
+ * Do all unit tests.
+ */
+static int
+test_rib(void)
+{
+	unsigned int i;
+	int status, global_status = 0;
+
+	for (i = 0; i < NUM_RIB_TESTS; i++) {
+		status = tests[i]();
+		if (status < 0) {
+			printf("ERROR: RIB Test %u: FAIL\n", i);
+			global_status = status;
+		}
+	}
+
+	return global_status;
+}
+
+REGISTER_TEST_COMMAND(rib_autotest, test_rib);
diff --git a/test/test/test_rib_generate_rt.c b/test/test/test_rib_generate_rt.c
new file mode 100644
index 0000000..36834ed
--- /dev/null
+++ b/test/test/test_rib_generate_rt.c
@@ -0,0 +1,297 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <math.h>
+
+#include <rte_random.h>
+#include <rte_cycles.h>
+#include <rte_ip.h>
+
+#include "test_rib_generate_rt.h"
+
+static uint32_t max_route_entries;
+static uint32_t num_route_entries;
+
+/* The following counts of routes per depth for each common IP class were
+ * taken from the previous large constant table in
+ * app/test/test_rib_routes.h. To give similar performance, they keep the
+ * same depth and IP address coverage as that table. These numbers do not
+ * include any private local IP addresses. As the previous large constant
+ * rule table was dumped from a real router, there are no addresses from
+ * class D or E.
+ */
+static struct route_rule_count rule_count = {
+	.a = { /* IP class A in which the most significant bit is 0 */
+		    0, /* depth =  1 */
+		    0, /* depth =  2 */
+		    1, /* depth =  3 */
+		    0, /* depth =  4 */
+		    2, /* depth =  5 */
+		    1, /* depth =  6 */
+		    3, /* depth =  7 */
+		  185, /* depth =  8 */
+		   26, /* depth =  9 */
+		   16, /* depth = 10 */
+		   39, /* depth = 11 */
+		  144, /* depth = 12 */
+		  233, /* depth = 13 */
+		  528, /* depth = 14 */
+		  866, /* depth = 15 */
+		 3856, /* depth = 16 */
+		 3268, /* depth = 17 */
+		 5662, /* depth = 18 */
+		17301, /* depth = 19 */
+		22226, /* depth = 20 */
+		11147, /* depth = 21 */
+		16746, /* depth = 22 */
+		17120, /* depth = 23 */
+		77578, /* depth = 24 */
+		  401, /* depth = 25 */
+		  656, /* depth = 26 */
+		 1107, /* depth = 27 */
+		 1121, /* depth = 28 */
+		 2316, /* depth = 29 */
+		  717, /* depth = 30 */
+		   10, /* depth = 31 */
+		   66  /* depth = 32 */
+	},
+	.b = { /* IP class B, in which the 2 most significant bits are 10 */
+		    0, /* depth =  1 */
+		    0, /* depth =  2 */
+		    0, /* depth =  3 */
+		    0, /* depth =  4 */
+		    1, /* depth =  5 */
+		    1, /* depth =  6 */
+		    1, /* depth =  7 */
+		    3, /* depth =  8 */
+		    3, /* depth =  9 */
+		   30, /* depth = 10 */
+		   25, /* depth = 11 */
+		  168, /* depth = 12 */
+		  305, /* depth = 13 */
+		  569, /* depth = 14 */
+		 1129, /* depth = 15 */
+		50800, /* depth = 16 */
+		 1645, /* depth = 17 */
+		 1820, /* depth = 18 */
+		 3506, /* depth = 19 */
+		 3258, /* depth = 20 */
+		 3424, /* depth = 21 */
+		 4971, /* depth = 22 */
+		 6885, /* depth = 23 */
+		39771, /* depth = 24 */
+		  424, /* depth = 25 */
+		  170, /* depth = 26 */
+		  443, /* depth = 27 */
+		   92, /* depth = 28 */
+		  366, /* depth = 29 */
+		  377, /* depth = 30 */
+		    2, /* depth = 31 */
+		  200  /* depth = 32 */
+	},
+	.c = { /* IP class C, in which the 3 most significant bits are 110 */
+		     0, /* depth =  1 */
+		     0, /* depth =  2 */
+		     0, /* depth =  3 */
+		     0, /* depth =  4 */
+		     0, /* depth =  5 */
+		     0, /* depth =  6 */
+		     0, /* depth =  7 */
+		    12, /* depth =  8 */
+		     8, /* depth =  9 */
+		     9, /* depth = 10 */
+		    33, /* depth = 11 */
+		    69, /* depth = 12 */
+		   237, /* depth = 13 */
+		  1007, /* depth = 14 */
+		  1717, /* depth = 15 */
+		 14663, /* depth = 16 */
+		  8070, /* depth = 17 */
+		 16185, /* depth = 18 */
+		 48261, /* depth = 19 */
+		 36870, /* depth = 20 */
+		 33960, /* depth = 21 */
+		 50638, /* depth = 22 */
+		 61422, /* depth = 23 */
+		466549, /* depth = 24 */
+		  1829, /* depth = 25 */
+		  4824, /* depth = 26 */
+		  4927, /* depth = 27 */
+		  5914, /* depth = 28 */
+		 10254, /* depth = 29 */
+		  4905, /* depth = 30 */
+		     1, /* depth = 31 */
+		   716  /* depth = 32 */
+	}
+};
+
+static void generate_random_rule_prefix(struct route_rule *rt,
+	uint32_t ip_class, uint8_t depth)
+{
+/* IP address class A, the most significant bit is 0 */
+#define IP_HEAD_MASK_A			0x00000000
+#define IP_HEAD_BIT_NUM_A		1
+
+/* IP address class B, the most significant 2 bits are 10 */
+#define IP_HEAD_MASK_B			0x80000000
+#define IP_HEAD_BIT_NUM_B		2
+
+/* IP address class C, the most significant 3 bits are 110 */
+#define IP_HEAD_MASK_C			0xC0000000
+#define IP_HEAD_BIT_NUM_C		3
+
+	uint32_t class_depth;
+	uint32_t range;
+	uint32_t mask;
+	uint32_t step;
+	uint32_t start;
+	uint32_t fixed_bit_num;
+	uint32_t ip_head_mask;
+	uint32_t rule_num;
+	uint32_t k;
+	struct route_rule *ptr_rule;
+
+	if (ip_class == IP_CLASS_A) {        /* IP Address class A */
+		fixed_bit_num = IP_HEAD_BIT_NUM_A;
+		ip_head_mask = IP_HEAD_MASK_A;
+		rule_num = rule_count.a[depth - 1];
+	} else if (ip_class == IP_CLASS_B) { /* IP Address class B */
+		fixed_bit_num = IP_HEAD_BIT_NUM_B;
+		ip_head_mask = IP_HEAD_MASK_B;
+		rule_num = rule_count.b[depth - 1];
+	} else {                             /* IP Address class C */
+		fixed_bit_num = IP_HEAD_BIT_NUM_C;
+		ip_head_mask = IP_HEAD_MASK_C;
+		rule_num = rule_count.c[depth - 1];
+	}
+
+	if ((rule_num == 0) || ((num_route_entries + rule_num) >=
+		max_route_entries))
+		return;
+
+	/* the number of remaining bits, not counting the most significant
+	 * fixed bits for this IP address class
+	 */
+	class_depth = depth - fixed_bit_num;
+
+	/* range is the maximum number of rules for this depth and
+	 * this IP address class
+	 */
+	range = 1 << class_depth;
+
+	/* mask covering only the depth most significant generated bits,
+	 * excluding the fixed bits for the IP address class
+	 */
+	mask = range - 1;
+
+	/* Widen coverage of IP address in generated rules */
+	if (range <= rule_num)
+		step = 1;
+	else
+		step = round((double)range / rule_num);
+
+	/* Only generate the remaining bits, excluding the most significant
+	 * fixed bits for the IP address class
+	 */
+	start = rte_rand() & mask;
+	ptr_rule = &rt[num_route_entries];
+	for (k = 0; k < rule_num; k++) {
+		ptr_rule->ip = (start << (RTE_RIB_MAXDEPTH - depth))
+			| ip_head_mask;
+		ptr_rule->depth = depth;
+		ptr_rule++;
+		start = (start + step) & mask;
+	}
+	num_route_entries += rule_num;
+}
+
+static void insert_rule_in_random_pos(struct route_rule *rt,
+	uint32_t ip, uint8_t depth)
+{
+	uint32_t pos;
+	int try_count = 0;
+	struct route_rule tmp;
+
+	do {
+		pos = rte_rand();
+		try_count++;
+	} while ((try_count < 10) && (pos > num_route_entries));
+
+	if ((pos > num_route_entries) || (pos > max_route_entries))
+		pos = num_route_entries >> 1;
+
+	tmp = rt[pos];
+	rt[pos].ip = ip;
+	rt[pos].depth = depth;
+	if (num_route_entries < max_route_entries)
+		rt[num_route_entries++] = tmp;
+}
+
+uint32_t
+generate_large_route_rule_table(uint32_t num_routes, struct route_rule *rt)
+{
+	uint32_t ip_class;
+	uint8_t  depth;
+
+	rte_srand(rte_rdtsc());
+	num_route_entries = 0;
+	max_route_entries = num_routes;
+	for (ip_class = IP_CLASS_A; ip_class <= IP_CLASS_C; ip_class++) {
+		for (depth = 1; depth <= RTE_RIB_MAXDEPTH; depth++)
+			generate_random_rule_prefix(rt, ip_class, depth);
+	}
+	/* Add the following rules to keep parity with the previous large
+	 * constant table: four rules with private local IP addresses and one
+	 * all-zeros prefix with depth = 8.
+	 */
+	insert_rule_in_random_pos(rt, IPv4(0, 0, 0, 0), 8);
+	insert_rule_in_random_pos(rt, IPv4(10, 2, 23, 147), 32);
+	insert_rule_in_random_pos(rt, IPv4(192, 168, 100, 10), 24);
+	insert_rule_in_random_pos(rt, IPv4(192, 168, 25, 100), 24);
+	insert_rule_in_random_pos(rt, IPv4(192, 168, 129, 124), 32);
+
+	return num_route_entries;
+}
+
+void
+print_route_distribution(const struct route_rule *table, uint32_t n)
+{
+	unsigned int i, j;
+
+	printf("Route distribution per prefix width:\n");
+	printf("DEPTH    QUANTITY (PERCENT)\n");
+	printf("---------------------------\n");
+
+	/* Count depths. */
+	for (i = 1; i <= 32; i++) {
+		unsigned int depth_counter = 0;
+		double percent_hits;
+
+		for (j = 0; j < n; j++)
+			if (table[j].depth == (uint8_t) i)
+				depth_counter++;
+
+		percent_hits = ((double)depth_counter)/((double)n) * 100;
+		printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
+	}
+	printf("\n");
+}
+
+void
+shuffle_rt(struct route_rule *rt, uint32_t n)
+{
+	uint32_t pos;
+	struct route_rule tmp;
+	uint32_t i;
+
+	for (i = 0; i < n; i++) {
+		pos = rte_rand() % n;
+		tmp = rt[pos];
+		rt[pos] = rt[i];
+		rt[i] = tmp;
+	}
+}
diff --git a/test/test/test_rib_generate_rt.h b/test/test/test_rib_generate_rt.h
new file mode 100644
index 0000000..90573c7
--- /dev/null
+++ b/test/test/test_rib_generate_rt.h
@@ -0,0 +1,38 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
+ */
+
+#ifndef _TEST_RIB_GENERATE_RT_H_
+#define _TEST_RIB_GENERATE_RT_H_
+
+#define RTE_RIB_MAXDEPTH	32
+
+struct route_rule {
+	uint64_t nh;
+	uint32_t ip;
+	uint8_t depth;
+};
+
+enum {
+	IP_CLASS_A,
+	IP_CLASS_B,
+	IP_CLASS_C
+};
+
+/* struct route_rule_count defines the total number of rules in the following
+ * a/b/c arrays; each item in a[]/b[]/c[] is the number of rules of common IP
+ * address class A/B/C at that depth, not including the ones for private local
+ * networks.
+ */
+struct route_rule_count {
+	uint32_t a[RTE_RIB_MAXDEPTH];
+	uint32_t b[RTE_RIB_MAXDEPTH];
+	uint32_t c[RTE_RIB_MAXDEPTH];
+};
+
+
+uint32_t generate_large_route_rule_table(uint32_t num_routes,
+	struct route_rule *rt);
+void print_route_distribution(const struct route_rule *table, uint32_t n);
+void shuffle_rt(struct route_rule *rt, uint32_t n);
+
+#endif /* _TEST_RIB_GENERATE_RT_H_ */
diff --git a/test/test/test_rib_lpm_comp.c b/test/test/test_rib_lpm_comp.c
new file mode 100644
index 0000000..ef48c8c
--- /dev/null
+++ b/test/test/test_rib_lpm_comp.c
@@ -0,0 +1,189 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <rte_random.h>
+#include <rte_cycles.h>
+#include <rte_branch_prediction.h>
+#include <rte_ip.h>
+#include <rte_malloc.h>
+#include <rte_lpm.h>
+#include <rte_rib.h>
+
+#include "test.h"
+#include "test_xmmt_ops.h"
+#include "test_rib_generate_rt.h"
+
+#define TEST_RIB_ASSERT(cond) do {				\
+	if (!(cond)) {						\
+		printf("Error at line %d:\n", __LINE__);	\
+		return -1;					\
+	}							\
+} while (0)
+
+#define ITERATIONS (1 << 25)
+#define BATCH_SIZE (1 << 7)
+#define BULK_SIZE 32
+#define LPM_NH_MASK	((1 << 24) - 1)
+
+static uint64_t default_nh;
+
+static int
+test_lookup(struct rte_rib *rib, struct rte_lpm *lpm)
+{
+	static uint32_t ip_batch[BATCH_SIZE];
+	uint64_t rib_next_hops[BULK_SIZE];
+	uint32_t lpm_next_hops[BULK_SIZE];
+	int i, j, k;
+
+	for (i = 0; i < ITERATIONS; i++) {
+		for (j = 0; j < BATCH_SIZE; j++)
+			ip_batch[j] = (i << 7) + j;
+
+		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
+			rte_rib_fib_lookup_bulk(rib, &ip_batch[j],
+				rib_next_hops, BULK_SIZE);
+			rte_lpm_lookup_bulk(lpm, &ip_batch[j],
+				lpm_next_hops, BULK_SIZE);
+			for (k = 0; k < BULK_SIZE; k++) {
+				if (likely(lpm_next_hops[k] &
+					RTE_LPM_LOOKUP_SUCCESS))
+					lpm_next_hops[k] &= LPM_NH_MASK;
+				else
+					lpm_next_hops[k] = default_nh;
+			}
+			for (k = 0; k < BULK_SIZE; k++)
+				TEST_RIB_ASSERT(rib_next_hops[k] ==
+						lpm_next_hops[k]);
+		}
+	}
+	return 0;
+}
+
+static int
+test_rib_lpm_comp(void)
+{
+	struct rte_rib *rib = NULL;
+	struct rte_lpm *lpm = NULL;
+	struct route_rule *rt = NULL;
+	unsigned int i;
+	int rib_add = 0, lpm_add = 0;
+	int ret, nh_bits, nr_tbl8;
+	uint32_t num_routes;
+	struct rte_rib_conf conf;
+	struct rte_lpm_config config;
+
+	rte_srand(rte_rdtsc());
+	default_nh = 17;
+
+	conf.max_nodes = 3000000;
+	conf.node_sz = sizeof(struct rte_rib_node);
+	conf.type = RTE_RIB_DIR24_8;
+	conf.fib_conf.dir24_8.def_nh = default_nh;
+	conf.fib_conf.dir24_8.nh_sz = RTE_DIR24_8_8B;
+
+	nh_bits = RTE_MIN(((1 << (3 + conf.fib_conf.dir24_8.nh_sz)) - 1), 24);
+	nr_tbl8 = RTE_MIN(((1 << nh_bits) - 1), 65535);
+	config.number_tbl8s = nr_tbl8;
+	conf.fib_conf.dir24_8.num_tbl8 = nr_tbl8;
+	config.max_rules = 2000000;
+	config.flags = 0;
+
+	num_routes = 1200000;
+
+	rt = rte_zmalloc("struct route_rule *", sizeof(struct route_rule) *
+		num_routes + 5, 0);
+	TEST_RIB_ASSERT(rt != NULL);
+
+	num_routes = generate_large_route_rule_table(num_routes, rt);
+	TEST_RIB_ASSERT(num_routes != 0);
+	printf("No. routes = %u\n", (unsigned int) num_routes);
+
+	shuffle_rt(rt, num_routes);
+
+	print_route_distribution(rt, (uint32_t) num_routes);
+
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &conf);
+	TEST_RIB_ASSERT(rib != NULL);
+
+	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_RIB_ASSERT(lpm != NULL);
+
+	for (i = 0; i < num_routes; i++)
+		rt[i].nh = rte_rand() & ((1ULL << nh_bits) - 1);
+
+	for (i = 0; i < num_routes; i++) {
+		ret = rte_rib_add(rib, rt[i].ip, rt[i].depth, rt[i].nh);
+		if (ret == 0)
+			rib_add++;
+		else
+			continue;
+
+		ret = rte_lpm_add(lpm, rt[i].ip, rt[i].depth, rt[i].nh);
+		if (ret == 0)
+			lpm_add++;
+		else {
+			rte_rib_delete(rib, rt[i].ip, rt[i].depth);
+			rib_add--;
+		}
+	}
+	TEST_RIB_ASSERT(rib_add == lpm_add);
+
+	ret = test_lookup(rib, lpm);
+	if (ret != 0)
+		return ret;
+
+	for (i = 0; i < num_routes; i++) {
+		if ((i % 3) == 0) {
+			ret = rte_rib_delete(rib, rt[i].ip, rt[i].depth);
+			if (ret == 0)
+				rib_add--;
+			else
+				continue;
+
+			ret = rte_lpm_delete(lpm, rt[i].ip, rt[i].depth);
+			if (ret == 0)
+				lpm_add--;
+		}
+	}
+	TEST_RIB_ASSERT(rib_add == lpm_add);
+
+	ret = test_lookup(rib, lpm);
+	if (ret != 0)
+		return ret;
+
+	for (i = 0; i < num_routes; i++) {
+		if ((i % 6) == 0) {
+			ret = rte_rib_add(rib, rt[i].ip, rt[i].depth, rt[i].nh);
+			if (ret == 0)
+				rib_add++;
+			else
+				continue;
+
+			ret = rte_lpm_add(lpm, rt[i].ip, rt[i].depth, rt[i].nh);
+			if (ret == 0)
+				lpm_add++;
+			else {
+				rte_rib_delete(rib, rt[i].ip, rt[i].depth);
+				rib_add--;
+			}
+		}
+	}
+	TEST_RIB_ASSERT(rib_add == lpm_add);
+
+	ret = test_lookup(rib, lpm);
+	if (ret != 0)
+		return ret;
+
+	rte_rib_free(rib);
+	rte_lpm_free(lpm);
+	rte_free(rt);
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(rib_lpm_comp_autotest, test_rib_lpm_comp);
diff --git a/test/test/test_rib_perf.c b/test/test/test_rib_perf.c
new file mode 100644
index 0000000..42fbd1e
--- /dev/null
+++ b/test/test/test_rib_perf.c
@@ -0,0 +1,145 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <rte_cycles.h>
+#include <rte_random.h>
+#include <rte_branch_prediction.h>
+#include <rte_ip.h>
+#include <rte_malloc.h>
+#include <rte_rib.h>
+#include <rte_dir24_8.h>
+
+#include "test.h"
+#include "test_xmmt_ops.h"
+#include "test_rib_generate_rt.h"
+
+#define TEST_RIB_ASSERT(cond) do {				\
+	if (!(cond)) {						\
+		printf("Error at line %d:\n", __LINE__);	\
+		return -1;					\
+	}							\
+} while (0)
+
+#define ITERATIONS (1 << 15)
+#define BATCH_SIZE (1 << 12)
+#define BULK_SIZE 32
+
+#define NH_MSK(nh_sz)	((1ULL << ((1 << (3 + nh_sz)) - 1)) - 1)
+
+static int
+test_rib_perf(void)
+{
+	struct rte_rib *rib = NULL;
+	struct rte_rib_conf conf;
+	struct route_rule *rt;
+	uint64_t begin, total_time;
+	uint64_t next_hop_add;
+	uint64_t default_nh = 0;
+	int64_t count = 0;
+	unsigned int i, j;
+	int status = 0;
+	int ret, nh_bits, nr_tbl8;
+	uint32_t num_routes;
+
+	conf.max_nodes = 3000000;
+	conf.node_sz = sizeof(struct rte_rib_node);
+	conf.type = RTE_RIB_DIR24_8;
+	conf.fib_conf.dir24_8.def_nh = default_nh;
+	conf.fib_conf.dir24_8.nh_sz = RTE_DIR24_8_8B;
+
+	rte_srand(rte_rdtsc());
+
+	nh_bits = RTE_MIN(((1 << (3 + conf.fib_conf.dir24_8.nh_sz)) - 1), 24);
+	nr_tbl8 = RTE_MIN(((1 << nh_bits) - 1), 131071);
+	conf.fib_conf.dir24_8.num_tbl8 = nr_tbl8;
+	num_routes = 1200000;
+
+	rt = rte_zmalloc("struct route_rule *", sizeof(struct route_rule) *
+		num_routes, 0);
+	TEST_RIB_ASSERT(rt != NULL);
+
+	num_routes = generate_large_route_rule_table(num_routes, rt);
+	TEST_RIB_ASSERT(num_routes != 0);
+
+	printf("No. routes = %u\n", (unsigned int) num_routes);
+
+	shuffle_rt(rt, num_routes);
+
+	print_route_distribution(rt, (uint32_t) num_routes);
+
+	rib = rte_rib_create(__func__, SOCKET_ID_ANY, &conf);
+	TEST_RIB_ASSERT(rib != NULL);
+
+	/* Measure add. */
+	begin = rte_rdtsc();
+
+	for (i = 0; i < num_routes; i++) {
+		do {
+			next_hop_add = rte_rand() & NH_MSK(conf.fib_conf.dir24_8.nh_sz);
+		} while (next_hop_add == default_nh);
+
+		ret = rte_rib_add(rib, rt[i].ip, rt[i].depth, next_hop_add);
+		if ((ret == 0))
+			status++;
+	}
+
+	total_time = rte_rdtsc() - begin;
+
+	printf("Unique added entries = %d\n", status);
+	printf("Average RIB Add: %g cycles\n",
+			(double)total_time / num_routes);
+
+	/* Measure bulk Lookup */
+	total_time = 0;
+	count = 0;
+	for (i = 0; i < ITERATIONS; i++) {
+		static uint32_t ip_batch[BATCH_SIZE];
+		uint64_t next_hops[BULK_SIZE];
+
+		/* Create array of random IP addresses */
+		for (j = 0; j < BATCH_SIZE; j++)
+			ip_batch[j] = rte_rand();
+
+		/* Lookup per batch */
+		begin = rte_rdtsc();
+		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE)
+			rte_rib_fib_lookup_bulk(rib, &ip_batch[j], next_hops,
+				BULK_SIZE);
+
+		total_time += rte_rdtsc() - begin;
+		for (j = 0; j < BULK_SIZE; j++) {
+			if (next_hops[j] == default_nh)
+				count++;
+		}
+	}
+	printf("BULK RIB Lookup: %.1f cycles (fails = %.1f%%)\n",
+			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+	/* Delete */
+	status = 0;
+	begin = rte_rdtsc();
+
+	for (i = 0; i < num_routes; i++) {
+		ret = rte_rib_delete(rib, rt[i].ip, rt[i].depth);
+		if (ret == 0)
+			status++;
+	}
+
+	total_time = rte_rdtsc() - begin;
+
+	printf("Average RIB Delete: %g cycles\n",
+			(double)total_time / num_routes);
+
+	rte_rib_free(rib);
+	rte_free(rt);
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(rib_perf_autotest, test_rib_perf);