new file mode 100644
@@ -0,0 +1,725 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <rte_debug.h>
+#include <rte_malloc.h>
+#include <rte_prefetch.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_branch_prediction.h>
+
+#include <rte_rib.h>
+#include <rte_dir24_8.h>
+
+#define BITMAP_SLAB_BIT_SIZE_LOG2 6
+#define BITMAP_SLAB_BIT_SIZE (1 << BITMAP_SLAB_BIT_SIZE_LOG2)
+#define BITMAP_SLAB_BITMASK (BITMAP_SLAB_BIT_SIZE - 1)
+
+/*
+ * DIR-24-8 FIB table. tbl24 holds one entry per /24 prefix; entries
+ * whose low bit (RTE_DIR24_8_VALID_EXT_ENT) is set point to a tbl8
+ * group of 256 entries covering the last octet.
+ */
+struct rte_dir24_8_tbl {
+	uint32_t	number_tbl8s;	/**< Total number of tbl8s */
+	uint32_t	cur_tbl8s;	/**< Current number of tbl8s in use */
+	uint64_t	def_nh;		/**< Default next hop */
+	enum rte_dir24_8_nh_sz	nh_sz;	/**< Size of nexthop entry */
+	uint64_t	*tbl8;		/**< LPM tbl8 table. */
+	uint64_t	*tbl8_idxes;	/**< bitmap containing free tbl8 idxes*/
+	/* NOTE(review): [0] is the GNU zero-length-array idiom; a C99
+	 * flexible array member ([]) would be the portable spelling. */
+	uint64_t	tbl24[0] __rte_cache_aligned;	/**< LPM tbl24 table. */
+};
+
+#define ROUNDUP(x, y) RTE_ALIGN_CEIL(x, (1 << (32 - y)))
+
+/*
+ * Selects which implementation rte_dir24_8_get_lookup_fn() returns;
+ * exists to exercise the different lookup code paths in tests.
+ */
+enum lookup_type {
+	MACRO,	/* type-specialized functions generated by LOOKUP_FUNC */
+	INLINE,	/* generic inline implementation, one wrapper per nh_sz */
+	UNI	/* single function reading nh_sz from the table at run time */
+};
+enum lookup_type test_lookup = MACRO;
+
+/*
+ * Return a byte pointer to the tbl24 entry covering ip's top 24 bits.
+ * (ip & TBL24_MASK) >> 8 would be the entry index; shifting by
+ * (8 - nh_sz) instead folds the entry size (1 << nh_sz bytes) into
+ * the same shift, yielding a byte offset directly.
+ */
+static __rte_always_inline __attribute__((pure)) void *
+get_tbl24_p(struct rte_dir24_8_tbl *fib, uint32_t ip, uint8_t nh_sz)
+{
+	return (void *)&((uint8_t *)fib->tbl24)[(ip &
+		RTE_DIR24_8_TBL24_MASK) >> (8 - nh_sz)];
+}
+
+/* Width in bits of one nexthop entry for the given size encoding. */
+static __rte_always_inline __attribute__((pure)) uint8_t
+bits_in_nh(uint8_t nh_sz)
+{
+	/* nh_sz 0..3 maps to 8, 16, 32, 64 bits respectively. */
+	return 8 << nh_sz;
+}
+
+/*
+ * Largest next hop value storable in an entry of the given size:
+ * one bit of every entry is reserved for the valid/extended flag,
+ * leaving bits_in_nh(nh_sz) - 1 bits for the next hop itself.
+ */
+static __rte_always_inline __attribute__((pure)) uint64_t
+get_max_nh(uint8_t nh_sz)
+{
+	return ((1ULL << (bits_in_nh(nh_sz) - 1)) - 1);
+}
+
+/* Index of the tbl24 entry for an IPv4 address: its top 24 bits. */
+static __rte_always_inline __attribute__((pure)) uint32_t
+get_tbl24_idx(uint32_t ip)
+{
+	uint32_t idx = ip >> 8;
+
+	return idx;
+}
+
+/*
+ * Index into the tbl8 array. res is a tbl24 entry whose low bit is the
+ * extended flag and whose remaining bits (res >> 1) hold the tbl8
+ * group number; the low 8 bits of ip select the entry within that
+ * 256-entry group.
+ */
+static __rte_always_inline __attribute__((pure)) uint32_t
+get_tbl8_idx(uint32_t res, uint32_t ip)
+{
+	return (res >> 1) * RTE_DIR24_8_TBL8_GRP_NUM_ENT + (uint8_t)ip;
+}
+
+/*
+ * Mask covering one nexthop entry of bits_in_nh(nh_sz) bits.
+ * Built with two shifts so the 64-bit case never evaluates
+ * 1ULL << 64, which would be undefined behaviour.
+ */
+static __rte_always_inline __attribute__((pure)) uint64_t
+lookup_msk(uint8_t nh_sz)
+{
+	return ((1ULL << ((1 << (nh_sz + 3)) - 1)) << 1) - 1;
+}
+
+/* Position of entry 'val' within its packed 64-bit table word. */
+static __rte_always_inline __attribute__((pure)) uint8_t
+get_psd_idx(uint32_t val, uint8_t nh_sz)
+{
+	/* Entries per 64-bit word: 8 >> nh_sz (a power of two). */
+	uint32_t ent_per_word = 1U << (3 - nh_sz);
+
+	return val & (ent_per_word - 1);
+}
+
+/* Index of the 64-bit word holding entry 'val' in a packed table. */
+static __rte_always_inline __attribute__((pure)) uint32_t
+get_tbl_idx(uint32_t val, uint8_t nh_sz)
+{
+	/* 8 >> nh_sz entries fit per word; divide by that power of two. */
+	return val / (1U << (3 - nh_sz));
+}
+
+/*
+ * Read the tbl24 entry for ip: locate the 64-bit word containing it,
+ * shift the entry down to bit 0 and mask off the neighbouring packed
+ * entries.
+ */
+static __rte_always_inline __attribute__((pure)) uint64_t
+get_tbl24(struct rte_dir24_8_tbl *fib, uint32_t ip, uint8_t nh_sz)
+{
+	return ((fib->tbl24[get_tbl_idx(get_tbl24_idx(ip), nh_sz)] >>
+		(get_psd_idx(get_tbl24_idx(ip), nh_sz) *
+		bits_in_nh(nh_sz))) & lookup_msk(nh_sz));
+}
+
+/*
+ * Read the tbl8 entry selected by a tbl24 entry (res, carrying the
+ * group number) and the low 8 bits of ip, unpacking it from its
+ * 64-bit word the same way as get_tbl24().
+ */
+static __rte_always_inline __attribute__((pure)) uint64_t
+get_tbl8(struct rte_dir24_8_tbl *fib, uint32_t res, uint32_t ip, uint8_t nh_sz)
+{
+	return ((fib->tbl8[get_tbl_idx(get_tbl8_idx(res, ip), nh_sz)] >>
+		(get_psd_idx(get_tbl8_idx(res, ip), nh_sz) *
+		bits_in_nh(nh_sz))) & lookup_msk(nh_sz));
+}
+
+/*
+ * Generate a type-specialized bulk lookup function.
+ *   suffix        - name suffix of the generated function
+ *   type          - integer type used to load entries directly
+ *   bulk_prefetch - number of tbl24 entries prefetched ahead
+ *   nh_sz         - log2 of the entry size in bytes
+ * Bit 0 of an entry is the valid/extended flag: when set the rest of
+ * the entry is a tbl8 group number, otherwise it is a next hop.
+ * Stored next hops are pre-shifted left by one, hence the ">> 1"
+ * before storing into next_hops[]. Returns 0, or -EINVAL on NULL
+ * arguments. The second loop drains the last prefetch_offset entries
+ * without issuing further prefetches.
+ */
+#define LOOKUP_FUNC(suffix, type, bulk_prefetch, nh_sz) \
+static int rte_dir24_8_lookup_bulk_##suffix(void *fib_p, const uint32_t *ips, \
+	uint64_t *next_hops, const unsigned int n) \
+{ \
+	struct rte_dir24_8_tbl *fib = (struct rte_dir24_8_tbl *)fib_p; \
+	uint64_t tmp; \
+	uint32_t i; \
+	uint32_t prefetch_offset = \
+		RTE_MIN((unsigned int)bulk_prefetch, n); \
+	\
+	RTE_RIB_RETURN_IF_TRUE(((fib == NULL) || (ips == NULL) || \
+		(next_hops == NULL)), -EINVAL); \
+	\
+	for (i = 0; i < prefetch_offset; i++) \
+		rte_prefetch0(get_tbl24_p(fib, ips[i], nh_sz)); \
+	for (i = 0; i < (n - prefetch_offset); i++) { \
+		rte_prefetch0(get_tbl24_p(fib, \
+			ips[i + prefetch_offset], nh_sz)); \
+		tmp = ((type *)fib->tbl24)[ips[i] >> 8]; \
+		if (unlikely((tmp & RTE_DIR24_8_VALID_EXT_ENT) == \
+			RTE_DIR24_8_VALID_EXT_ENT)) { \
+			tmp = ((type *)fib->tbl8)[(uint8_t)ips[i] + \
+				((tmp >> 1) * RTE_DIR24_8_TBL8_GRP_NUM_ENT)]; \
+		} \
+		next_hops[i] = tmp >> 1; \
+	} \
+	for (; i < n; i++) { \
+		tmp = ((type *)fib->tbl24)[ips[i] >> 8]; \
+		if (unlikely((tmp & RTE_DIR24_8_VALID_EXT_ENT) == \
+			RTE_DIR24_8_VALID_EXT_ENT)) { \
+			tmp = ((type *)fib->tbl8)[(uint8_t)ips[i] + \
+				((tmp >> 1) * RTE_DIR24_8_TBL8_GRP_NUM_ENT)]; \
+		} \
+		next_hops[i] = tmp >> 1; \
+	} \
+	return 0; \
+} \
+
+/* One specialized lookup function per supported entry size. */
+LOOKUP_FUNC(1b, uint8_t, 5, 0)
+LOOKUP_FUNC(2b, uint16_t, 6, 1)
+LOOKUP_FUNC(4b, uint32_t, 15, 2)
+LOOKUP_FUNC(8b, uint64_t, 12, 3)
+
+/*
+ * Generic bulk lookup used by the per-size wrappers below. nh_sz is a
+ * compile-time constant at each call site, so the compiler can
+ * specialize the inlined body.
+ * Returns 0 on success, -EINVAL on NULL arguments.
+ */
+static inline int
+rte_dir24_8_lookup_bulk(struct rte_dir24_8_tbl *fib, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n, uint8_t nh_sz)
+{
+	uint64_t tmp;
+	uint32_t i;
+	uint32_t prefetch_offset = RTE_MIN(15U, n);
+
+	RTE_RIB_RETURN_IF_TRUE(((fib == NULL) || (ips == NULL) ||
+		(next_hops == NULL)), -EINVAL);
+
+	/* Warm the cache for the first prefetch_offset tbl24 entries. */
+	for (i = 0; i < prefetch_offset; i++)
+		rte_prefetch0(get_tbl24_p(fib, ips[i], nh_sz));
+	for (i = 0; i < (n - prefetch_offset); i++) {
+		rte_prefetch0(get_tbl24_p(fib, ips[i + prefetch_offset],
+			nh_sz));
+		tmp = get_tbl24(fib, ips[i], nh_sz);
+		/* Extended entry: descend into the tbl8 group. */
+		if (unlikely((tmp & RTE_DIR24_8_VALID_EXT_ENT) ==
+			RTE_DIR24_8_VALID_EXT_ENT)) {
+			tmp = get_tbl8(fib, tmp, ips[i], nh_sz);
+		}
+		/* Stored next hops are pre-shifted left by one. */
+		next_hops[i] = tmp >> 1;
+	}
+	/* Drain the tail without issuing further prefetches. */
+	for (; i < n; i++) {
+		tmp = get_tbl24(fib, ips[i], nh_sz);
+		if (unlikely((tmp & RTE_DIR24_8_VALID_EXT_ENT) ==
+			RTE_DIR24_8_VALID_EXT_ENT)) {
+			tmp = get_tbl8(fib, tmp, ips[i], nh_sz);
+		}
+		next_hops[i] = tmp >> 1;
+	}
+	return 0;
+}
+
+/* Bulk lookup adapter: fixed nh_sz of 0 (1-byte entries). */
+static int
+rte_dir24_8_lookup_bulk_0(void *fib_p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	return rte_dir24_8_lookup_bulk(fib_p, ips, next_hops, n, 0);
+}
+
+/* Bulk lookup adapter: fixed nh_sz of 1 (2-byte entries). */
+static int
+rte_dir24_8_lookup_bulk_1(void *fib_p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	return rte_dir24_8_lookup_bulk(fib_p, ips, next_hops, n, 1);
+}
+
+/* Bulk lookup adapter: fixed nh_sz of 2 (4-byte entries). */
+static int
+rte_dir24_8_lookup_bulk_2(void *fib_p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	return rte_dir24_8_lookup_bulk(fib_p, ips, next_hops, n, 2);
+}
+
+/* Bulk lookup adapter: fixed nh_sz of 3 (8-byte entries). */
+static int
+rte_dir24_8_lookup_bulk_3(void *fib_p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	return rte_dir24_8_lookup_bulk(fib_p, ips, next_hops, n, 3);
+}
+
+/*
+ * Universal bulk lookup: reads the entry size from the table at run
+ * time instead of being specialized per nh_sz.
+ * Returns 0 on success, -EINVAL on NULL arguments.
+ * Fixes: the original was missing the semicolon on the nh_sz
+ * initializer (compile error) and dereferenced fib before the NULL
+ * check; nh_sz is now read only after validation.
+ */
+static int
+rte_dir24_8_lookup_bulk_uni(void *fib_p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	struct rte_dir24_8_tbl *fib = (struct rte_dir24_8_tbl *)fib_p;
+	uint64_t tmp;
+	uint32_t i;
+	uint32_t prefetch_offset = RTE_MIN(15U, n);
+	uint8_t nh_sz;
+
+	RTE_RIB_RETURN_IF_TRUE(((fib == NULL) || (ips == NULL) ||
+		(next_hops == NULL)), -EINVAL);
+
+	/* Safe to dereference fib only after the NULL check above. */
+	nh_sz = fib->nh_sz;
+
+	for (i = 0; i < prefetch_offset; i++)
+		rte_prefetch0(get_tbl24_p(fib, ips[i], nh_sz));
+	for (i = 0; i < (n - prefetch_offset); i++) {
+		rte_prefetch0(get_tbl24_p(fib, ips[i + prefetch_offset],
+			nh_sz));
+		tmp = get_tbl24(fib, ips[i], nh_sz);
+		/* Extended entry: descend into the tbl8 group. */
+		if (unlikely((tmp & RTE_DIR24_8_VALID_EXT_ENT) ==
+			RTE_DIR24_8_VALID_EXT_ENT)) {
+			tmp = get_tbl8(fib, tmp, ips[i], nh_sz);
+		}
+		/* Stored next hops are pre-shifted left by one. */
+		next_hops[i] = tmp >> 1;
+	}
+	/* Drain the tail without issuing further prefetches. */
+	for (; i < n; i++) {
+		tmp = get_tbl24(fib, ips[i], nh_sz);
+		if (unlikely((tmp & RTE_DIR24_8_VALID_EXT_ENT) ==
+			RTE_DIR24_8_VALID_EXT_ENT)) {
+			tmp = get_tbl8(fib, tmp, ips[i], nh_sz);
+		}
+		next_hops[i] = tmp >> 1;
+	}
+	return 0;
+}
+
+/*
+ * Return the bulk lookup function matching the configured nexthop
+ * entry size and the selected test_lookup mode, or NULL if rib_conf
+ * is NULL or nh_sz is not a known value.
+ * Fixes: the original dereferenced rib_conf without a NULL check.
+ */
+rte_rib_lookup_fn_t
+rte_dir24_8_get_lookup_fn(struct rte_rib_conf *rib_conf)
+{
+	enum rte_dir24_8_nh_sz nh_sz;
+
+	if (rib_conf == NULL)
+		return NULL;
+
+	nh_sz = rib_conf->fib_conf.dir24_8.nh_sz;
+
+	if (test_lookup == MACRO) {
+		switch (nh_sz) {
+		case(RTE_DIR24_8_1B):
+			return rte_dir24_8_lookup_bulk_1b;
+		case(RTE_DIR24_8_2B):
+			return rte_dir24_8_lookup_bulk_2b;
+		case(RTE_DIR24_8_4B):
+			return rte_dir24_8_lookup_bulk_4b;
+		case(RTE_DIR24_8_8B):
+			return rte_dir24_8_lookup_bulk_8b;
+		}
+	} else if (test_lookup == INLINE) {
+		switch (nh_sz) {
+		case(RTE_DIR24_8_1B):
+			return rte_dir24_8_lookup_bulk_0;
+		case(RTE_DIR24_8_2B):
+			return rte_dir24_8_lookup_bulk_1;
+		case(RTE_DIR24_8_4B):
+			return rte_dir24_8_lookup_bulk_2;
+		case(RTE_DIR24_8_8B):
+			return rte_dir24_8_lookup_bulk_3;
+		}
+	} else
+		return rte_dir24_8_lookup_bulk_uni;
+	return NULL;
+}
+
+/*
+ * Store 'val' into 'n' consecutive fib entries starting at 'ptr',
+ * using the entry width selected by 'size'. The value is truncated
+ * to the entry width.
+ */
+static void
+write_to_fib(void *ptr, uint64_t val, enum rte_dir24_8_nh_sz size, int n)
+{
+	int idx;
+
+	switch (size) {
+	case RTE_DIR24_8_1B: {
+		uint8_t *entries = (uint8_t *)ptr;
+
+		for (idx = 0; idx < n; idx++)
+			entries[idx] = (uint8_t)val;
+		break;
+	}
+	case RTE_DIR24_8_2B: {
+		uint16_t *entries = (uint16_t *)ptr;
+
+		for (idx = 0; idx < n; idx++)
+			entries[idx] = (uint16_t)val;
+		break;
+	}
+	case RTE_DIR24_8_4B: {
+		uint32_t *entries = (uint32_t *)ptr;
+
+		for (idx = 0; idx < n; idx++)
+			entries[idx] = (uint32_t)val;
+		break;
+	}
+	case RTE_DIR24_8_8B: {
+		uint64_t *entries = (uint64_t *)ptr;
+
+		for (idx = 0; idx < n; idx++)
+			entries[idx] = val;
+		break;
+	}
+	}
+}
+
+/*
+ * Allocate a free tbl8 group index from the bitmap, marking it used.
+ * Returns the index, or -ENOSPC when no group is free.
+ * Fixes: the original loop bound (i <= number_tbl8s >> 6) read one
+ * 64-bit slab past the end of the bitmap whenever number_tbl8s was a
+ * multiple of 64, and the tail bits of the last slab could yield an
+ * index >= number_tbl8s. Both are now bounded explicitly.
+ */
+static int
+tbl8_get_idx(struct rte_dir24_8_tbl *fib)
+{
+	uint32_t i;
+	int bit_idx;
+	int idx;
+	/* Number of 64-bit slabs actually allocated for the bitmap. */
+	uint32_t num_slabs = (fib->number_tbl8s + BITMAP_SLAB_BITMASK) >>
+		BITMAP_SLAB_BIT_SIZE_LOG2;
+
+	for (i = 0; (i < num_slabs) &&
+			(fib->tbl8_idxes[i] == UINT64_MAX); i++)
+		;
+	if (i < num_slabs) {
+		bit_idx = __builtin_ctzll(~fib->tbl8_idxes[i]);
+		idx = (i << BITMAP_SLAB_BIT_SIZE_LOG2) + bit_idx;
+		/* Tail bits of the last slab may exceed number_tbl8s. */
+		if ((uint32_t)idx >= fib->number_tbl8s)
+			return -ENOSPC;
+		fib->tbl8_idxes[i] |= (1ULL << bit_idx);
+		return idx;
+	}
+	return -ENOSPC;
+}
+
+/* Return tbl8 group 'idx' to the free pool by clearing its bit. */
+static inline void
+tbl8_free_idx(struct rte_dir24_8_tbl *fib, int idx)
+{
+	uint32_t slab = idx >> BITMAP_SLAB_BIT_SIZE_LOG2;
+	uint64_t bit = 1ULL << (idx & BITMAP_SLAB_BITMASK);
+
+	fib->tbl8_idxes[slab] &= ~bit;
+}
+
+/*
+ * Allocate a tbl8 group and initialize all 256 of its entries with
+ * the next hop currently stored in the covering tbl24 entry (nh),
+ * flagged as extended entries. Returns the group index, or a
+ * negative errno from tbl8_get_idx() when none is free.
+ */
+static int
+tbl8_alloc(struct rte_dir24_8_tbl *fib, uint64_t nh)
+{
+	int tbl8_idx;
+	uint8_t *tbl8_ptr;
+
+	tbl8_idx = tbl8_get_idx(fib);
+	if (tbl8_idx < 0)
+		return tbl8_idx;
+	/* Byte offset of the group: entry count scaled by entry size. */
+	tbl8_ptr = (uint8_t *)fib->tbl8 +
+		((tbl8_idx * RTE_DIR24_8_TBL8_GRP_NUM_ENT) <<
+		fib->nh_sz);
+	/*Init tbl8 entries with nexthop from tbl24*/
+	write_to_fib((void *)tbl8_ptr, nh|
+		RTE_DIR24_8_VALID_EXT_ENT, fib->nh_sz,
+		RTE_DIR24_8_TBL8_GRP_NUM_ENT);
+	return tbl8_idx;
+}
+
+/*
+ * If every entry of tbl8 group 'tbl8_idx' now carries the same value,
+ * the group is redundant: collapse it back into the covering tbl24
+ * entry (keyed by ip >> 8, with the extended flag cleared), zero the
+ * group's entries and return its index to the free bitmap. If any
+ * two entries differ the group is still needed and nothing changes.
+ * One case per entry width, as entries must be loaded natively.
+ */
+static void
+tbl8_recycle(struct rte_dir24_8_tbl *fib, uint32_t ip, uint64_t tbl8_idx)
+{
+	int i;
+	uint64_t nh;
+	uint8_t *ptr8;
+	uint16_t *ptr16;
+	uint32_t *ptr32;
+	uint64_t *ptr64;
+
+	switch (fib->nh_sz) {
+	case RTE_DIR24_8_1B:
+		ptr8 = &((uint8_t *)fib->tbl8)[tbl8_idx *
+				RTE_DIR24_8_TBL8_GRP_NUM_ENT];
+		nh = *ptr8;
+		for (i = 1; i < RTE_DIR24_8_TBL8_GRP_NUM_ENT; i++) {
+			if (nh != ptr8[i])
+				return;
+		}
+		((uint8_t *)fib->tbl24)[ip >> 8] =
+			nh & ~RTE_DIR24_8_VALID_EXT_ENT;
+		for (i = 0; i < RTE_DIR24_8_TBL8_GRP_NUM_ENT; i++)
+			ptr8[i] = 0;
+		break;
+	case RTE_DIR24_8_2B:
+		ptr16 = &((uint16_t *)fib->tbl8)[tbl8_idx *
+				RTE_DIR24_8_TBL8_GRP_NUM_ENT];
+		nh = *ptr16;
+		for (i = 1; i < RTE_DIR24_8_TBL8_GRP_NUM_ENT; i++) {
+			if (nh != ptr16[i])
+				return;
+		}
+		((uint16_t *)fib->tbl24)[ip >> 8] =
+			nh & ~RTE_DIR24_8_VALID_EXT_ENT;
+		for (i = 0; i < RTE_DIR24_8_TBL8_GRP_NUM_ENT; i++)
+			ptr16[i] = 0;
+		break;
+	case RTE_DIR24_8_4B:
+		ptr32 = &((uint32_t *)fib->tbl8)[tbl8_idx *
+				RTE_DIR24_8_TBL8_GRP_NUM_ENT];
+		nh = *ptr32;
+		for (i = 1; i < RTE_DIR24_8_TBL8_GRP_NUM_ENT; i++) {
+			if (nh != ptr32[i])
+				return;
+		}
+		((uint32_t *)fib->tbl24)[ip >> 8] =
+			nh & ~RTE_DIR24_8_VALID_EXT_ENT;
+		for (i = 0; i < RTE_DIR24_8_TBL8_GRP_NUM_ENT; i++)
+			ptr32[i] = 0;
+		break;
+	case RTE_DIR24_8_8B:
+		ptr64 = &((uint64_t *)fib->tbl8)[tbl8_idx *
+				RTE_DIR24_8_TBL8_GRP_NUM_ENT];
+		nh = *ptr64;
+		for (i = 1; i < RTE_DIR24_8_TBL8_GRP_NUM_ENT; i++) {
+			if (nh != ptr64[i])
+				return;
+		}
+		((uint64_t *)fib->tbl24)[ip >> 8] =
+			nh & ~RTE_DIR24_8_VALID_EXT_ENT;
+		for (i = 0; i < RTE_DIR24_8_TBL8_GRP_NUM_ENT; i++)
+			ptr64[i] = 0;
+		break;
+	}
+	tbl8_free_idx(fib, tbl8_idx);
+}
+
+/*
+ * Install next_hop for the address range [ledge, redge) into the fib.
+ * The range is split into three parts: a head covered by a tbl8
+ * (ledge up to the next /24 boundary), a body of whole /24 prefixes
+ * written directly to tbl24, and a tail tbl8 (the partial /24 ending
+ * at redge). ledge == redge == 0 encodes the full address space
+ * (len = 1 << 24, a /0 route). Returns 0 or -ENOSPC when no tbl8
+ * group is available.
+ */
+static int
+install_to_fib(struct rte_dir24_8_tbl *fib, uint32_t ledge, uint32_t redge,
+	uint64_t next_hop)
+{
+	uint64_t tbl24_tmp;
+	int tbl8_idx;
+	int tmp_tbl8_idx;
+	uint8_t *tbl8_ptr;
+	uint32_t len;
+
+	/* Number of whole /24s between the aligned edges of the range. */
+	len = (unlikely((ledge == 0) && (redge == 0))) ? 1 << 24 :
+		((redge & RTE_DIR24_8_TBL24_MASK) - ROUNDUP(ledge, 24)) >> 8;
+
+	if (((ledge >> 8) != (redge >> 8)) || (len == 1 << 24)) {
+		/* Head: partial /24 before the first aligned boundary. */
+		if ((ROUNDUP(ledge, 24) - ledge) != 0) {
+			tbl24_tmp = get_tbl24(fib, ledge, fib->nh_sz);
+			if ((tbl24_tmp & RTE_DIR24_8_VALID_EXT_ENT) !=
+					RTE_DIR24_8_VALID_EXT_ENT) {
+				/**
+				 * Make sure there is space for two TBL8.
+				 * This is necessary when installing range that
+				 * needs tbl8 for ledge and redge.
+				 */
+				tbl8_idx = tbl8_alloc(fib, tbl24_tmp);
+				tmp_tbl8_idx = tbl8_get_idx(fib);
+				if (tbl8_idx < 0)
+					return -ENOSPC;
+				else if (tmp_tbl8_idx < 0) {
+					tbl8_free_idx(fib, tbl8_idx);
+					return -ENOSPC;
+				}
+				/* Probe only; the redge path reallocates. */
+				tbl8_free_idx(fib, tmp_tbl8_idx);
+				/*update dir24 entry with tbl8 index*/
+				write_to_fib(get_tbl24_p(fib, ledge,
+					fib->nh_sz), (tbl8_idx << 1)|
+					RTE_DIR24_8_VALID_EXT_ENT,
+					fib->nh_sz, 1);
+			} else
+				/* tbl24 already extended: reuse its group. */
+				tbl8_idx = tbl24_tmp >> 1;
+			tbl8_ptr = (uint8_t *)fib->tbl8 +
+				(((tbl8_idx * RTE_DIR24_8_TBL8_GRP_NUM_ENT) +
+				(ledge & ~RTE_DIR24_8_TBL24_MASK)) <<
+				fib->nh_sz);
+			/*update tbl8 with new next hop*/
+			write_to_fib((void *)tbl8_ptr, (next_hop << 1)|
+				RTE_DIR24_8_VALID_EXT_ENT,
+				fib->nh_sz, ROUNDUP(ledge, 24) - ledge);
+			tbl8_recycle(fib, ledge, tbl8_idx);
+		}
+		/* Body: whole /24s written straight into tbl24. */
+		write_to_fib(get_tbl24_p(fib, ROUNDUP(ledge, 24), fib->nh_sz),
+			next_hop << 1, fib->nh_sz, len);
+		/* Tail: partial /24 ending at redge. */
+		if (redge & ~RTE_DIR24_8_TBL24_MASK) {
+			tbl24_tmp = get_tbl24(fib, redge, fib->nh_sz);
+			if ((tbl24_tmp & RTE_DIR24_8_VALID_EXT_ENT) !=
+					RTE_DIR24_8_VALID_EXT_ENT) {
+				tbl8_idx = tbl8_alloc(fib, tbl24_tmp);
+				if (tbl8_idx < 0)
+					return -ENOSPC;
+				/*update dir24 entry with tbl8 index*/
+				write_to_fib(get_tbl24_p(fib, redge,
+					fib->nh_sz), (tbl8_idx << 1)|
+					RTE_DIR24_8_VALID_EXT_ENT,
+					fib->nh_sz, 1);
+			} else
+				tbl8_idx = tbl24_tmp >> 1;
+			tbl8_ptr = (uint8_t *)fib->tbl8 +
+				((tbl8_idx * RTE_DIR24_8_TBL8_GRP_NUM_ENT) <<
+				fib->nh_sz);
+			/*update tbl8 with new next hop*/
+			write_to_fib((void *)tbl8_ptr, (next_hop << 1)|
+				RTE_DIR24_8_VALID_EXT_ENT,
+				fib->nh_sz, redge & ~RTE_DIR24_8_TBL24_MASK);
+			tbl8_recycle(fib, redge, tbl8_idx);
+		}
+	} else if ((redge - ledge) != 0) {
+		/* Range confined to a single /24: one tbl8 span only. */
+		tbl24_tmp = get_tbl24(fib, ledge, fib->nh_sz);
+		if ((tbl24_tmp & RTE_DIR24_8_VALID_EXT_ENT) !=
+				RTE_DIR24_8_VALID_EXT_ENT) {
+			tbl8_idx = tbl8_alloc(fib, tbl24_tmp);
+			if (tbl8_idx < 0)
+				return -ENOSPC;
+			/*update dir24 entry with tbl8 index*/
+			write_to_fib(get_tbl24_p(fib, ledge, fib->nh_sz),
+				(tbl8_idx << 1)|
+				RTE_DIR24_8_VALID_EXT_ENT,
+				fib->nh_sz, 1);
+		} else
+			tbl8_idx = tbl24_tmp >> 1;
+		tbl8_ptr = (uint8_t *)fib->tbl8 +
+			(((tbl8_idx * RTE_DIR24_8_TBL8_GRP_NUM_ENT) +
+			(ledge & ~RTE_DIR24_8_TBL24_MASK)) <<
+			fib->nh_sz);
+		/*update tbl8 with new next hop*/
+		write_to_fib((void *)tbl8_ptr, (next_hop << 1)|
+			RTE_DIR24_8_VALID_EXT_ENT,
+			fib->nh_sz, redge - ledge);
+		tbl8_recycle(fib, ledge, tbl8_idx);
+	}
+	return 0;
+}
+
+/*
+ * Apply next_hop to every sub-range of ip/depth that is NOT covered
+ * by a more specific route: walk the RIB for covered prefixes and
+ * install the gaps between them ([ledge, redge) ranges) into the fib.
+ * Returns 0, -EINVAL if next_hop does not fit the entry size, or an
+ * error from install_to_fib().
+ */
+static int
+modify_fib(struct rte_rib *rib, uint32_t ip, uint8_t depth,
+	uint64_t next_hop)
+{
+	struct rte_rib_node *tmp = NULL;
+	struct rte_dir24_8_tbl *fib;
+	uint32_t ledge, redge;
+	int ret;
+
+	fib = rte_rib_get_fibp(rib);
+
+	if (next_hop > get_max_nh(fib->nh_sz))
+		return -EINVAL;
+
+	ip &= rte_rib_depth_to_mask(depth);
+	ledge = ip;
+	do {
+		/* Next more-specific prefix covered by ip/depth. */
+		tmp = rte_rib_tree_get_nxt(rib, ip, depth, tmp,
+			RTE_RIB_GET_NXT_COVER);
+		if (tmp != NULL) {
+			if (tmp->depth == depth)
+				continue;
+			redge = tmp->key & rte_rib_depth_to_mask(tmp->depth);
+			if (ledge == redge) {
+				/* No gap: skip past the covered prefix. */
+				ledge = redge +
+					(uint32_t)(1ULL << (32 - tmp->depth));
+				continue;
+			}
+			ret = install_to_fib(fib, ledge, redge,
+				next_hop);
+			if (ret != 0)
+				return ret;
+			ledge = redge +
+				(uint32_t)(1ULL << (32 - tmp->depth));
+		} else {
+			/* No further covered prefix: fill to range end. */
+			redge = ip + (uint32_t)(1ULL << (32 - depth));
+			ret = install_to_fib(fib, ledge, redge,
+				next_hop);
+			if (ret != 0)
+				return ret;
+		}
+	} while (tmp);
+
+	return 0;
+}
+
+/*
+ * Add or delete a route ip/depth -> next_hop in both the RIB and the
+ * dir24_8 fib. Returns 0 on success, -EINVAL on bad arguments or
+ * unknown op, -ENOENT when deleting a missing route, -ENOSPC when no
+ * tbl8 group is available, or -rte_errno from the RIB insert.
+ * Fixes: updating an existing route returned 0 even when modify_fib()
+ * failed; the error is now propagated.
+ */
+int
+rte_dir24_8_modify(struct rte_rib *rib, uint32_t ip, uint8_t depth,
+	uint64_t next_hop, enum rte_rib_op op)
+{
+	struct rte_dir24_8_tbl *fib;
+	struct rte_rib_node *tmp = NULL;
+	struct rte_rib_node *node;
+	struct rte_rib_node *parent;
+	int ret = 0;
+
+	if ((rib == NULL) || (depth > RTE_RIB_MAXDEPTH))
+		return -EINVAL;
+
+	fib = rte_rib_get_fibp(rib);
+	RTE_ASSERT(fib);
+
+	ip &= rte_rib_depth_to_mask(depth);
+
+	node = rte_rib_tree_lookup_exact(rib, ip, depth);
+	switch (op) {
+	case RTE_RIB_ADD:
+		if (node != NULL) {
+			/* Route exists: update its next hop in place. */
+			if (node->nh == next_hop)
+				return 0;
+			ret = modify_fib(rib, ip, depth, next_hop);
+			if (ret == 0)
+				node->nh = next_hop;
+			/* Propagate a fib update failure to the caller. */
+			return ret;
+		}
+		if (depth > 24) {
+			/* May need a new tbl8 unless one already covers ip. */
+			tmp = rte_rib_tree_get_nxt(rib, ip, 24, NULL,
+				RTE_RIB_GET_NXT_COVER);
+			if ((tmp == NULL) &&
+					(fib->cur_tbl8s >= fib->number_tbl8s))
+				return -ENOSPC;
+
+		}
+		node = rte_rib_tree_insert(rib, ip, depth);
+		if (node == NULL)
+			return -rte_errno;
+		node->nh = next_hop;
+		parent = rte_rib_tree_lookup_parent(node);
+		/* Same nexthop as the covering route: fib unchanged. */
+		if ((parent != NULL) && (parent->nh == next_hop))
+			return 0;
+		ret = modify_fib(rib, ip, depth, next_hop);
+		if (ret != 0) {
+			/* Roll back the RIB insert on fib failure. */
+			rte_rib_tree_remove(rib, ip, depth);
+			return ret;
+		}
+		if ((depth > 24) && (tmp == NULL))
+			fib->cur_tbl8s++;
+		return 0;
+	case RTE_RIB_DEL:
+		if (node == NULL)
+			return -ENOENT;
+
+		parent = rte_rib_tree_lookup_parent(node);
+		if (parent != NULL) {
+			/* Re-cover the range with the parent's nexthop. */
+			if (parent->nh != node->nh)
+				ret = modify_fib(rib, ip, depth, parent->nh);
+		} else
+			ret = modify_fib(rib, ip, depth, fib->def_nh);
+		if (ret == 0) {
+			rte_rib_tree_remove(rib, ip, depth);
+			if (depth > 24) {
+				tmp = rte_rib_tree_get_nxt(rib, ip, 24, NULL,
+					RTE_RIB_GET_NXT_COVER);
+				/* Last >24 route under this /24: tbl8 freed. */
+				if (tmp == NULL)
+					fib->cur_tbl8s--;
+			}
+		}
+		return ret;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+/*
+ * Allocate and initialize a dir24_8 table on the given NUMA socket.
+ * Validates name/socket/conf ranges (nh_sz, num_tbl8, def_nh must fit
+ * the entry size), fills tbl24 with the default nexthop and allocates
+ * the tbl8 array (num_tbl8 + 1 groups) plus its free-index bitmap.
+ * Returns the table, or NULL with rte_errno set to EINVAL/ENOMEM.
+ * Fixes: the "FIB_%s" mem_name was built but the raw 'name' was
+ * passed to the first allocation; mem_name is now used consistently.
+ */
+struct rte_dir24_8_tbl *rte_dir24_8_create(const char *name, int socket_id,
+	struct rte_rib_conf *rib_conf)
+{
+	char mem_name[RTE_RIB_NAMESIZE];
+	struct rte_dir24_8_tbl *fib;
+	uint64_t	def_nh;
+	uint32_t	num_tbl8;
+	enum rte_dir24_8_nh_sz	nh_sz;
+
+	if ((name == NULL) || (socket_id < -1) || (rib_conf == NULL) ||
+			(rib_conf->fib_conf.dir24_8.nh_sz < RTE_DIR24_8_1B) ||
+			(rib_conf->fib_conf.dir24_8.nh_sz > RTE_DIR24_8_8B) ||
+			(rib_conf->fib_conf.dir24_8.num_tbl8 >
+			get_max_nh(rib_conf->fib_conf.dir24_8.nh_sz)) ||
+			(rib_conf->fib_conf.dir24_8.num_tbl8 == 0) ||
+			(rib_conf->fib_conf.dir24_8.def_nh >
+			get_max_nh(rib_conf->fib_conf.dir24_8.nh_sz))) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	def_nh = rib_conf->fib_conf.dir24_8.def_nh;
+	nh_sz = rib_conf->fib_conf.dir24_8.nh_sz;
+	num_tbl8 = rib_conf->fib_conf.dir24_8.num_tbl8;
+
+	snprintf(mem_name, sizeof(mem_name), "FIB_%s", name);
+	/* Header plus the in-struct tbl24 array (entry size 1 << nh_sz). */
+	fib = rte_zmalloc_socket(mem_name, sizeof(struct rte_dir24_8_tbl) +
+		RTE_DIR24_8_TBL24_NUM_ENT * (1 << nh_sz), RTE_CACHE_LINE_SIZE,
+		socket_id);
+	if (fib == NULL) {
+		rte_errno = ENOMEM;
+		return fib;
+	}
+
+	/* Every /24 initially resolves to the default nexthop. */
+	write_to_fib(&fib->tbl24, (def_nh << 1), nh_sz, 1 << 24);
+
+	snprintf(mem_name, sizeof(mem_name), "TBL8_%s", name);
+	fib->tbl8 = rte_zmalloc_socket(mem_name, RTE_DIR24_8_TBL8_GRP_NUM_ENT *
+			(1 << nh_sz) * (num_tbl8 + 1),
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (fib->tbl8 == NULL) {
+		rte_errno = ENOMEM;
+		rte_free(fib);
+		return NULL;
+	}
+	fib->def_nh = def_nh;
+	fib->nh_sz = nh_sz;
+	fib->number_tbl8s = num_tbl8;
+
+	snprintf(mem_name, sizeof(mem_name), "TBL8_idxes_%s", name);
+	/* One bit per tbl8 group, rounded up to whole 64-bit slabs. */
+	fib->tbl8_idxes = rte_zmalloc_socket(mem_name,
+			RTE_ALIGN_CEIL(fib->number_tbl8s, 64) >> 3,
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (fib->tbl8_idxes == NULL) {
+		rte_errno = ENOMEM;
+		rte_free(fib->tbl8);
+		rte_free(fib);
+		return NULL;
+	}
+
+	return fib;
+}
+
+/*
+ * Release a table created by rte_dir24_8_create(): the index bitmap,
+ * the tbl8 array, then the table itself.
+ * Fixes: the original dereferenced fib_p unconditionally; NULL is now
+ * a no-op, matching conventional free() semantics.
+ */
+void
+rte_dir24_8_free(void *fib_p)
+{
+	struct rte_dir24_8_tbl *fib = (struct rte_dir24_8_tbl *)fib_p;
+
+	if (fib == NULL)
+		return;
+
+	rte_free(fib->tbl8_idxes);
+	rte_free(fib->tbl8);
+	rte_free(fib);
+}
new file mode 100644
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
+ */
+
+#ifndef _RTE_DIR24_8_H_
+#define _RTE_DIR24_8_H_
+
+/**
+ * @file
+ * RTE Longest Prefix Match (LPM)
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @internal Total number of tbl24 entries. */
+#define RTE_DIR24_8_TBL24_NUM_ENT (1 << 24)
+
+/** Maximum depth value possible for IPv4 LPM. */
+#define RTE_DIR24_8_MAX_DEPTH 32
+
+/** @internal Number of entries in a tbl8 group. */
+#define RTE_DIR24_8_TBL8_GRP_NUM_ENT 256
+
+/** @internal Total number of tbl8 groups in the tbl8. */
+#define RTE_DIR24_8_TBL8_NUM_GROUPS 65536
+
+/** @internal bitmask with valid and valid_group fields set */
+#define RTE_DIR24_8_VALID_EXT_ENT 0x01
+
+/** @internal Mask selecting the tbl24-indexed (upper 24) bits of an IPv4
+ * address. */
+#define RTE_DIR24_8_TBL24_MASK 0xffffff00
+
+/* NOTE(review): the types below (struct rte_rib, struct rte_rib_conf,
+ * enum rte_rib_op, rte_rib_lookup_fn_t) come from rte_rib.h; callers
+ * presumably include it first — confirm or add the include. */
+
+/** Create a dir24_8 FIB table; returns NULL and sets rte_errno on error. */
+struct rte_dir24_8_tbl *rte_dir24_8_create(const char *name, int socket_id,
+	struct rte_rib_conf *rib_conf);
+/** Free a table created by rte_dir24_8_create(). */
+void rte_dir24_8_free(void *fib_p);
+/** Add or delete route key/depth -> next_hop; returns 0 or -errno. */
+int rte_dir24_8_modify(struct rte_rib *rib, uint32_t key,
+	uint8_t depth, uint64_t next_hop, enum rte_rib_op op);
+
+/** Return the bulk lookup function matching rib_conf's nexthop size. */
+rte_rib_lookup_fn_t rte_dir24_8_get_lookup_fn(struct rte_rib_conf *rib_conf);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DIR24_8_H_ */
+