@@ -43,7 +43,7 @@ INC += rte_hexdump.h rte_devargs.h rte_bus.h rte_dev.h
INC += rte_pci_dev_feature_defs.h rte_pci_dev_features.h
INC += rte_malloc.h rte_keepalive.h rte_time.h
INC += rte_service.h rte_service_component.h
-INC += rte_bitmap.h rte_vfio.h
+INC += rte_bitmap.h rte_vfio.h rte_fbarray.h
GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h
GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h
new file mode 100644
@@ -0,0 +1,585 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/file.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+
+#include "eal_filesystem.h"
+#include "eal_private.h"
+
+#include "rte_fbarray.h"
+
+#define MASK_SHIFT 6ULL
+#define MASK_ALIGN (1 << MASK_SHIFT)
+#define MASK_LEN_TO_IDX(x) ((x) >> MASK_SHIFT)
+#define MASK_LEN_TO_MOD(x) ((x) - RTE_ALIGN_FLOOR((x), MASK_ALIGN))
+#define MASK_GET_IDX(idx, mod) (((idx) << MASK_SHIFT) + (mod))
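+
+/*
+ * Example: for idx = 70, MASK_LEN_TO_IDX(70) is 1 and MASK_LEN_TO_MOD(70)
+ * is 6, i.e. element 70 maps to bit 6 of the second 64-bit mask word;
+ * MASK_GET_IDX(1, 6) converts back to 70.
+ */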
+
+/*
+ * This mask is always stored at the end of the array, to provide a fast
+ * way of finding free/used spots without looping through each element.
+ */
+struct used_mask {
+ int n_masks;
+ uint64_t data[];
+};
+
+static size_t
+calc_mask_size(int len) {
+ return sizeof(struct used_mask) +
+ sizeof(uint64_t) * MASK_LEN_TO_IDX(len);
+}
+
+static size_t
+calc_data_size(size_t page_sz, int elt_sz, int len) {
+ size_t data_sz = elt_sz * len;
+ size_t msk_sz = calc_mask_size(len);
+ return RTE_ALIGN_CEIL(data_sz + msk_sz, page_sz);
+}
+
+static struct used_mask *
+get_used_mask(void *data, int elt_sz, int len) {
+ return (struct used_mask *) RTE_PTR_ADD(data, elt_sz * len);
+}
+
+static void
+move_mask(void *data, int elt_sz, int old_len, int new_len) {
+ struct used_mask *old_msk, *new_msk;
+
+ old_msk = get_used_mask(data, elt_sz, old_len);
+ new_msk = get_used_mask(data, elt_sz, new_len);
+
+ memset(new_msk, 0, calc_mask_size(new_len));
+ memcpy(new_msk, old_msk, calc_mask_size(old_len));
+ memset(old_msk, 0, calc_mask_size(old_len));
+ new_msk->n_masks = MASK_LEN_TO_IDX(new_len);
+}
+
+static int
+expand_and_map(void *addr, const char *name, size_t old_len, size_t new_len) {
+ char path[PATH_MAX];
+ void *map_addr, *adj_addr;
+ size_t map_len;
+ int fd, ret = 0;
+
+ map_len = new_len - old_len;
+ adj_addr = RTE_PTR_ADD(addr, old_len);
+
+ eal_get_fbarray_path(path, sizeof(path), name);
+
+ /* open our file */
+ fd = open(path, O_CREAT | O_RDWR, 0600);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s\n", path);
+ return -1;
+ }
+ if (ftruncate(fd, new_len)) {
+ RTE_LOG(ERR, EAL, "Cannot truncate %s\n", path);
+ ret = -1;
+ goto out;
+ }
+
+ map_addr = mmap(adj_addr, map_len, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, old_len);
+ if (map_addr != adj_addr) {
+ RTE_LOG(ERR, EAL, "mmap() failed: %s\n", strerror(errno));
+ ret = -1;
+ goto out;
+ }
+out:
+ close(fd);
+ return ret;
+}
+
+static int
+find_next_n(const struct used_mask *msk, int start, int n, bool used) {
+ int msk_idx, lookahead_idx, first, first_mod;
+ uint64_t first_msk;
+
+ /*
+ * mask only has granularity of MASK_ALIGN, but start may not be aligned
+ * on that boundary, so construct a special mask to exclude anything we
+ * don't want to see to avoid confusing ctz.
+ */
+ first = MASK_LEN_TO_IDX(start);
+ first_mod = MASK_LEN_TO_MOD(start);
+ first_msk = ~((1ULL << first_mod) - 1);
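+ /*
+ * e.g. start = 70 gives first = 1 and first_mod = 6, so first_msk
+ * keeps bits 6..63 and hides bits 0..5 of mask word 1.
+ */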
+
+ for (msk_idx = first; msk_idx < msk->n_masks; msk_idx++) {
+ uint64_t cur_msk, lookahead_msk;
+ int run_start, clz, left;
+ bool found = false;
+ /*
+ * The process of getting n consecutive bits for arbitrary n is
+ * a bit involved, but here it is in a nutshell:
+ *
+ * 1. let n be the number of consecutive bits we're looking for
+ * 2. check if n can fit in one mask, and if so, do n-1
+ * rshift-ands to see if there is an appropriate run inside
+ * our current mask
+ * 2a. if we found a run, bail out early
+ * 2b. if we didn't find a run, proceed
+ * 3. invert the mask and count leading zeroes (that is, count
+ * how many consecutive set bits we had starting from the
+ * end of current mask) as k
+ * 3a. if k is 0, continue to next mask
+ * 3b. if k is not 0, we have a potential run
+ * 4. to satisfy our requirements, next mask must have n-k
+ * consecutive set bits right at the start, so we will do
+ * (n-k-1) rshift-ands and check if first bit is set.
+ *
+ * Step 4 will need to be repeated if (n-k) > MASK_ALIGN until
+ * we either run out of masks, lose the run, or find what we
+ * were looking for.
+ */
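+ /*
+ * Worked example for steps 3-4: suppose n = 6 and only the top
+ * three bits of the current (normalized) mask are part of a run,
+ * so k = 3 and the potential run starts at bit 61. The next mask
+ * must then contribute n - k = 3 set bits at its start: after two
+ * rshift-ands, bit 0 of the lookahead mask is set only if its
+ * bits 0-2 were all set, completing the run of 6.
+ */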
+ cur_msk = msk->data[msk_idx];
+ left = n;
+
+ /* if we're looking for free spaces, invert the mask */
+ if (!used)
+ cur_msk = ~cur_msk;
+
+ /* ignore everything before start on first iteration */
+ if (msk_idx == first)
+ cur_msk &= first_msk;
+
+ /* if n can fit within a single mask, do a search */
+ if (n <= MASK_ALIGN) {
+ uint64_t tmp_msk = cur_msk;
+ int s_idx;
+ for (s_idx = 0; s_idx < n - 1; s_idx++) {
+ tmp_msk &= tmp_msk >> 1ULL;
+ }
+ /* we found what we were looking for */
+ if (tmp_msk != 0) {
+ run_start = __builtin_ctzll(tmp_msk);
+ return MASK_GET_IDX(msk_idx, run_start);
+ }
+ }
+
+ /*
+ * we didn't find our run within the mask, or n > MASK_ALIGN,
+ * so we're going for plan B.
+ */
+
+ /*
+ * count leading zeroes on inverted mask. __builtin_clzll() is
+ * undefined for zero, so handle a fully set mask (which is one
+ * big run) separately.
+ */
+ if (~cur_msk == 0)
+ clz = MASK_ALIGN;
+ else
+ clz = __builtin_clzll(~cur_msk);
+
+ /* if there aren't any runs at the end either, just continue */
+ if (clz == 0)
+ continue;
+
+ /* we have a partial run at the end, so try looking ahead */
+ run_start = MASK_ALIGN - clz;
+ left -= clz;
+
+ for (lookahead_idx = msk_idx + 1; lookahead_idx < msk->n_masks;
+ lookahead_idx++) {
+ int s_idx, need;
+ lookahead_msk = msk->data[lookahead_idx];
+
+ /* if we're looking for free space, invert the mask */
+ if (!used)
+ lookahead_msk = ~lookahead_msk;
+
+ /* figure out how many consecutive bits we need here */
+ need = RTE_MIN(left, MASK_ALIGN);
+
+ for (s_idx = 0; s_idx < need - 1; s_idx++)
+ lookahead_msk &= lookahead_msk >> 1ULL;
+
+ /* if first bit is not set, we've lost the run */
+ if ((lookahead_msk & 1) == 0)
+ break;
+
+ left -= need;
+
+ /* check if we've found what we were looking for */
+ if (left == 0) {
+ found = true;
+ break;
+ }
+ }
+
+ /* we didn't find anything, so continue */
+ if (!found)
+ continue;
+
+ return MASK_GET_IDX(msk_idx, run_start);
+ }
+ return used ? -ENOENT : -ENOSPC;
+}
+
+static int
+find_next(const struct used_mask *msk, int start, bool used) {
+ int idx, first, first_mod;
+ uint64_t first_msk;
+
+ /*
+ * mask only has granularity of MASK_ALIGN, but start may not be aligned
+ * on that boundary, so construct a special mask to exclude anything we
+ * don't want to see to avoid confusing ctz.
+ */
+ first = MASK_LEN_TO_IDX(start);
+ first_mod = MASK_LEN_TO_MOD(start);
+ first_msk = ~((1ULL << first_mod) - 1ULL);
+
+ for (idx = first; idx < msk->n_masks; idx++) {
+ uint64_t cur = msk->data[idx];
+ int found;
+
+ /* if we're looking for free entries, invert mask */
+ if (!used)
+ cur = ~cur;
+
+ /* ignore everything before start on first iteration */
+ if (idx == first)
+ cur &= first_msk;
+
+ /* check if we have any entries */
+ if (cur == 0)
+ continue;
+
+ /*
+ * find first set bit - that will correspond to whatever it is
+ * that we're looking for.
+ */
+ found = __builtin_ctzll(cur);
+ return MASK_GET_IDX(idx, found);
+ }
+ return used ? -ENOENT : -ENOSPC;
+}
+
+static int
+find_contig(const struct used_mask *msk, int start, bool used) {
+ int idx, first, first_mod;
+ int need_len, result = 0;
+
+ first = MASK_LEN_TO_IDX(start);
+ first_mod = MASK_LEN_TO_MOD(start);
+ for (idx = first; idx < msk->n_masks; idx++, result += need_len) {
+ uint64_t cur = msk->data[idx];
+ int run_len;
+
+ need_len = MASK_ALIGN;
+
+ /* if we're looking for free entries, invert mask */
+ if (!used)
+ cur = ~cur;
+
+ /* ignore everything before start on first iteration */
+ if (idx == first) {
+ cur >>= first_mod;
+ /* at the start, we don't need the full mask len */
+ need_len -= first_mod;
+ }
+
+ /* we will be looking for zeroes, so invert the mask */
+ cur = ~cur;
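+ /*
+ * e.g. if normalized cur ended in 0b111 (three matching entries
+ * at the start of this word), it now ends in 0b000 and the ctz
+ * below will report a run length of 3.
+ */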
+
+ /* if mask is zero, we have a complete run */
+ if (cur == 0)
+ continue;
+
+ /* see if the current run ends before the mask's end */
+ run_len = __builtin_ctzll(cur);
+
+ /* add however many zeroes we've had in the last run and quit */
+ if (run_len < need_len) {
+ result += run_len;
+ break;
+ }
+ }
+ return result;
+}
+
+int
+rte_fbarray_alloc(struct rte_fbarray *arr, const char *name, int cur_len,
+ int max_len, int elt_sz) {
+ size_t max_mmap_len, cur_mmap_len, page_sz;
+ char path[PATH_MAX];
+ struct used_mask *msk;
+ void *data;
+
+ /* TODO: parameter validation */
+
+ /* lengths must be aligned */
+ cur_len = RTE_ALIGN_CEIL(cur_len, MASK_ALIGN);
+ max_len = RTE_ALIGN_CEIL(max_len, MASK_ALIGN);
+
+ page_sz = sysconf(_SC_PAGESIZE);
+
+ cur_mmap_len = calc_data_size(page_sz, elt_sz, cur_len);
+ max_mmap_len = calc_data_size(page_sz, elt_sz, max_len);
+
+ data = eal_get_virtual_area(NULL, &max_mmap_len, page_sz, 0);
+ if (data == NULL)
+ return -1;
+
+ eal_get_fbarray_path(path, sizeof(path), name);
+ unlink(path);
+
+ if (expand_and_map(data, name, 0, cur_mmap_len)) {
+ munmap(data, max_mmap_len);
+ return -1;
+ }
+
+ /* populate data structure */
+ snprintf(arr->name, sizeof(arr->name), "%s", name);
+ arr->data = data;
+ arr->capacity = max_len;
+ arr->len = cur_len;
+ arr->elt_sz = elt_sz;
+ arr->count = 0;
+
+ msk = get_used_mask(data, elt_sz, cur_len);
+ msk->n_masks = MASK_LEN_TO_IDX(cur_len);
+
+ return 0;
+}
+
+int
+rte_fbarray_attach(const struct rte_fbarray *arr) {
+ size_t max_mmap_len, cur_mmap_len, page_sz;
+ void *data;
+
+ page_sz = sysconf(_SC_PAGESIZE);
+
+ cur_mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
+ max_mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->capacity);
+
+ data = eal_get_virtual_area(arr->data, &max_mmap_len, page_sz, 0);
+ if (data == NULL)
+ return -1;
+
+ if (expand_and_map(data, arr->name, 0, cur_mmap_len)) {
+ munmap(data, max_mmap_len);
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+rte_fbarray_free(struct rte_fbarray *arr) {
+ size_t page_sz = sysconf(_SC_PAGESIZE);
+ munmap(arr->data, calc_data_size(page_sz, arr->elt_sz, arr->capacity));
+ memset(arr, 0, sizeof(*arr));
+}
+
+int
+rte_fbarray_resize(struct rte_fbarray *arr, int new_len) {
+ size_t cur_mmap_len, new_mmap_len, page_sz;
+
+ /* TODO: parameter validation */
+ if (arr->len >= new_len) {
+ RTE_LOG(ERR, EAL, "Invalid length: %i >= %i\n", arr->len, new_len);
+ return -1;
+ }
+
+ page_sz = sysconf(_SC_PAGESIZE);
+
+ new_len = RTE_ALIGN_CEIL(new_len, MASK_ALIGN);
+
+ cur_mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
+ new_mmap_len = calc_data_size(page_sz, arr->elt_sz, new_len);
+
+ if (cur_mmap_len != new_mmap_len &&
+ expand_and_map(arr->data, arr->name, cur_mmap_len,
+ new_mmap_len)) {
+ return -1;
+ }
+
+ move_mask(arr->data, arr->elt_sz, arr->len, new_len);
+
+ arr->len = new_len;
+
+ return 0;
+}
+
+void *
+rte_fbarray_get(const struct rte_fbarray *arr, int idx) {
+ if (idx >= arr->len || idx < 0)
+ return NULL;
+ return RTE_PTR_ADD(arr->data, idx * arr->elt_sz);
+}
+
+/* TODO: replace -1 with debug sanity checks */
+int
+rte_fbarray_set_used(struct rte_fbarray *arr, int idx, bool used) {
+ struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
+ bool already_used;
+ int msk_idx = MASK_LEN_TO_IDX(idx);
+ uint64_t msk_bit = 1ULL << MASK_LEN_TO_MOD(idx);
+
+ if (idx >= arr->len || idx < 0)
+ return -1;
+
+ already_used = (msk->data[msk_idx] & msk_bit) != 0;
+
+ /* nothing to be done */
+ if (used == already_used)
+ return 0;
+
+ if (used) {
+ msk->data[msk_idx] |= msk_bit;
+ arr->count++;
+ } else {
+ msk->data[msk_idx] &= ~msk_bit;
+ arr->count--;
+ }
+
+ return 0;
+}
+
+int
+rte_fbarray_is_used(const struct rte_fbarray *arr, int idx) {
+ struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
+ int msk_idx = MASK_LEN_TO_IDX(idx);
+ uint64_t msk_bit = 1ULL << MASK_LEN_TO_MOD(idx);
+
+ if (idx >= arr->len || idx < 0)
+ return -1;
+
+ return (msk->data[msk_idx] & msk_bit) != 0;
+}
+
+int
+rte_fbarray_find_next_free(const struct rte_fbarray *arr, int start) {
+ if (start >= arr->len || start < 0)
+ return -EINVAL;
+
+ if (arr->len == arr->count)
+ return -ENOSPC;
+
+ return find_next(get_used_mask(arr->data, arr->elt_sz, arr->len),
+ start, false);
+}
+
+int
+rte_fbarray_find_next_used(const struct rte_fbarray *arr, int start) {
+ if (start >= arr->len || start < 0)
+ return -EINVAL;
+
+ if (arr->count == 0)
+ return -ENOENT;
+
+ return find_next(get_used_mask(arr->data, arr->elt_sz, arr->len),
+ start, true);
+}
+
+int
+rte_fbarray_find_next_n_free(const struct rte_fbarray *arr, int start, int n) {
+ if (start >= arr->len || start < 0 || n < 1 || n > arr->len)
+ return -EINVAL;
+
+ if (arr->len == arr->count || arr->len - arr->count < n)
+ return -ENOSPC;
+
+ return find_next_n(get_used_mask(arr->data, arr->elt_sz, arr->len),
+ start, n, false);
+}
+
+int
+rte_fbarray_find_next_n_used(const struct rte_fbarray *arr, int start, int n) {
+ if (start >= arr->len || start < 0 || n < 1 || n > arr->len)
+ return -EINVAL;
+
+ if (arr->count < n)
+ return -ENOENT;
+
+ return find_next_n(get_used_mask(arr->data, arr->elt_sz, arr->len),
+ start, n, true);
+}
+
+int
+rte_fbarray_find_contig_free(const struct rte_fbarray *arr, int start) {
+ if (start >= arr->len || start < 0)
+ return -EINVAL;
+
+ if (arr->len == arr->count)
+ return -ENOSPC;
+
+ if (arr->count == 0)
+ return arr->len - start;
+
+ return find_contig(get_used_mask(arr->data, arr->elt_sz, arr->len),
+ start, false);
+}
+
+int
+rte_fbarray_find_contig_used(const struct rte_fbarray *arr, int start) {
+ if (start >= arr->len || start < 0)
+ return -EINVAL;
+
+ if (arr->count == 0)
+ return -ENOENT;
+
+ return find_contig(get_used_mask(arr->data, arr->elt_sz, arr->len),
+ start, true);
+}
+
+int
+rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt) {
+ void *end;
+
+ end = RTE_PTR_ADD(arr->data, arr->elt_sz * arr->len);
+
+ if (elt < arr->data || elt >= end)
+ return -EINVAL;
+ return RTE_PTR_DIFF(elt, arr->data) / arr->elt_sz;
+}
+
+void
+rte_fbarray_dump_metadata(const struct rte_fbarray *arr, FILE *f) {
+ const struct used_mask *msk;
+ int i;
+
+ fprintf(f, "File-backed array: %s\n", arr->name);
+ fprintf(f, "size: %i occupied: %i capacity: %i elt_sz: %i\n",
+ arr->len, arr->count, arr->capacity, arr->elt_sz);
+ if (arr->data == NULL) {
+ fprintf(f, "not allocated\n");
+ return;
+ }
+
+ msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
+ for (i = 0; i < msk->n_masks; i++)
+ fprintf(f, "msk idx %i: 0x%016" PRIx64 "\n", i, msk->data[i]);
+}
@@ -42,6 +42,7 @@
/** Path of rte config file. */
#define RUNTIME_CONFIG_FMT "%s/.%s_config"
+#define FBARRAY_FMT "%s/%s_%s"
#include <stdint.h>
#include <limits.h>
@@ -67,6 +68,18 @@ eal_runtime_config_path(void)
return buffer;
}
+static inline const char *
+eal_get_fbarray_path(char *buffer, size_t buflen, const char *name) {
+ const char *directory = default_config_dir;
+ const char *home_dir = getenv("HOME");
+
+ if (getuid() != 0 && home_dir != NULL)
+ directory = home_dir;
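+ /*
+ * e.g. with the default "rte" prefix and an array named "test",
+ * a root user would get something like "/var/run/rte_test"
+ * (assuming default_config_dir points at /var/run).
+ */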
+ snprintf(buffer, buflen - 1, FBARRAY_FMT, directory,
+ internal_config.hugefile_prefix, name);
+ return buffer;
+}
+
/** Path of hugepage info file. */
#define HUGEPAGE_INFO_FMT "%s/.%s_hugepage_info"
new file mode 100644
@@ -0,0 +1,98 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_FBARRAY_H
+#define RTE_FBARRAY_H
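+
+/**
+ * @file
+ *
+ * File-backed array: a fixed-capacity, growable array of equal-sized
+ * elements backed by a file, with a used/free bitmask stored after the
+ * data for fast free/used slot lookups.
+ *
+ * A minimal usage sketch (array name and sizes are illustrative only):
+ *
+ *    struct rte_fbarray arr;
+ *    int idx;
+ *
+ *    if (rte_fbarray_alloc(&arr, "test", 64, 1024, sizeof(int)) != 0)
+ *        return;
+ *    idx = rte_fbarray_find_next_free(&arr, 0);
+ *    if (idx >= 0) {
+ *        int *elt = rte_fbarray_get(&arr, idx);
+ *        *elt = 42;
+ *        rte_fbarray_set_used(&arr, idx, true);
+ *    }
+ *    rte_fbarray_free(&arr);
+ */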
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_FBARRAY_NAME_LEN 64
+
+struct rte_fbarray {
+ char name[RTE_FBARRAY_NAME_LEN]; /**< name associated with the array */
+ int count; /**< number of entries stored */
+ int len; /**< current length of the array */
+ int capacity; /**< maximum length of the array */
+ int elt_sz; /**< size of each element */
+ void *data; /**< data pointer */
+};
+
+/* TODO: tmp? shmget? */
+
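+/**
+ * Allocate and set up a new file-backed array of at most @p max_len
+ * elements, each @p elt_sz bytes, with @p cur_len elements initially
+ * usable; lengths are rounded up to mask alignment internally.
+ *
+ * @return 0 on success, -1 on failure.
+ */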
+int
+rte_fbarray_alloc(struct rte_fbarray *arr, const char *name, int cur_len,
+ int max_len, int elt_sz);
+
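+/**
+ * Attach to an fbarray created by another process, mapping its data at
+ * the address recorded in @p arr.
+ *
+ * @return 0 on success, -1 on failure.
+ */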
+int
+rte_fbarray_attach(const struct rte_fbarray *arr);
+
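+/**
+ * Unmap the array's data and zero out the descriptor; the backing file
+ * is left in place.
+ */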
+void
+rte_fbarray_free(struct rte_fbarray *arr);
+
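+/**
+ * Grow the array to @p new_len elements (shrinking is not supported).
+ *
+ * @return 0 on success, -1 on failure.
+ */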
+int
+rte_fbarray_resize(struct rte_fbarray *arr, int new_len);
+
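+/**
+ * Get a pointer to element @p idx, or NULL if @p idx is out of bounds.
+ */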
+void *
+rte_fbarray_get(const struct rte_fbarray *arr, int idx);
+
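+/**
+ * Find the index of element @p elt within the array.
+ *
+ * @return non-negative index on success, -EINVAL if @p elt does not
+ *   point into the array.
+ */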
+int
+rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt);
+
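+/**
+ * Mark element @p idx as used (@p used is true) or free, adjusting the
+ * occupancy count accordingly.
+ *
+ * @return 0 on success, -1 if @p idx is out of bounds.
+ */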
+int
+rte_fbarray_set_used(struct rte_fbarray *arr, int idx, bool used);
+
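+/**
+ * Check whether element @p idx is marked as used.
+ *
+ * @return 1 if used, 0 if free, -1 if @p idx is out of bounds.
+ */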
+int
+rte_fbarray_is_used(const struct rte_fbarray *arr, int idx);
+
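+/*
+ * Lookup helpers below: the find_next* functions return the index of the
+ * next free/used slot (or of a run of @p n such slots) at or after
+ * @p start, or a negative errno-style value (-EINVAL, -ENOSPC, -ENOENT)
+ * on failure; the find_contig* functions return the length of the
+ * contiguous free/used run beginning at @p start (or a negative value
+ * on error).
+ */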
+int
+rte_fbarray_find_next_free(const struct rte_fbarray *arr, int start);
+
+int
+rte_fbarray_find_next_used(const struct rte_fbarray *arr, int start);
+
+int
+rte_fbarray_find_next_n_free(const struct rte_fbarray *arr, int start, int n);
+
+int
+rte_fbarray_find_next_n_used(const struct rte_fbarray *arr, int start, int n);
+
+int
+rte_fbarray_find_contig_free(const struct rte_fbarray *arr, int start);
+
+int
+rte_fbarray_find_contig_used(const struct rte_fbarray *arr, int start);
+
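+/**
+ * Dump the array's metadata and per-word usage masks to @p f.
+ */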
+void
+rte_fbarray_dump_metadata(const struct rte_fbarray *arr, FILE *f);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_FBARRAY_H */
@@ -86,6 +86,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_dev.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_options.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_thread.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_proc.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_fbarray.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_malloc.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += malloc_elem.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += malloc_heap.c