@@ -357,7 +357,7 @@ rte_cryptodev_asym_capability_get(uint8_t dev_id,
return &capability->asym.xform_capa;
}
return NULL;
-};
+}
int
rte_cryptodev_sym_capability_check_cipher(
@@ -803,7 +803,7 @@ rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
if ((dev == NULL) || (nb_qpairs < 1)) {
CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
- dev, nb_qpairs);
+ (void *)dev, nb_qpairs);
return -EINVAL;
}
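
A note on the (void *) casts introduced above and in later hunks: the printf-style %p conversion is specified for a plain void *, so other pointer types are cast explicitly to keep -Wformat/-Wpedantic builds quiet. A minimal sketch of the idea (illustrative code, not part of the patch):

#include <stdio.h>

int main(void)
{
	int x = 0;
	int *p = &x;

	/* %p expects void *; the explicit cast makes the call well-defined */
	printf("%p\n", (void *)p);
	return 0;
}
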
@@ -14,10 +14,10 @@ rte_cpu_is_supported(void)
static const enum rte_cpu_flag_t compile_time_flags[] = {
RTE_COMPILE_TIME_CPUFLAGS
};
- unsigned count = RTE_DIM(compile_time_flags), i;
+ const unsigned int count = RTE_DIM(compile_time_flags);
int ret;
- for (i = 0; i < count; i++) {
+ for (unsigned int i = 0; i < count; i++) {
ret = rte_cpu_get_flag_enabled(compile_time_flags[i]);
if (ret < 0) {
@@ -18,7 +18,7 @@
RTE_DEFINE_PER_LCORE(int, _rte_errno);
const char *
-rte_strerror(int errnum)
+rte_strerror(const int errnum)
{
/* BSD puts a colon in the "unknown error" messages, Linux doesn't */
#ifdef RTE_EXEC_ENV_FREEBSD
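
The const-qualification of by-value parameters throughout these hunks (errnum, lcore_id, level, ...) is purely defensive: per the C standard, a top-level qualifier on a parameter is ignored for type compatibility, so it constrains only the function body and does not change the function's type or ABI. A minimal sketch of what the qualifier buys (hypothetical helper, not DPDK code):

#include <stdio.h>

static int add_one(const int v)
{
	/* v++; would be rejected here: the by-value copy is read-only */
	return v + 1;
}

int main(void)
{
	printf("%d\n", add_one(41)); /* prints 42 */
	return 0;
}
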
@@ -18,7 +18,7 @@ rte_hexdump(FILE *f, const char *title, const void *buf, unsigned int len)
char line[LINE_LEN]; /* space needed 8+16*3+3+16 == 75 */
fprintf(f, "%s at [%p], len=%u\n",
- title ? : " Dump data", data, len);
+ title ? title : " Dump data", data, len);
ofs = 0;
while (ofs < len) {
/* format the line in the buffer */
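
The hexdump change above replaces the GNU "omitted middle operand" conditional (a ? : b, equivalent to a ? a : b apart from single evaluation of a) with the ISO C spelling that -Wpedantic accepts. A sketch of the two forms (illustrative only):

#include <stdio.h>

int main(void)
{
	const char *title = NULL;

	/* GNU extension, flagged by -Wpedantic:  title ? : "Dump data" */
	/* ISO C equivalent, as used in the patch: */
	printf("%s\n", title ? title : "Dump data");
	return 0;
}
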
@@ -49,7 +49,7 @@ rte_hexdump(FILE *f, const char *title, const void *buf, unsigned int len)
}
void
-rte_memdump(FILE *f, const char *title, const void *buf, unsigned int len)
+rte_memdump(FILE *f, const char *title, const void *buf, const unsigned int len)
{
unsigned int i, out;
const unsigned char *data = buf;
@@ -48,13 +48,13 @@ int rte_lcore_to_cpu_id(int lcore_id)
return lcore_config[lcore_id].core_id;
}
-rte_cpuset_t rte_lcore_cpuset(unsigned int lcore_id)
+rte_cpuset_t rte_lcore_cpuset(const unsigned int lcore_id)
{
return lcore_config[lcore_id].cpuset;
}
enum rte_lcore_role_t
-rte_eal_lcore_role(unsigned int lcore_id)
+rte_eal_lcore_role(const unsigned int lcore_id)
{
struct rte_config *cfg = rte_eal_get_configuration();
@@ -63,7 +63,7 @@ rte_eal_lcore_role(unsigned int lcore_id)
return cfg->lcore_role[lcore_id];
}
-int rte_lcore_is_enabled(unsigned int lcore_id)
+int rte_lcore_is_enabled(const unsigned int lcore_id)
{
struct rte_config *cfg = rte_eal_get_configuration();
@@ -92,7 +92,7 @@ unsigned int rte_get_next_lcore(unsigned int i, int skip_master, int wrap)
}
unsigned int
-rte_lcore_to_socket_id(unsigned int lcore_id)
+rte_lcore_to_socket_id(const unsigned int lcore_id)
{
return lcore_config[lcore_id].socket_id;
}
@@ -194,7 +194,7 @@ rte_socket_count(void)
}
int
-rte_socket_id_by_idx(unsigned int idx)
+rte_socket_id_by_idx(const unsigned int idx)
{
const struct rte_config *config = rte_eal_get_configuration();
if (idx >= config->numa_node_count) {
@@ -84,14 +84,14 @@ rte_log_get_stream(void)
* of stderr, even if the application closes and
* reopens it.
*/
- return default_log_stream ? : stderr;
+ return default_log_stream ? default_log_stream : stderr;
}
return f;
}
/* Set global log level */
void
-rte_log_set_global_level(uint32_t level)
+rte_log_set_global_level(const uint32_t level)
{
rte_logs.level = (uint32_t)level;
}
@@ -104,7 +104,7 @@ rte_log_get_global_level(void)
}
int
-rte_log_get_level(uint32_t type)
+rte_log_get_level(const uint32_t type)
{
if (type >= rte_logs.dynamic_types_len)
return -1;
@@ -113,7 +113,7 @@ rte_log_get_level(uint32_t type)
}
int
-rte_log_set_level(uint32_t type, uint32_t level)
+rte_log_set_level(const uint32_t type, const uint32_t level)
{
if (type >= rte_logs.dynamic_types_len)
return -1;
@@ -127,7 +127,7 @@ rte_log_set_level(uint32_t type, uint32_t level)
/* set log level by regular expression */
int
-rte_log_set_level_regexp(const char *regex, uint32_t level)
+rte_log_set_level_regexp(const char *regex, const uint32_t level)
{
regex_t r;
size_t i;
@@ -69,7 +69,7 @@ find_mem_alloc_validator(const char *name, int socket_id)
bool
eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
- size_t len)
+ const size_t len)
{
void *end, *aligned_start, *aligned_end;
size_t pgsz = (size_t)msl->page_sz;
@@ -252,7 +252,7 @@ eal_memalloc_mem_event_notify(enum rte_mem_event event, const void *start,
int
eal_memalloc_mem_alloc_validator_register(const char *name,
- rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
+ rte_mem_alloc_validator_t clb, const int socket_id, const size_t limit)
{
struct mem_alloc_validator_entry *entry;
int ret, len;
@@ -302,7 +302,7 @@ eal_memalloc_mem_alloc_validator_register(const char *name,
}
int
-eal_memalloc_mem_alloc_validator_unregister(const char *name, int socket_id)
+eal_memalloc_mem_alloc_validator_unregister(const char *name, const int socket_id)
{
struct mem_alloc_validator_entry *entry;
int ret, len;
@@ -341,7 +341,7 @@ eal_memalloc_mem_alloc_validator_unregister(const char *name, int socket_id)
}
int
-eal_memalloc_mem_alloc_validate(int socket_id, size_t new_len)
+eal_memalloc_mem_alloc_validate(const int socket_id, const size_t new_len)
{
struct mem_alloc_validator_entry *entry;
int ret = 0;
@@ -43,7 +43,7 @@ static uint64_t system_page_sz;
#define MAX_MMAP_WITH_DEFINED_ADDR_TRIES 5
void *
eal_get_virtual_area(void *requested_addr, size_t *size,
- size_t page_sz, int flags, int mmap_flags)
+ const size_t page_sz, const int flags, int mmap_flags)
{
bool addr_is_hint, allow_shrink, unmap, no_align;
uint64_t map_sz;
@@ -707,7 +707,7 @@ send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
}
static int
-mp_send(struct rte_mp_msg *msg, const char *peer, int type)
+mp_send(struct rte_mp_msg *msg, const char *peer, const int type)
{
int dir_fd, ret = 0;
DIR *mp_dir;
@@ -65,7 +65,7 @@ rte_dump_tailq(FILE *f)
const struct rte_tailq_entry_head *head = &tailq->tailq_head;
fprintf(f, "Tailq %u: qname:<%s>, tqh_first:%p, tqh_last:%p\n",
- i, tailq->name, head->tqh_first, head->tqh_last);
+ i, tailq->name, (void *)head->tqh_first, (void *)head->tqh_last);
}
rte_mcfg_tailq_read_unlock();
}
@@ -48,7 +48,7 @@ eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs);
*/
bool
eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
- size_t len);
+ const size_t len);
/* synchronize local memory map to primary process */
int
@@ -67,13 +67,13 @@ eal_memalloc_mem_event_notify(enum rte_mem_event event, const void *start,
int
eal_memalloc_mem_alloc_validator_register(const char *name,
- rte_mem_alloc_validator_t clb, int socket_id, size_t limit);
+ rte_mem_alloc_validator_t clb, const int socket_id, const size_t limit);
int
-eal_memalloc_mem_alloc_validator_unregister(const char *name, int socket_id);
+eal_memalloc_mem_alloc_validator_unregister(const char *name, const int socket_id);
int
-eal_memalloc_mem_alloc_validate(int socket_id, size_t new_len);
+eal_memalloc_mem_alloc_validate(const int socket_id, const size_t new_len);
/* returns fd or -errno */
int
@@ -234,7 +234,7 @@ int rte_eal_check_module(const char *module_name);
/**< immediately unmap reserved virtual area. */
void *
eal_get_virtual_area(void *requested_addr, size_t *size,
- size_t page_sz, int flags, int mmap_flags);
+ const size_t page_sz, const int flags, int mmap_flags);
/**
* Get cpu core_id.
@@ -94,9 +94,9 @@ __rte_bitmap_index2_set(struct rte_bitmap *bmp)
}
static inline uint32_t
-__rte_bitmap_get_memory_footprint(uint32_t n_bits,
- uint32_t *array1_byte_offset, uint32_t *array1_slabs,
- uint32_t *array2_byte_offset, uint32_t *array2_slabs)
+__rte_bitmap_get_memory_footprint(const uint32_t n_bits,
+ uint32_t * __restrict array1_byte_offset, uint32_t * __restrict array1_slabs,
+ uint32_t * __restrict array2_byte_offset, uint32_t * __restrict array2_slabs)
{
uint32_t n_slabs_context, n_slabs_array1, n_cache_lines_context_and_array1;
uint32_t n_cache_lines_array2;
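
The __restrict qualifiers added to the output pointers above promise the compiler that the result locations never alias each other, which can enable better code generation in the footprint helper. A minimal sketch of the same idea (hypothetical helper, not the real bitmap code):

#include <inttypes.h>
#include <stdio.h>

static void split_u32(uint32_t v,
	uint32_t * __restrict hi, uint32_t * __restrict lo)
{
	/* __restrict: the compiler may assume *hi and *lo never alias */
	*hi = v >> 16;
	*lo = v & 0xffffu;
}

int main(void)
{
	uint32_t hi, lo;

	split_u32(0xdeadbeefu, &hi, &lo);
	printf("%" PRIx32 " %" PRIx32 "\n", hi, lo); /* prints "dead beef" */
	return 0;
}
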
@@ -145,7 +145,7 @@ __rte_bitmap_scan_init(struct rte_bitmap *bmp)
* Bitmap memory footprint measured in bytes on success, 0 on error
*/
static inline uint32_t
-rte_bitmap_get_memory_footprint(uint32_t n_bits) {
+rte_bitmap_get_memory_footprint(const uint32_t n_bits) {
/* Check input arguments */
if (n_bits == 0) {
return 0;
@@ -167,7 +167,7 @@ rte_bitmap_get_memory_footprint(uint32_t n_bits) {
* Handle to bitmap instance.
*/
static inline struct rte_bitmap *
-rte_bitmap_init(uint32_t n_bits, uint8_t *mem, uint32_t mem_size)
+rte_bitmap_init(const uint32_t n_bits, uint8_t *mem, const uint32_t mem_size)
{
struct rte_bitmap *bmp;
uint32_t array1_byte_offset, array1_slabs, array2_byte_offset, array2_slabs;
@@ -247,7 +247,7 @@ rte_bitmap_reset(struct rte_bitmap *bmp)
* 0 upon success, error code otherwise
*/
static inline void
-rte_bitmap_prefetch0(struct rte_bitmap *bmp, uint32_t pos)
+rte_bitmap_prefetch0(struct rte_bitmap *bmp, const uint32_t pos)
{
uint64_t *slab2;
uint32_t index2;
@@ -268,7 +268,7 @@ rte_bitmap_prefetch0(struct rte_bitmap *bmp, uint32_t pos)
* 0 when bit is cleared, non-zero when bit is set
*/
static inline uint64_t
-rte_bitmap_get(struct rte_bitmap *bmp, uint32_t pos)
+rte_bitmap_get(struct rte_bitmap *bmp, const uint32_t pos)
{
uint64_t *slab2;
uint32_t index2, offset2;
@@ -288,7 +288,7 @@ rte_bitmap_get(struct rte_bitmap *bmp, uint32_t pos)
* Bit position
*/
static inline void
-rte_bitmap_set(struct rte_bitmap *bmp, uint32_t pos)
+rte_bitmap_set(struct rte_bitmap *bmp, const uint32_t pos)
{
uint64_t *slab1, *slab2;
uint32_t index1, index2, offset1, offset2;
@@ -316,7 +316,7 @@ rte_bitmap_set(struct rte_bitmap *bmp, uint32_t pos)
* Value to be assigned to the 64-bit slab in array2
*/
static inline void
-rte_bitmap_set_slab(struct rte_bitmap *bmp, uint32_t pos, uint64_t slab)
+rte_bitmap_set_slab(struct rte_bitmap *bmp, const uint32_t pos, const uint64_t slab)
{
uint64_t *slab1, *slab2;
uint32_t index1, index2, offset1;
@@ -356,7 +356,7 @@ __rte_bitmap_line_not_empty(uint64_t *slab2)
* Bit position
*/
static inline void
-rte_bitmap_clear(struct rte_bitmap *bmp, uint32_t pos)
+rte_bitmap_clear(struct rte_bitmap *bmp, const uint32_t pos)
{
uint64_t *slab1, *slab2;
uint32_t index1, index2, offset1, offset2;
@@ -261,7 +261,7 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void)
* whichever difference is the lowest.
*/
#define RTE_ALIGN_MUL_NEAR(v, mul) \
- ({ \
+ __extension__ ({ \
typeof(v) ceil = RTE_ALIGN_MUL_CEIL(v, mul); \
typeof(v) floor = RTE_ALIGN_MUL_FLOOR(v, mul); \
(ceil - v) > (v - floor) ? floor : ceil; \
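
Prefixing the ({ ... }) blocks with __extension__ marks the GNU statement-expression so that -Wpedantic does not report it; the construct itself is unchanged. A self-contained sketch (illustrative macro, not from DPDK):

#include <stdio.h>

#define MY_MAX(a, b) __extension__ ({	\
	__typeof__(a) _a = (a);		\
	__typeof__(b) _b = (b);		\
	_a > _b ? _a : _b;		\
})

int main(void)
{
	printf("%d\n", MY_MAX(3, 7)); /* prints 7 */
	return 0;
}
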
@@ -39,7 +39,7 @@ RTE_DECLARE_PER_LCORE(int, _rte_errno); /**< Per core error number. */
* A pointer to a thread-local string containing the text describing
* the error.
*/
-const char *rte_strerror(int errnum);
+const char *rte_strerror(const int errnum);
#ifndef __ELASTERROR
/**
@@ -50,7 +50,7 @@ rte_hexdump(FILE *f, const char * title, const void * buf, unsigned int len);
*/
void
-rte_memdump(FILE *f, const char * title, const void * buf, unsigned int len);
+rte_memdump(FILE *f, const char * title, const void * buf, const unsigned int len);
#ifdef __cplusplus
@@ -33,7 +33,7 @@ RTE_DECLARE_PER_LCORE(rte_cpuset_t, _cpuset); /**< Per thread "cpuset". */
* @return
* The role of the lcore.
*/
-enum rte_lcore_role_t rte_eal_lcore_role(unsigned int lcore_id);
+enum rte_lcore_role_t rte_eal_lcore_role(const unsigned int lcore_id);
/**
* Return the Application thread ID of the execution unit.
@@ -121,7 +121,7 @@ rte_socket_count(void);
* - -1 on error, with errno set to EINVAL
*/
int
-rte_socket_id_by_idx(unsigned int idx);
+rte_socket_id_by_idx(const unsigned int idx);
/**
* Get the ID of the physical socket of the specified lcore
@@ -132,7 +132,7 @@ rte_socket_id_by_idx(unsigned int idx);
* the ID of lcoreid's physical socket
*/
unsigned int
-rte_lcore_to_socket_id(unsigned int lcore_id);
+rte_lcore_to_socket_id(const unsigned int lcore_id);
/**
* @warning
@@ -161,7 +161,7 @@ rte_lcore_to_cpu_id(int lcore_id);
*/
__rte_experimental
rte_cpuset_t
-rte_lcore_cpuset(unsigned int lcore_id);
+rte_lcore_cpuset(const unsigned int lcore_id);
/**
* Test if an lcore is enabled.
@@ -172,7 +172,7 @@ rte_lcore_cpuset(unsigned int lcore_id);
* @return
* True if the given lcore is enabled; false otherwise.
*/
-int rte_lcore_is_enabled(unsigned int lcore_id);
+int rte_lcore_is_enabled(const unsigned int lcore_id);
/**
* Get the next enabled lcore ID.
@@ -123,7 +123,7 @@ FILE *rte_log_get_stream(void);
* @param level
* Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
*/
-void rte_log_set_global_level(uint32_t level);
+void rte_log_set_global_level(const uint32_t level);
/**
* Get the global log level.
@@ -141,7 +141,7 @@ uint32_t rte_log_get_global_level(void);
* @return
* 0 on success, a negative value if logtype is invalid.
*/
-int rte_log_get_level(uint32_t logtype);
+int rte_log_get_level(const uint32_t logtype);
/**
* Set the log level for a given type based on shell pattern.
@@ -165,7 +165,7 @@ int rte_log_set_level_pattern(const char *pattern, uint32_t level);
* @return
* 0 on success, a negative value if level is invalid.
*/
-int rte_log_set_level_regexp(const char *regex, uint32_t level);
+int rte_log_set_level_regexp(const char *regex, const uint32_t level);
/**
* Set the log level for a given type.
@@ -177,7 +177,7 @@ int rte_log_set_level_regexp(const char *regex, uint32_t level);
* @return
* 0 on success, a negative value if logtype or level is invalid.
*/
-int rte_log_set_level(uint32_t logtype, uint32_t level);
+int rte_log_set_level(const uint32_t logtype, const uint32_t level);
/**
* Get the current loglevel for the message being processed.
@@ -666,8 +666,8 @@ elem_state_to_str(enum elem_state state)
void
malloc_elem_dump(const struct malloc_elem *elem, FILE *f)
{
- fprintf(f, "Malloc element at %p (%s)\n", elem,
+ fprintf(f, "Malloc element at %p (%s)\n", (const void*)elem,
elem_state_to_str(elem->state));
fprintf(f, " len: 0x%zx pad: 0x%" PRIx32 "\n", elem->size, elem->pad);
- fprintf(f, " prev: %p next: %p\n", elem->prev, elem->next);
+ fprintf(f, " prev: %p next: %p\n", (void*)elem->prev, (void*)elem->next);
}
@@ -451,7 +451,7 @@ rte_eal_config_reattach(void)
/* errno is stale, don't use */
RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config at [%p], got [%p]"
" - please use '--" OPT_BASE_VIRTADDR
- "' option\n", rte_mem_cfg_addr, mem_config);
+ "' option\n", rte_mem_cfg_addr, (void*)mem_config);
munmap(mem_config, sizeof(struct rte_mem_config));
return -1;
}
@@ -163,4 +163,4 @@ static struct rte_class rte_class_eth = {
.dev_iterate = eth_dev_iterate,
};
-RTE_REGISTER_CLASS(eth, rte_class_eth);
+RTE_REGISTER_CLASS(eth, rte_class_eth)
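
Dropping the trailing ';' after the registration and versioning macros (RTE_REGISTER_CLASS here, EAL_REGISTER_TAILQ, VERSION_SYMBOL and friends below) avoids -Wpedantic's "ISO C does not allow extra ';' outside of a function": when such a macro expands to a complete definition, or to nothing in some configurations, the extra semicolon is an empty file-scope declaration. A sketch with a hypothetical macro of that shape (not the real DPDK expansion):

#include <stdio.h>

#define REGISTER_DEMO(name) \
	static void __attribute__((constructor)) name##_init(void) \
	{ \
		printf("%s registered\n", #name); \
	}

REGISTER_DEMO(demo)	/* no trailing ';': the macro already ends the definition */

int main(void)
{
	return 0; /* "demo registered" is printed before main runs */
}
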
@@ -41,7 +41,7 @@ rte_mtr_ops_get(uint16_t port_id, struct rte_mtr_error *error)
}
#define RTE_MTR_FUNC(port_id, func) \
-({ \
+__extension__ ({ \
const struct rte_mtr_ops *ops = \
rte_mtr_ops_get(port_id, error); \
if (ops == NULL) \
@@ -40,7 +40,7 @@ rte_tm_ops_get(uint16_t port_id, struct rte_tm_error *error)
}
#define RTE_TM_FUNC(port_id, func) \
-({ \
+__extension__ ({ \
const struct rte_tm_ops *ops = \
rte_tm_ops_get(port_id, error); \
if (ops == NULL) \
@@ -474,7 +474,7 @@ rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
__rte_mbuf_sanity_check(m, 1);
fprintf(f, "dump mbuf at %p, iova=%"PRIx64", buf_len=%u\n",
- m, (uint64_t)m->buf_iova, (unsigned)m->buf_len);
+ (const void *)m, (uint64_t)m->buf_iova, (unsigned)m->buf_len);
fprintf(f, " pkt_len=%"PRIu32", ol_flags=%"PRIx64", nb_segs=%u, "
"in_port=%u\n", m->pkt_len, m->ol_flags,
(unsigned)m->nb_segs, (unsigned)m->port);
@@ -484,7 +484,7 @@ rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
__rte_mbuf_sanity_check(m, 0);
fprintf(f, " segment at %p, data=%p, data_len=%u\n",
- m, rte_pktmbuf_mtod(m, void *), (unsigned)m->data_len);
+ (const void *)m, (void *)rte_pktmbuf_mtod(m, void *), (unsigned)m->data_len);
len = dump_len;
if (len > m->data_len)
len = m->data_len;
@@ -28,7 +28,7 @@ TAILQ_HEAD(mbuf_dynfield_list, rte_tailq_entry);
static struct rte_tailq_elem mbuf_dynfield_tailq = {
.name = "RTE_MBUF_DYNFIELD",
};
-EAL_REGISTER_TAILQ(mbuf_dynfield_tailq);
+EAL_REGISTER_TAILQ(mbuf_dynfield_tailq)
struct mbuf_dynflag_elt {
TAILQ_ENTRY(mbuf_dynflag_elt) next;
@@ -40,7 +40,7 @@ TAILQ_HEAD(mbuf_dynflag_list, rte_tailq_entry);
static struct rte_tailq_elem mbuf_dynflag_tailq = {
.name = "RTE_MBUF_DYNFLAG",
};
-EAL_REGISTER_TAILQ(mbuf_dynflag_tailq);
+EAL_REGISTER_TAILQ(mbuf_dynflag_tailq)
struct mbuf_dyn_shm {
/**
@@ -1200,7 +1200,7 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
RTE_ASSERT(f != NULL);
RTE_ASSERT(mp != NULL);
- fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
+ fprintf(f, "mempool <%s>@%p\n", mp->name, (void*)mp);
fprintf(f, " flags=%x\n", mp->flags);
fprintf(f, " pool=%p\n", mp->pool_data);
fprintf(f, " iova=0x%" PRIx64 "\n", mp->mz->iova);
@@ -232,7 +232,7 @@ rte_ring_free(struct rte_ring *r)
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
{
- fprintf(f, "ring <%s>@%p\n", r->name, r);
+ fprintf(f, "ring <%s>@%p\n", r->name, (const void*)r);
fprintf(f, " flags=%x\n", r->flags);
fprintf(f, " size=%"PRIu32"\n", r->size);
fprintf(f, " capacity=%"PRIu32"\n", r->capacity);
@@ -145,7 +145,7 @@ rte_timer_subsystem_init_v20(void)
priv_timer[lcore_id].prev_lcore = lcore_id;
}
}
-VERSION_SYMBOL(rte_timer_subsystem_init, _v20, 2.0);
+VERSION_SYMBOL(rte_timer_subsystem_init, _v20, 2.0)
/* Init the timer library. Allocate an array of timer data structs in shared
* memory, and allocate the zeroth entry for use with original timer
@@ -211,7 +211,7 @@ rte_timer_subsystem_init_v1905(void)
}
MAP_STATIC_SYMBOL(int rte_timer_subsystem_init(void),
rte_timer_subsystem_init_v1905);
-BIND_DEFAULT_SYMBOL(rte_timer_subsystem_init, _v1905, 19.05);
+BIND_DEFAULT_SYMBOL(rte_timer_subsystem_init, _v1905, 19.05)
void
rte_timer_subsystem_finalize(void)
@@ -572,7 +572,7 @@ rte_timer_reset_v20(struct rte_timer *tim, uint64_t ticks,
return __rte_timer_reset(tim, cur_time + ticks, period, tim_lcore,
fct, arg, 0, &default_timer_data);
}
-VERSION_SYMBOL(rte_timer_reset, _v20, 2.0);
+VERSION_SYMBOL(rte_timer_reset, _v20, 2.0)
int
rte_timer_reset_v1905(struct rte_timer *tim, uint64_t ticks,
@@ -587,7 +587,7 @@ MAP_STATIC_SYMBOL(int rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
unsigned int tim_lcore,
rte_timer_cb_t fct, void *arg),
rte_timer_reset_v1905);
-BIND_DEFAULT_SYMBOL(rte_timer_reset, _v1905, 19.05);
+BIND_DEFAULT_SYMBOL(rte_timer_reset, _v1905, 19.05)
int
rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
@@ -662,7 +662,7 @@ rte_timer_stop_v20(struct rte_timer *tim)
{
return __rte_timer_stop(tim, 0, &default_timer_data);
}
-VERSION_SYMBOL(rte_timer_stop, _v20, 2.0);
+VERSION_SYMBOL(rte_timer_stop, _v20, 2.0)
int
rte_timer_stop_v1905(struct rte_timer *tim)
@@ -671,7 +671,7 @@ rte_timer_stop_v1905(struct rte_timer *tim)
}
MAP_STATIC_SYMBOL(int rte_timer_stop(struct rte_timer *tim),
rte_timer_stop_v1905);
-BIND_DEFAULT_SYMBOL(rte_timer_stop, _v1905, 19.05);
+BIND_DEFAULT_SYMBOL(rte_timer_stop, _v1905, 19.05)
int
rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim)
@@ -822,7 +822,7 @@ rte_timer_manage_v20(void)
{
__rte_timer_manage(&default_timer_data);
}
-VERSION_SYMBOL(rte_timer_manage, _v20, 2.0);
+VERSION_SYMBOL(rte_timer_manage, _v20, 2.0)
int
rte_timer_manage_v1905(void)
@@ -836,7 +836,7 @@ rte_timer_manage_v1905(void)
return 0;
}
MAP_STATIC_SYMBOL(int rte_timer_manage(void), rte_timer_manage_v1905);
-BIND_DEFAULT_SYMBOL(rte_timer_manage, _v1905, 19.05);
+BIND_DEFAULT_SYMBOL(rte_timer_manage, _v1905, 19.05)
int
rte_timer_alt_manage(uint32_t timer_data_id,
@@ -1079,7 +1079,7 @@ rte_timer_dump_stats_v20(FILE *f)
{
__rte_timer_dump_stats(&default_timer_data, f);
}
-VERSION_SYMBOL(rte_timer_dump_stats, _v20, 2.0);
+VERSION_SYMBOL(rte_timer_dump_stats, _v20, 2.0)
int
rte_timer_dump_stats_v1905(FILE *f)
@@ -1088,7 +1088,7 @@ rte_timer_dump_stats_v1905(FILE *f)
}
MAP_STATIC_SYMBOL(int rte_timer_dump_stats(FILE *f),
rte_timer_dump_stats_v1905);
-BIND_DEFAULT_SYMBOL(rte_timer_dump_stats, _v1905, 19.05);
+BIND_DEFAULT_SYMBOL(rte_timer_dump_stats, _v1905, 19.05)
int
rte_timer_alt_dump_stats(uint32_t timer_data_id __rte_unused, FILE *f)