@@ -105,32 +105,32 @@ my_ether_aton(const char *a)
errno = 0;
o[i] = strtoul(a, &end, 16);
if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
- return (NULL);
+ return NULL;
a = end + 1;
} while (++i != sizeof (o) / sizeof (o[0]) && end[0] != 0);
/* Junk at the end of line */
if (end[0] != 0)
- return (NULL);
+ return NULL;
/* Support the format XX:XX:XX:XX:XX:XX */
if (i == ETHER_ADDR_LEN) {
while (i-- != 0) {
if (o[i] > UINT8_MAX)
- return (NULL);
+ return NULL;
ether_addr.ea_oct[i] = (uint8_t)o[i];
}
/* Support the format XXXX:XXXX:XXXX */
} else if (i == ETHER_ADDR_LEN / 2) {
while (i-- != 0) {
if (o[i] > UINT16_MAX)
- return (NULL);
+ return NULL;
ether_addr.ea_oct[i * 2] = (uint8_t)(o[i] >> 8);
ether_addr.ea_oct[i * 2 + 1] = (uint8_t)(o[i] & 0xff);
}
/* unknown format */
} else
- return (NULL);
+ return NULL;
return (struct ether_addr *)&ether_addr;
}
@@ -135,12 +135,12 @@ my_inet_pton(int af, const char *src, void *dst)
{
switch (af) {
case AF_INET:
- return (inet_pton4(src, dst));
+ return inet_pton4(src, dst);
case AF_INET6:
- return (inet_pton6(src, dst));
+ return inet_pton6(src, dst);
default:
errno = EAFNOSUPPORT;
- return (-1);
+ return -1;
}
/* NOTREACHED */
}
@@ -172,26 +172,26 @@ inet_pton4(const char *src, unsigned char *dst)
unsigned int new = *tp * 10 + (pch - digits);
if (new > 255)
- return (0);
+ return 0;
if (! saw_digit) {
if (++octets > 4)
- return (0);
+ return 0;
saw_digit = 1;
}
*tp = (unsigned char)new;
} else if (ch == '.' && saw_digit) {
if (octets == 4)
- return (0);
+ return 0;
*++tp = 0;
saw_digit = 0;
} else
- return (0);
+ return 0;
}
if (octets < 4)
- return (0);
+ return 0;
memcpy(dst, tmp, INADDRSZ);
- return (1);
+ return 1;
}
/* int
@@ -224,7 +224,7 @@ inet_pton6(const char *src, unsigned char *dst)
/* Leading :: requires some special handling. */
if (*src == ':')
if (*++src != ':')
- return (0);
+ return 0;
curtok = src;
saw_xdigit = count_xdigit = 0;
val = 0;
@@ -236,11 +236,11 @@ inet_pton6(const char *src, unsigned char *dst)
pch = strchr((xdigits = xdigits_u), ch);
if (pch != NULL) {
if (count_xdigit >= 4)
- return (0);
+ return 0;
val <<= 4;
val |= (pch - xdigits);
if (val > 0xffff)
- return (0);
+ return 0;
saw_xdigit = 1;
count_xdigit++;
continue;
@@ -249,14 +249,14 @@ inet_pton6(const char *src, unsigned char *dst)
curtok = src;
if (!saw_xdigit) {
if (colonp)
- return (0);
+ return 0;
colonp = tp;
continue;
} else if (*src == '\0') {
- return (0);
+ return 0;
}
if (tp + sizeof(int16_t) > endp)
- return (0);
+ return 0;
*tp++ = (unsigned char) ((val >> 8) & 0xff);
*tp++ = (unsigned char) (val & 0xff);
saw_xdigit = 0;
@@ -272,11 +272,11 @@ inet_pton6(const char *src, unsigned char *dst)
dbloct_count += 2;
break; /* '\0' was seen by inet_pton4(). */
}
- return (0);
+ return 0;
}
if (saw_xdigit) {
if (tp + sizeof(int16_t) > endp)
- return (0);
+ return 0;
*tp++ = (unsigned char) ((val >> 8) & 0xff);
*tp++ = (unsigned char) (val & 0xff);
dbloct_count++;
@@ -300,9 +300,9 @@ inet_pton6(const char *src, unsigned char *dst)
tp = endp;
}
if (tp != endp)
- return (0);
+ return 0;
memcpy(dst, tmp, IN6ADDRSZ);
- return (1);
+ return 1;
}
int
@@ -315,35 +315,35 @@ cmdline_parse_num(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res,
case BIN_OK:
if ( nd.type == INT8 && res1 <= INT8_MAX ) {
if (res) *(int8_t *)res = (int8_t) res1;
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else if ( nd.type == INT16 && res1 <= INT16_MAX ) {
if (res) *(int16_t *)res = (int16_t) res1;
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else if ( nd.type == INT32 && res1 <= INT32_MAX ) {
if (res) *(int32_t *)res = (int32_t) res1;
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else if ( nd.type == INT64 && res1 <= INT64_MAX ) {
if (res) *(int64_t *)res = (int64_t) res1;
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else if ( nd.type == UINT8 && res1 <= UINT8_MAX ) {
if (res) *(uint8_t *)res = (uint8_t) res1;
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else if (nd.type == UINT16 && res1 <= UINT16_MAX ) {
if (res) *(uint16_t *)res = (uint16_t) res1;
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else if ( nd.type == UINT32 && res1 <= UINT32_MAX ) {
if (res) *(uint32_t *)res = (uint32_t) res1;
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else if ( nd.type == UINT64 ) {
if (res) *(uint64_t *)res = res1;
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else {
return -1;
@@ -353,19 +353,19 @@ cmdline_parse_num(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res,
case DEC_NEG_OK:
if ( nd.type == INT8 && res1 <= INT8_MAX + 1 ) {
if (res) *(int8_t *)res = (int8_t) (-res1);
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else if ( nd.type == INT16 && res1 <= (uint16_t)INT16_MAX + 1 ) {
if (res) *(int16_t *)res = (int16_t) (-res1);
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else if ( nd.type == INT32 && res1 <= (uint32_t)INT32_MAX + 1 ) {
if (res) *(int32_t *)res = (int32_t) (-res1);
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else if ( nd.type == INT64 && res1 <= (uint64_t)INT64_MAX + 1 ) {
if (res) *(int64_t *)res = (int64_t) (-res1);
- return (buf-srcbuf);
+ return buf-srcbuf;
}
else {
return -1;
@@ -102,7 +102,7 @@ parse_ports(cmdline_portlist_t *pl, const char *str)
ps = strtoul(first, &end, 10);
if (errno != 0 || end == first ||
(end[0] != '-' && end[0] != 0 && end != last))
- return (-1);
+ return -1;
/* Support for N-M portlist format */
if (end[0] == '-') {
@@ -111,18 +111,18 @@ parse_ports(cmdline_portlist_t *pl, const char *str)
pe = strtoul(first, &end, 10);
if (errno != 0 || end == first ||
(end[0] != 0 && end != last))
- return (-1);
+ return -1;
} else {
pe = ps;
}
if (ps > pe || pe >= sizeof (pl->map) * 8)
- return (-1);
+ return -1;
parse_set_list(pl, ps, pe);
}
- return (0);
+ return 0;
}
int
@@ -134,7 +134,7 @@ cmdline_parse_portlist(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
cmdline_portlist_t *pl;
if (!buf || ! *buf)
- return (-1);
+ return -1;
if (res && ressize < sizeof(cmdline_portlist_t))
return -1;
@@ -146,7 +146,7 @@ cmdline_parse_portlist(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
token_len++;
if (token_len >= PORTLIST_TOKEN_SIZE)
- return (-1);
+ return -1;
snprintf(portlist_str, token_len+1, "%s", buf);
@@ -155,7 +155,7 @@ cmdline_parse_portlist(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
if (strcmp("all", portlist_str) == 0)
pl->map = UINT32_MAX;
else if (parse_ports(pl, portlist_str) != 0)
- return (-1);
+ return -1;
}
return token_len;
@@ -86,7 +86,7 @@ cmdline_file_new(cmdline_parse_ctx_t *ctx, const char *prompt, const char *path)
dprintf("open() failed\n");
return NULL;
}
- return (cmdline_new(ctx, prompt, fd, -1));
+ return cmdline_new(ctx, prompt, fd, -1);
}
struct cmdline *
@@ -99,7 +99,7 @@ static int contigmem_modevent(module_t mod, int type, void *arg)
break;
}
- return (error);
+ return error;
}
moduledata_t contigmem_mod = {
@@ -128,14 +128,14 @@ contigmem_load()
if (contigmem_num_buffers > RTE_CONTIGMEM_MAX_NUM_BUFS) {
printf("%d buffers requested is greater than %d allowed\n",
contigmem_num_buffers, RTE_CONTIGMEM_MAX_NUM_BUFS);
- return (EINVAL);
+ return EINVAL;
}
if (contigmem_buffer_size < PAGE_SIZE ||
(contigmem_buffer_size & (contigmem_buffer_size - 1)) != 0) {
printf("buffer size 0x%lx is not greater than PAGE_SIZE and "
"power of two\n", contigmem_buffer_size);
- return (EINVAL);
+ return EINVAL;
}
for (i = 0; i < contigmem_num_buffers; i++) {
@@ -145,7 +145,7 @@ contigmem_load()
if (contigmem_buffers[i] == NULL) {
printf("contigmalloc failed for buffer %d\n", i);
- return (ENOMEM);
+ return ENOMEM;
}
printf("%2u: virt=%p phys=%p\n", i, contigmem_buffers[i],
@@ -164,7 +164,7 @@ contigmem_load()
contigmem_cdev = make_dev_credf(0, &contigmem_ops, 0, NULL, UID_ROOT,
GID_WHEEL, 0600, "contigmem");
- return (0);
+ return 0;
}
static int
@@ -183,7 +183,7 @@ contigmem_unload()
contigfree(contigmem_buffers[i], contigmem_buffer_size,
M_CONTIGMEM);
- return (0);
+ return 0;
}
static int
@@ -193,14 +193,14 @@ contigmem_physaddr(SYSCTL_HANDLER_ARGS)
int index = (int)(uintptr_t)arg1;
physaddr = (uint64_t)vtophys(contigmem_buffers[index]);
- return (sysctl_handle_64(oidp, &physaddr, 0, req));
+ return sysctl_handle_64(oidp, &physaddr, 0, req);
}
static int
contigmem_open(struct cdev *cdev, int fflags, int devtype,
struct thread *td)
{
- return (0);
+ return 0;
}
static int
@@ -209,7 +209,7 @@ contigmem_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
{
*paddr = offset;
- return (0);
+ return 0;
}
static int
@@ -222,12 +222,12 @@ contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
* app.
*/
if ((*offset/PAGE_SIZE) >= contigmem_num_buffers)
- return (EINVAL);
+ return EINVAL;
*offset = (vm_ooffset_t)vtophys(contigmem_buffers[*offset/PAGE_SIZE]);
*obj = vm_pager_allocate(OBJT_DEVICE, cdev, size, nprot, *offset,
curthread->td_ucred);
- return (0);
+ return 0;
}
@@ -561,12 +561,12 @@ rte_eal_init(int argc, char **argv)
enum rte_lcore_role_t
rte_eal_lcore_role(unsigned lcore_id)
{
- return (rte_config.lcore_role[lcore_id]);
+ return rte_config.lcore_role[lcore_id];
}
enum rte_proc_type_t
rte_eal_process_type(void)
{
- return (rte_config.process_type);
+ return rte_config.process_type;
}
@@ -179,10 +179,10 @@ pci_uio_map_secondary(struct rte_pci_device *dev)
!= uio_res->maps[i].addr) {
RTE_LOG(ERR, EAL,
"Cannot mmap device resource\n");
- return (-1);
+ return -1;
}
}
- return (0);
+ return 0;
}
RTE_LOG(ERR, EAL, "Cannot find resource for device\n");
@@ -209,7 +209,7 @@ pci_uio_map_resource(struct rte_pci_device *dev)
/* secondary processes - use already recorded details */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return (pci_uio_map_secondary(dev));
+ return pci_uio_map_secondary(dev);
snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
dev->addr.bus, dev->addr.devid, dev->addr.function);
@@ -233,7 +233,7 @@ pci_uio_map_resource(struct rte_pci_device *dev)
if ((uio_res = rte_zmalloc("UIO_RES", sizeof (*uio_res), 0)) == NULL) {
RTE_LOG(ERR, EAL,
"%s(): cannot store uio mmap details\n", __func__);
- return (-1);
+ return -1;
}
snprintf(uio_res->path, sizeof(uio_res->path), "%s", devname);
@@ -261,7 +261,7 @@ pci_uio_map_resource(struct rte_pci_device *dev)
(size_t)maps[j].size)
) == NULL) {
rte_free(uio_res);
- return (-1);
+ return -1;
}
maps[j].addr = mapaddr;
@@ -271,7 +271,7 @@ pci_uio_map_resource(struct rte_pci_device *dev)
TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
- return (0);
+ return 0;
}
/* Scan one pci sysfs entry, and fill the devices list from it. */
@@ -131,7 +131,7 @@ nic_uio_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
int prot, vm_memattr_t *memattr)
{
*paddr = offset;
- return (0);
+ return 0;
}
static int
@@ -197,10 +197,10 @@ nic_uio_probe (device_t dev)
pci_get_device(dev) == devices[i].dev) {
device_set_desc(dev, "Intel(R) DPDK PCI Device");
- return (BUS_PROBE_SPECIFIC);
+ return BUS_PROBE_SPECIFIC;
}
- return (ENXIO);
+ return ENXIO;
}
static int
@@ -305,7 +305,7 @@ nic_uio_unload(void)
static int
nic_uio_shutdown(void)
{
- return (0);
+ return 0;
}
static int
@@ -326,5 +326,5 @@ nic_uio_modevent(module_t mod, int type, void *arg)
break;
}
- return (0);
+ return 0;
}
@@ -120,7 +120,7 @@ align_phys_boundary(const struct rte_memseg *ms, size_t len, size_t align,
addr_offset = start - ms->phys_addr;
}
- return (addr_offset);
+ return addr_offset;
}
static const struct rte_memzone *
@@ -291,7 +291,7 @@ rte_pause(void) {}
static inline uint32_t
rte_bsf32(uint32_t v)
{
- return (__builtin_ctz(v));
+ return __builtin_ctz(v);
}
#ifndef offsetof
@@ -227,7 +227,7 @@ do { \
errno = 0; \
val = strtoul((in), &end, 16); \
if (errno != 0 || end[0] != (dlm) || val > (lim)) \
- return (-EINVAL); \
+ return -EINVAL; \
(fd) = (typeof (fd))val; \
(in) = end + 1; \
} while(0)
@@ -252,7 +252,7 @@ eal_parse_pci_BDF(const char *input, struct rte_pci_addr *dev_addr)
GET_PCIADDR_FIELD(input, dev_addr->bus, UINT8_MAX, ':');
GET_PCIADDR_FIELD(input, dev_addr->devid, UINT8_MAX, '.');
GET_PCIADDR_FIELD(input, dev_addr->function, UINT8_MAX, 0);
- return (0);
+ return 0;
}
/**
@@ -274,7 +274,7 @@ eal_parse_pci_DomBDF(const char *input, struct rte_pci_addr *dev_addr)
GET_PCIADDR_FIELD(input, dev_addr->bus, UINT8_MAX, ':');
GET_PCIADDR_FIELD(input, dev_addr->devid, UINT8_MAX, '.');
GET_PCIADDR_FIELD(input, dev_addr->function, UINT8_MAX, 0);
- return (0);
+ return 0;
}
#undef GET_PCIADDR_FIELD
@@ -852,13 +852,13 @@ rte_eal_init(int argc, char **argv)
enum rte_lcore_role_t
rte_eal_lcore_role(unsigned lcore_id)
{
- return (rte_config.lcore_role[lcore_id]);
+ return rte_config.lcore_role[lcore_id];
}
enum rte_proc_type_t
rte_eal_process_type(void)
{
- return (rte_config.process_type);
+ return rte_config.process_type;
}
int rte_eal_has_hugepages(void)
@@ -477,7 +477,7 @@ rte_intr_callback_register(struct rte_intr_handle *intr_handle,
if (write(intr_pipe.writefd, "1", 1) < 0)
return -EPIPE;
- return (ret);
+ return ret;
}
int
@@ -541,7 +541,7 @@ rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
ret = -EPIPE;
}
- return (ret);
+ return ret;
}
int
@@ -880,7 +880,7 @@ get_socket_mem_size(int socket)
size += hpi->hugepage_sz * hpi->num_pages[socket];
}
- return (size);
+ return size;
}
/*
@@ -1339,7 +1339,7 @@ rte_eal_hugepage_init(void)
"of memory.\n",
i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
RTE_MAX_MEMSEG);
- return (-ENOMEM);
+ return -ENOMEM;
}
return 0;
@@ -262,7 +262,7 @@ int igb_procfs_topdir_init(void)
{
igb_top_dir = proc_mkdir("driver/igb", NULL);
if (igb_top_dir == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
return 0;
}
@@ -88,7 +88,7 @@ kni_net_open(struct net_device *dev)
req.if_up = 1;
ret = kni_net_process_request(kni, &req);
- return (ret == 0 ? req.result : ret);
+ return (ret == 0) ? req.result : ret;
}
static int
@@ -107,7 +107,7 @@ kni_net_release(struct net_device *dev)
req.if_up = 0;
ret = kni_net_process_request(kni, &req);
- return (ret == 0 ? req.result : ret);
+ return (ret == 0) ? req.result : ret;
}
/*
@@ -511,7 +511,7 @@ kni_net_change_mtu(struct net_device *dev, int new_mtu)
if (ret == 0 && req.result == 0)
dev->mtu = new_mtu;
- return (ret == 0 ? req.result : ret);
+ return (ret == 0) ? req.result : ret;
}
/*
@@ -597,7 +597,7 @@ kni_net_header(struct sk_buff *skb, struct net_device *dev,
memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len);
eth->h_proto = htons(type);
- return (dev->hard_header_len);
+ return dev->hard_header_len;
}
@@ -200,7 +200,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
ip_frag_key_invalidate(&fp->key);
IP_FRAG_MBUF2DR(dr, mb);
- return (NULL);
+ return NULL;
}
fp->frags[idx].ofs = ofs;
@@ -211,7 +211,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
/* not all fragments are collected yet. */
if (likely (fp->frag_size < fp->total_size)) {
- return (mb);
+ return mb;
/* if we collected all fragments, then try to reassemble. */
} else if (fp->frag_size == fp->total_size &&
@@ -259,7 +259,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
/* we are done with that entry, invalidate it. */
ip_frag_key_invalidate(&fp->key);
- return (mb);
+ return mb;
}
@@ -327,7 +327,7 @@ ip_frag_find(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, fail_total, (pkt == NULL));
tbl->last = pkt;
- return (pkt);
+ return pkt;
}
struct ip_frag_pkt *
@@ -347,7 +347,7 @@ ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
assoc = tbl->bucket_entries;
if (tbl->last != NULL && ip_frag_key_cmp(key, &tbl->last->key) == 0)
- return (tbl->last);
+ return tbl->last;
/* different hashing methods for IPv4 and IPv6 */
if (key->key_len == IPV4_KEYLEN)
@@ -414,5 +414,5 @@ ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
*free = empty;
*stale = old;
- return (NULL);
+ return NULL;
}
@@ -83,7 +83,7 @@ rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
nb_entries > UINT32_MAX || nb_entries == 0 ||
nb_entries < max_entries) {
RTE_LOG(ERR, USER1, "%s: invalid input parameter\n", __func__);
- return (NULL);
+ return NULL;
}
sz = sizeof (*tbl) + nb_entries * sizeof (tbl->pkt[0]);
@@ -92,7 +92,7 @@ rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
RTE_LOG(ERR, USER1,
"%s: allocation of %zu bytes at socket %d failed do\n",
__func__, sz, socket_id);
- return (NULL);
+ return NULL;
}
RTE_LOG(INFO, USER1, "%s: allocated of %zu bytes at socket %d\n",
@@ -106,7 +106,7 @@ rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
tbl->entry_mask = (tbl->nb_entries - 1) & ~(tbl->bucket_entries - 1);
TAILQ_INIT(&(tbl->lru));
- return (tbl);
+ return tbl;
}
/* dump frag table statistics to file */
@@ -73,7 +73,7 @@ ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
/* error - hole in the packet. */
if (m == prev) {
- return (NULL);
+ return NULL;
}
}
@@ -94,7 +94,7 @@ ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
ip_hdr->hdr_checksum = 0;
- return (m);
+ return m;
}
/*
@@ -151,7 +151,7 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
/* try to find/add entry into the fragment's table. */
if ((fp = ip_frag_find(tbl, dr, &key, tms)) == NULL) {
IP_FRAG_MBUF2DR(dr, mb);
- return (NULL);
+ return NULL;
}
IP_FRAG_LOG(DEBUG, "%s:%d:\n"
@@ -178,5 +178,5 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
fp, fp->key.src_dst[0], fp->key.id, fp->start,
fp->total_size, fp->frag_size, fp->last_idx);
- return (mb);
+ return mb;
}
@@ -123,7 +123,7 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely (frag_size * nb_pkts_out <
(uint16_t)(pkt_in->pkt_len - sizeof (struct ipv6_hdr))))
- return (-EINVAL);
+ return -EINVAL;
in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv6_hdr *);
@@ -142,7 +142,7 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
out_pkt = rte_pktmbuf_alloc(pool_direct);
if (unlikely(out_pkt == NULL)) {
__free_fragments(pkts_out, out_pkt_pos);
- return (-ENOMEM);
+ return -ENOMEM;
}
/* Reserve space for the IP header that will be built later */
@@ -160,7 +160,7 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
if (unlikely(out_seg == NULL)) {
rte_pktmbuf_free(out_pkt);
__free_fragments(pkts_out, out_pkt_pos);
- return (-ENOMEM);
+ return -ENOMEM;
}
out_seg_prev->next = out_seg;
out_seg_prev = out_seg;
@@ -211,5 +211,5 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
out_pkt_pos ++;
}
- return (out_pkt_pos);
+ return out_pkt_pos;
}
@@ -371,7 +371,7 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
/* If rule is found return the rule index. */
if (lpm->rules_tbl[rule_index].ip == ip_masked)
- return (rule_index);
+ return rule_index;
}
/* If rule is not found return -EINVAL. */
@@ -530,7 +530,7 @@ static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
m = (struct rte_mbuf *)mb;
RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
rte_mbuf_refcnt_set(m, 1);
- return (m);
+ return m;
}
/**
@@ -626,7 +626,7 @@ void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
static inline int
rte_is_ctrlmbuf(struct rte_mbuf *m)
{
- return (!!(m->ol_flags & CTRL_MBUF_FLAG));
+ return !!(m->ol_flags & CTRL_MBUF_FLAG);
}
/* Operations on pkt mbuf */
@@ -797,7 +797,7 @@ static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
struct rte_mbuf *m;
if ((m = __rte_mbuf_raw_alloc(mp)) != NULL)
rte_pktmbuf_reset(m);
- return (m);
+ return m;
}
/**
@@ -910,9 +910,9 @@ __rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
if (rte_mbuf_refcnt_update(md, -1) == 0)
__rte_mbuf_raw_free(md);
}
- return(m);
+ return m;
}
- return (NULL);
+ return NULL;
}
/**
@@ -980,7 +980,7 @@ static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
uint8_t nseg;
if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
- return (NULL);
+ return NULL;
mi = mc;
prev = &mi->next;
@@ -1002,11 +1002,11 @@ static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
/* Allocation of new indirect segment failed */
if (unlikely (mi == NULL)) {
rte_pktmbuf_free(mc);
- return (NULL);
+ return NULL;
}
__rte_mbuf_sanity_check(mc, 1);
- return (mc);
+ return mc;
}
/**
@@ -129,5 +129,5 @@ rte_dom0_mempool_create(const char *name, unsigned elt_num, unsigned elt_size,
free(pa);
- return (mp);
+ return mp;
}
@@ -200,7 +200,7 @@ rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align,
}
}
- return (i);
+ return i;
}
/*
@@ -309,7 +309,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
/* this is the size of an object, including header and trailer */
sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
- return (sz->total_size);
+ return sz->total_size;
}
@@ -330,7 +330,7 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift)
sz = RTE_ALIGN_CEIL(elt_sz, pg_sz) * elt_num;
}
- return (sz);
+ return sz;
}
/*
@@ -359,12 +359,12 @@ rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
if ((n = rte_mempool_obj_iter(vaddr, elt_num, elt_sz, 1,
paddr, pg_num, pg_shift, mempool_lelem_iter,
&uv)) != elt_num) {
- return (-n);
+ return -n;
}
uv = RTE_ALIGN_CEIL(uv, pg_sz);
usz = uv - va;
- return (usz);
+ return usz;
}
/* create the mempool */
@@ -376,18 +376,18 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
int socket_id, unsigned flags)
{
#ifdef RTE_LIBRTE_XEN_DOM0
- return (rte_dom0_mempool_create(name, n, elt_size,
+ return rte_dom0_mempool_create(name, n, elt_size,
cache_size, private_data_size,
mp_init, mp_init_arg,
obj_init, obj_init_arg,
- socket_id, flags));
+ socket_id, flags);
#else
- return (rte_mempool_xmem_create(name, n, elt_size,
+ return rte_mempool_xmem_create(name, n, elt_size,
cache_size, private_data_size,
mp_init, mp_init_arg,
obj_init, obj_init_arg,
socket_id, flags,
- NULL, NULL, MEMPOOL_PG_NUM_DEFAULT, MEMPOOL_PG_SHIFT_MAX));
+ NULL, NULL, MEMPOOL_PG_NUM_DEFAULT, MEMPOOL_PG_SHIFT_MAX);
#endif
}
@@ -243,7 +243,7 @@ rte_ipv4_cksum(const struct ipv4_hdr *ipv4_hdr)
{
uint16_t cksum;
cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct ipv4_hdr));
- return ((cksum == 0xffff) ? cksum : ~cksum);
+ return (cksum == 0xffff) ? cksum : ~cksum;
}
/**
@@ -277,7 +277,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
rte_intr_callback_register(&(pci_dev->intr_handle),
eth_em_interrupt_handler, (void *)eth_dev);
- return (0);
+ return 0;
}
static struct eth_driver rte_em_pmd = {
@@ -375,11 +375,11 @@ em_hw_init(struct e1000_hw *hw)
PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
"SOL/IDER session");
}
- return (0);
+ return 0;
error:
em_hw_control_release(hw);
- return (diag);
+ return diag;
}
static int
@@ -392,7 +392,7 @@ eth_em_configure(struct rte_eth_dev *dev)
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
PMD_INIT_FUNC_TRACE();
- return (0);
+ return 0;
}
static void
@@ -476,7 +476,7 @@ eth_em_start(struct rte_eth_dev *dev)
/* Initialize the hardware */
if (em_hardware_init(hw)) {
PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
- return (-EIO);
+ return -EIO;
}
E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
@@ -567,14 +567,14 @@ eth_em_start(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "<<");
- return (0);
+ return 0;
error_invalid_config:
PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
dev->data->dev_conf.link_speed,
dev->data->dev_conf.link_duplex, dev->data->port_id);
em_dev_clear_queues(dev);
- return (-EINVAL);
+ return -EINVAL;
}
/*********************************************************************
@@ -687,9 +687,9 @@ em_hardware_init(struct e1000_hw *hw)
diag = e1000_init_hw(hw);
if (diag < 0)
- return (diag);
+ return diag;
e1000_check_for_link(hw);
- return (0);
+ return 0;
}
/* This function is based on em_update_stats_counters() in e1000/if_em.c */
@@ -843,15 +843,15 @@ em_get_max_pktlen(const struct e1000_hw *hw)
case e1000_pch2lan:
case e1000_82574:
case e1000_80003es2lan: /* 9K Jumbo Frame size */
- return (0x2412);
+ return 0x2412;
case e1000_pchlan:
- return (0x1000);
+ return 0x1000;
/* Adapters that do not support jumbo frames */
case e1000_82583:
case e1000_ich8lan:
- return (ETHER_MAX_LEN);
+ return ETHER_MAX_LEN;
default:
- return (MAX_JUMBO_FRAME_SIZE);
+ return MAX_JUMBO_FRAME_SIZE;
}
}
@@ -1223,7 +1223,7 @@ eth_em_interrupt_setup(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC);
rte_intr_enable(&(dev->pci_dev->intr_handle));
- return (0);
+ return 0;
}
/*
@@ -1349,7 +1349,7 @@ eth_em_led_on(struct rte_eth_dev *dev)
struct e1000_hw *hw;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+ return (e1000_led_on(hw) == E1000_SUCCESS) ? 0 : -ENOTSUP;
}
static int
@@ -1358,7 +1358,7 @@ eth_em_led_off(struct rte_eth_dev *dev)
struct e1000_hw *hw;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+ return (e1000_led_off(hw) == E1000_SUCCESS) ? 0 : -ENOTSUP;
}
static int
@@ -1430,7 +1430,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
- return (-EINVAL);
+ return -EINVAL;
}
hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
@@ -1460,7 +1460,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
}
PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
- return (-EIO);
+ return -EIO;
}
static void
@@ -85,7 +85,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
m = __rte_mbuf_raw_alloc(mp);
__rte_mbuf_sanity_check_raw(m, 0);
- return (m);
+ return m;
}
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
@@ -310,10 +310,10 @@ what_ctx_update(struct em_tx_queue *txq, uint64_t flags,
if (likely (txq->ctx_cache.flags == flags &&
((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
txq->ctx_cache.cmp_mask) == 0))
- return (EM_CTX_0);
+ return EM_CTX_0;
/* Mismatch */
- return (EM_CTX_NUM);
+ return EM_CTX_NUM;
}
/* Reset transmit descriptors after they have been used */
@@ -371,7 +371,7 @@ em_xmit_cleanup(struct em_tx_queue *txq)
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
/* No Error */
- return (0);
+ return 0;
}
static inline uint32_t
@@ -383,7 +383,7 @@ tx_desc_cksum_flags_to_upper(uint64_t ol_flags)
tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
- return (tmp);
+ return tmp;
}
uint16_t
@@ -492,7 +492,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
if (em_xmit_cleanup(txq) != 0) {
/* Could not clean any descriptors */
if (nb_tx == 0)
- return (0);
+ return 0;
goto end_of_tx;
}
}
@@ -629,7 +629,7 @@ end_of_tx:
E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
- return (nb_tx);
+ return nb_tx;
}
/*********************************************************************
@@ -658,7 +658,7 @@ rx_desc_error_to_pkt_flags(uint32_t rx_error)
pkt_flags |= PKT_RX_IP_CKSUM_BAD;
if (rx_error & E1000_RXD_ERR_TCPE)
pkt_flags |= PKT_RX_L4_CKSUM_BAD;
- return (pkt_flags);
+ return pkt_flags;
}
uint16_t
@@ -832,7 +832,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
- return (nb_rx);
+ return nb_rx;
}
uint16_t
@@ -1077,7 +1077,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
- return (nb_rx);
+ return nb_rx;
}
/*
@@ -1115,7 +1115,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
queue_id);
if ((mz = rte_memzone_lookup(z_name)) != 0)
- return (mz);
+ return mz;
#ifdef RTE_LIBRTE_XEN_DOM0
return rte_memzone_reserve_bounded(z_name, ring_size,
@@ -1274,19 +1274,19 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
socket_id)) == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
/* Allocate the tx queue data structure. */
if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
RTE_CACHE_LINE_SIZE)) == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
/* Allocate software ring */
if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
sizeof(txq->sw_ring[0]) * nb_desc,
RTE_CACHE_LINE_SIZE)) == NULL) {
em_tx_queue_release(txq);
- return (-ENOMEM);
+ return -ENOMEM;
}
txq->nb_tx_desc = nb_desc;
@@ -1312,7 +1312,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
em_reset_tx_queue(txq);
dev->data->tx_queues[queue_idx] = txq;
- return (0);
+ return 0;
}
static void
@@ -1379,7 +1379,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
(nb_desc > EM_MAX_RING_DESC) ||
(nb_desc < EM_MIN_RING_DESC)) {
- return (-EINVAL);
+ return -EINVAL;
}
/*
@@ -1388,7 +1388,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
if (rx_conf->rx_drop_en) {
PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
"device");
- return (-EINVAL);
+ return -EINVAL;
}
/* Free memory prior to re-allocation if needed. */
@@ -1401,19 +1401,19 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
socket_id)) == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
/* Allocate the RX queue data structure. */
if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
RTE_CACHE_LINE_SIZE)) == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
/* Allocate software ring. */
if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
sizeof (rxq->sw_ring[0]) * nb_desc,
RTE_CACHE_LINE_SIZE)) == NULL) {
em_rx_queue_release(rxq);
- return (-ENOMEM);
+ return -ENOMEM;
}
rxq->mb_pool = mp;
@@ -1442,7 +1442,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
dev->data->rx_queues[queue_idx] = rxq;
em_reset_rx_queue(rxq);
- return (0);
+ return 0;
}
uint32_t
@@ -1575,12 +1575,12 @@ em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz)
i++) {
if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
*bufsz = bufsz_to_rctl[i].bufsz;
- return (bufsz_to_rctl[i].rctl);
+ return bufsz_to_rctl[i].rctl;
}
}
/* Should never happen. */
- return (-EINVAL);
+ return -EINVAL;
}
static int
@@ -1601,7 +1601,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
"queue_id=%hu", rxq->queue_id);
- return (-ENOMEM);
+ return -ENOMEM;
}
dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
@@ -605,7 +605,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
err_late:
igb_hw_control_release(hw);
- return (error);
+ return error;
}
/*
@@ -731,7 +731,7 @@ rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unuse
PMD_INIT_FUNC_TRACE();
rte_eth_driver_register(&rte_igbvf_pmd);
- return (0);
+ return 0;
}
static int
@@ -744,7 +744,7 @@ eth_igb_configure(struct rte_eth_dev *dev)
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
PMD_INIT_FUNC_TRACE();
- return (0);
+ return 0;
}
static int
@@ -778,7 +778,7 @@ eth_igb_start(struct rte_eth_dev *dev)
/* Initialize the hardware */
if (igb_hardware_init(hw)) {
PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
- return (-EIO);
+ return -EIO;
}
E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
@@ -904,14 +904,14 @@ eth_igb_start(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "<<");
- return (0);
+ return 0;
error_invalid_config:
PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
dev->data->dev_conf.link_speed,
dev->data->dev_conf.link_duplex, dev->data->port_id);
igb_dev_clear_queues(dev);
- return (-EINVAL);
+ return -EINVAL;
}
/*********************************************************************
@@ -1075,13 +1075,13 @@ igb_hardware_init(struct e1000_hw *hw)
diag = e1000_init_hw(hw);
if (diag < 0)
- return (diag);
+ return diag;
E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
e1000_get_phy_info(hw);
e1000_check_for_link(hw);
- return (0);
+ return 0;
}
/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
@@ -1968,7 +1968,7 @@ eth_igb_led_on(struct rte_eth_dev *dev)
struct e1000_hw *hw;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+ return (e1000_led_on(hw) == E1000_SUCCESS) ? 0 : -ENOTSUP;
}
static int
@@ -1977,7 +1977,7 @@ eth_igb_led_off(struct rte_eth_dev *dev)
struct e1000_hw *hw;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+ return (e1000_led_off(hw) == E1000_SUCCESS) ? 0 : -ENOTSUP;
}
static int
@@ -2049,7 +2049,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
- return (-EINVAL);
+ return -EINVAL;
}
hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
@@ -2079,7 +2079,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
}
PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
- return (-EIO);
+ return -EIO;
}
#define E1000_RAH_POOLSEL_SHIFT (18)
@@ -2284,7 +2284,7 @@ static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
if (on)
msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
- return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
+ return mbx->ops.write_posted(hw, msgbuf, 2, 0);
}
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
@@ -85,7 +85,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
m = __rte_mbuf_raw_alloc(mp);
__rte_mbuf_sanity_check_raw(m, 0);
- return (m);
+ return m;
}
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
@@ -321,7 +321,7 @@ what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
}
/* Mismatch, use the previous context */
- return (IGB_CTX_NUM);
+ return IGB_CTX_NUM;
}
static inline uint32_t
@@ -473,7 +473,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
*/
if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
if (nb_tx == 0)
- return (0);
+ return 0;
goto end_of_tx;
}
@@ -582,7 +582,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
(unsigned) tx_id, (unsigned) nb_tx);
txq->tx_tail = tx_id;
- return (nb_tx);
+ return nb_tx;
}
/*********************************************************************
@@ -821,7 +821,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
- return (nb_rx);
+ return nb_rx;
}
uint16_t
@@ -1074,7 +1074,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
- return (nb_rx);
+ return nb_rx;
}
/*
@@ -1244,7 +1244,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
RTE_CACHE_LINE_SIZE);
if (txq == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
/*
* Allocate TX ring hardware descriptors. A memzone large enough to
@@ -1256,7 +1256,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
size, socket_id);
if (tz == NULL) {
igb_tx_queue_release(txq);
- return (-ENOMEM);
+ return -ENOMEM;
}
txq->nb_tx_desc = nb_desc;
@@ -1283,7 +1283,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (txq->sw_ring == NULL) {
igb_tx_queue_release(txq);
- return (-ENOMEM);
+ return -ENOMEM;
}
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
@@ -1292,7 +1292,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
dev->tx_pkt_burst = eth_igb_xmit_pkts;
dev->data->tx_queues[queue_idx] = txq;
- return (0);
+ return 0;
}
static void
@@ -1364,7 +1364,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
*/
if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
(nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
- return (-EINVAL);
+ return -EINVAL;
}
/* Free memory prior to re-allocation if needed */
@@ -1377,7 +1377,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
RTE_CACHE_LINE_SIZE);
if (rxq == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->pthresh = rx_conf->rx_thresh.pthresh;
@@ -1403,7 +1403,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
if (rz == NULL) {
igb_rx_queue_release(rxq);
- return (-ENOMEM);
+ return -ENOMEM;
}
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
@@ -1420,7 +1420,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (rxq->sw_ring == NULL) {
igb_rx_queue_release(rxq);
- return (-ENOMEM);
+ return -ENOMEM;
}
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
@@ -1863,7 +1863,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
"queue_id=%hu", rxq->queue_id);
- return (-ENOMEM);
+ return -ENOMEM;
}
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
@@ -876,7 +876,7 @@ handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
FM10K_RX_FREE_THRESH_MIN(q),
FM10K_RX_FREE_THRESH_DIV(q));
- return (-EINVAL);
+ return -EINVAL;
}
q->alloc_thresh = rx_free_thresh;
@@ -936,7 +936,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
/* make sure the mempool element size can account for alignment. */
if (!mempool_element_size_valid(mp)) {
PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
- return (-EINVAL);
+ return -EINVAL;
}
/* make sure a valid number of descriptors have been requested */
@@ -948,7 +948,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
"and a multiple of %u",
nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
FM10K_MULT_RX_DESC);
- return (-EINVAL);
+ return -EINVAL;
}
/*
@@ -966,7 +966,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
socket_id);
if (q == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
- return (-ENOMEM);
+ return -ENOMEM;
}
/* setup queue */
@@ -977,7 +977,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
if (handle_rxconf(q, conf))
- return (-EINVAL);
+ return -EINVAL;
/* allocate memory for the software ring */
q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
@@ -986,7 +986,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
if (q->sw_ring == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate software ring");
rte_free(q);
- return (-ENOMEM);
+ return -ENOMEM;
}
/*
@@ -1001,7 +1001,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
rte_free(q->sw_ring);
rte_free(q);
- return (-ENOMEM);
+ return -ENOMEM;
}
q->hw_ring = mz->addr;
q->hw_ring_phys_addr = mz->phys_addr;
@@ -1043,7 +1043,7 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
FM10K_TX_FREE_THRESH_MIN(q),
FM10K_TX_FREE_THRESH_DIV(q));
- return (-EINVAL);
+ return -EINVAL;
}
q->free_thresh = tx_free_thresh;
@@ -1067,7 +1067,7 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
FM10K_TX_RS_THRESH_MIN(q),
FM10K_TX_RS_THRESH_DIV(q));
- return (-EINVAL);
+ return -EINVAL;
}
q->rs_thresh = tx_rs_thresh;
@@ -1095,7 +1095,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
"and a multiple of %u",
nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
FM10K_MULT_TX_DESC);
- return (-EINVAL);
+ return -EINVAL;
}
/*
@@ -1113,7 +1113,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
socket_id);
if (q == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
- return (-ENOMEM);
+ return -ENOMEM;
}
/* setup queue */
@@ -1123,7 +1123,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
if (handle_txconf(q, conf))
- return (-EINVAL);
+ return -EINVAL;
/* allocate memory for the software ring */
q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
@@ -1132,7 +1132,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
if (q->sw_ring == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate software ring");
rte_free(q);
- return (-ENOMEM);
+ return -ENOMEM;
}
/*
@@ -1147,7 +1147,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
rte_free(q->sw_ring);
rte_free(q);
- return (-ENOMEM);
+ return -ENOMEM;
}
q->hw_ring = mz->addr;
q->hw_ring_phys_addr = mz->phys_addr;
@@ -1164,7 +1164,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
rte_free(q->sw_ring);
rte_free(q);
- return (-ENOMEM);
+ return -ENOMEM;
}
dev->data->tx_queues[queue_id] = q;
@@ -1647,7 +1647,7 @@ i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
return queue_idx % pf->vmdq_nb_qps;
else {
PMD_INIT_LOG(ERR, "Fail to get queue offset");
- return (uint16_t)(-1);
+ return (uint16_t)-1;
}
}
@@ -1821,7 +1821,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (!rxq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
"rx queue data structure");
- return (-ENOMEM);
+ return -ENOMEM;
}
rxq->mp = mp;
rxq->nb_rx_desc = nb_desc;
@@ -1851,7 +1851,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
- return (-ENOMEM);
+ return -ENOMEM;
}
/* Zero all the descriptors in the ring. */
@@ -1880,7 +1880,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (!rxq->sw_ring) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
- return (-ENOMEM);
+ return -ENOMEM;
}
i40e_reset_rx_queue(rxq);
@@ -2105,7 +2105,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
if (!txq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
"tx queue structure");
- return (-ENOMEM);
+ return -ENOMEM;
}
/* Allocate TX hardware ring descriptors. */
@@ -2119,7 +2119,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
- return (-ENOMEM);
+ return -ENOMEM;
}
txq->nb_tx_desc = nb_desc;
@@ -2156,7 +2156,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
if (!txq->sw_ring) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
- return (-ENOMEM);
+ return -ENOMEM;
}
i40e_reset_tx_queue(txq);
@@ -268,7 +268,7 @@ ixgbe_bypass_get_media_type(struct ixgbe_hw *hw)
} else {
media_type = ixgbe_get_media_type_82599(hw);
}
- return (media_type);
+ return media_type;
}
/*
@@ -310,5 +310,5 @@ ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
hw->mac.ops.flap_tx_laser = NULL;
}
- return (rc);
+ return rc;
}
@@ -153,7 +153,7 @@ ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state)
*/
*state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) & BYPASS_STATUS_OFF_MASK;
- return (ret_val);
+ return ret_val;
}
@@ -1025,7 +1025,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
*/
if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
- return (diag);
+ return diag;
}
/* negotiate mailbox API version to use with the PF. */
@@ -1076,7 +1076,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
default:
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
- return (-EIO);
+ return -EIO;
}
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
@@ -1134,7 +1134,7 @@ rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unus
PMD_INIT_FUNC_TRACE();
rte_eth_driver_register(&rte_ixgbevf_pmd);
- return (0);
+ return 0;
}
static int
@@ -1597,7 +1597,7 @@ skip_link_setup:
ixgbe_restore_statistics_mapping(dev);
- return (0);
+ return 0;
error:
PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
@@ -2451,7 +2451,7 @@ ixgbe_dev_led_on(struct rte_eth_dev *dev)
struct ixgbe_hw *hw;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+ return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS) ? 0 : -ENOTSUP;
}
static int
@@ -2460,7 +2460,7 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev)
struct ixgbe_hw *hw;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+ return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS) ? 0 : -ENOTSUP;
}
static int
@@ -2544,7 +2544,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
- return (-EINVAL);
+ return -EINVAL;
}
hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
@@ -2765,7 +2765,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
(pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
- return (-EINVAL);
+ return -EINVAL;
}
hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
@@ -3145,7 +3145,7 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
- return (-1);
+ return -1;
}
return 0;
@@ -3202,7 +3202,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
/* The UTA table only exists on 82599 hardware and newer */
if (hw->mac.type < ixgbe_mac_82599EB)
- return (-ENOTSUP);
+ return -ENOTSUP;
vector = ixgbe_uta_vector(hw,mac_addr);
uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
@@ -3245,7 +3245,7 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
/* The UTA table only exists on 82599 hardware and newer */
if (hw->mac.type < ixgbe_mac_82599EB)
- return (-ENOTSUP);
+ return -ENOTSUP;
if(on) {
for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
@@ -3294,10 +3294,10 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
if (hw->mac.type == ixgbe_mac_82598EB) {
PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
" on 82599 hardware and newer");
- return (-ENOTSUP);
+ return -ENOTSUP;
}
if (ixgbe_vmdq_mode_check(hw) < 0)
- return (-ENOTSUP);
+ return -ENOTSUP;
val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
@@ -3322,7 +3322,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (ixgbe_vmdq_mode_check(hw) < 0)
- return (-ENOTSUP);
+ return -ENOTSUP;
addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
reg = IXGBE_READ_REG(hw, addr);
@@ -3349,7 +3349,7 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (ixgbe_vmdq_mode_check(hw) < 0)
- return (-ENOTSUP);
+ return -ENOTSUP;
addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
reg = IXGBE_READ_REG(hw, addr);
@@ -3375,7 +3375,7 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (ixgbe_vmdq_mode_check(hw) < 0)
- return (-ENOTSUP);
+ return -ENOTSUP;
for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
@@ -3412,12 +3412,12 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (ixgbe_vmdq_mode_check(hw) < 0)
- return (-ENOTSUP);
+ return -ENOTSUP;
/* Check if vlan mask is valid */
if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
if (mirror_conf->vlan.vlan_mask == 0)
- return (-EINVAL);
+ return -EINVAL;
}
/* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
@@ -3428,14 +3428,14 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
reg_index = ixgbe_find_vlvf_slot(hw,
mirror_conf->vlan.vlan_id[i]);
if(reg_index < 0)
- return (-EINVAL);
+ return -EINVAL;
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
if ((vlvf & IXGBE_VLVF_VIEN) &&
((vlvf & IXGBE_VLVF_VLANID_MASK)
== mirror_conf->vlan.vlan_id[i]))
vlan_mask |= (1ULL << reg_index);
else
- return (-EINVAL);
+ return -EINVAL;
}
}
@@ -3523,7 +3523,7 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
if (ixgbe_vmdq_mode_check(hw) < 0)
- return (-ENOTSUP);
+ return -ENOTSUP;
memset(&mr_info->mr_conf[rule_id], 0,
sizeof(struct rte_eth_vmdq_mirror_conf));
@@ -94,7 +94,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
m = __rte_mbuf_raw_alloc(mp);
__rte_mbuf_sanity_check_raw(m, 0);
- return (m);
+ return m;
}
@@ -461,7 +461,7 @@ what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
}
/* Mismatch, use the previous context */
- return (IXGBE_CTX_NUM);
+ return IXGBE_CTX_NUM;
}
static inline uint32_t
@@ -552,7 +552,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
/* No Error */
- return (0);
+ return 0;
}
uint16_t
@@ -668,7 +668,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
if (ixgbe_xmit_cleanup(txq) != 0) {
/* Could not clean any descriptors */
if (nb_tx == 0)
- return (0);
+ return 0;
goto end_of_tx;
}
@@ -697,7 +697,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* descriptors
*/
if (nb_tx == 0)
- return (0);
+ return 0;
goto end_of_tx;
}
}
@@ -847,7 +847,7 @@ end_of_tx:
IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
- return (nb_tx);
+ return nb_tx;
}
/*********************************************************************
@@ -1037,7 +1037,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
rxq->rx_free_thresh);
if (unlikely(diag != 0))
- return (-ENOMEM);
+ return -ENOMEM;
rxdp = &rxq->rx_ring[alloc_idx];
for (i = 0; i < rxq->rx_free_thresh; ++i) {
@@ -1372,7 +1372,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
- return (nb_rx);
+ return nb_rx;
}
/**
@@ -2003,7 +2003,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
/*
* Allocate TX ring hardware descriptors. A memzone large enough to
@@ -2015,7 +2015,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
socket_id);
if (tz == NULL) {
ixgbe_tx_queue_release(txq);
- return (-ENOMEM);
+ return -ENOMEM;
}
txq->nb_tx_desc = nb_desc;
@@ -2055,7 +2055,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->sw_ring == NULL) {
ixgbe_tx_queue_release(txq);
- return (-ENOMEM);
+ return -ENOMEM;
}
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
@@ -2068,7 +2068,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
dev->data->tx_queues[queue_idx] = txq;
- return (0);
+ return 0;
}
/**
@@ -2284,7 +2284,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
(nb_desc > IXGBE_MAX_RING_DESC) ||
(nb_desc < IXGBE_MIN_RING_DESC)) {
- return (-EINVAL);
+ return -EINVAL;
}
/* Free memory prior to re-allocation if needed... */
@@ -2297,7 +2297,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
@@ -2319,7 +2319,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
RX_RING_SZ, socket_id);
if (rz == NULL) {
ixgbe_rx_queue_release(rxq);
- return (-ENOMEM);
+ return -ENOMEM;
}
/*
@@ -2379,7 +2379,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq->sw_ring) {
ixgbe_rx_queue_release(rxq);
- return (-ENOMEM);
+ return -ENOMEM;
}
/*
@@ -2396,7 +2396,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq->sw_sc_ring) {
ixgbe_rx_queue_release(rxq);
- return (-ENOMEM);
+ return -ENOMEM;
}
PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
@@ -3452,7 +3452,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
(unsigned) rxq->queue_id);
- return (-ENOMEM);
+ return -ENOMEM;
}
rte_mbuf_refcnt_set(mbuf, 1);
@@ -296,7 +296,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
}
if (vq == NULL) {
PMD_INIT_LOG(ERR, "%s: Can not allocate virtqueue", __func__);
- return (-ENOMEM);
+ return -ENOMEM;
}
vq->hw = hw;
@@ -1293,7 +1293,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
if (rxmode->hw_ip_checksum) {
PMD_DRV_LOG(ERR, "HW IP checksum not supported");
- return (-EINVAL);
+ return -EINVAL;
}
hw->vlan_strip = rxmode->hw_vlan_strip;