Use __atomic_fetch_{add,and,or,sub,xor} instead of
__atomic_{add,and,or,sub,xor}_fetch when we have no interest in the
result of the operation.
This reduces unnecessary code generation: the `op_fetch` variants compute
and return the post-operation value, which was never used at these call sites.
The change also brings the code into closer alignment with the atomics
available in the C11 standard and will reduce review effort when those
are integrated.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/cxgbe/clip_tbl.c | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 12 ++++++------
drivers/net/cxgbe/l2t.c | 4 ++--
drivers/net/cxgbe/mps_tcam.c | 2 +-
drivers/net/cxgbe/smt.c | 4 ++--
5 files changed, 12 insertions(+), 12 deletions(-)
@@ -129,7 +129,7 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce->type = FILTER_TYPE_IPV4;
}
} else {
- __atomic_add_fetch(&ce->refcnt, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&ce->refcnt, 1, __ATOMIC_RELAXED);
}
t4_os_unlock(&ce->lock);
}
@@ -418,14 +418,14 @@ void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
if (t->tid_tab[tid]) {
t->tid_tab[tid] = NULL;
- __atomic_sub_fetch(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_sub(&t->conns_in_use, 1, __ATOMIC_RELAXED);
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_sub_fetch(&t->hash_tids_in_use, 1,
+ __atomic_fetch_sub(&t->hash_tids_in_use, 1,
__ATOMIC_RELAXED);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_sub_fetch(&t->tids_in_use, 1,
+ __atomic_fetch_sub(&t->tids_in_use, 1,
__ATOMIC_RELAXED);
}
}
@@ -448,15 +448,15 @@ void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
t->tid_tab[tid] = data;
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_add_fetch(&t->hash_tids_in_use, 1,
+ __atomic_fetch_add(&t->hash_tids_in_use, 1,
__ATOMIC_RELAXED);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_add_fetch(&t->tids_in_use, 1,
+ __atomic_fetch_add(&t->tids_in_use, 1,
__ATOMIC_RELAXED);
}
- __atomic_add_fetch(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&t->conns_in_use, 1, __ATOMIC_RELAXED);
}
/**
@@ -15,7 +15,7 @@
void cxgbe_l2t_release(struct l2t_entry *e)
{
if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_sub_fetch(&e->refcnt, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
}
/**
@@ -162,7 +162,7 @@ static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
dev_debug(adap, "Failed to write L2T entry: %d",
ret);
} else {
- __atomic_add_fetch(&e->refcnt, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
}
t4_os_unlock(&e->lock);
}
@@ -76,7 +76,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
t4_os_write_lock(&mpstcam->lock);
entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
if (entry) {
- __atomic_add_fetch(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&entry->refcnt, 1, __ATOMIC_RELAXED);
t4_os_write_unlock(&mpstcam->lock);
return entry->idx;
}
@@ -170,7 +170,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
e->state = SMT_STATE_SWITCHING;
__atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
} else {
- __atomic_add_fetch(&e->refcnt, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
}
t4_os_unlock(&e->lock);
}
@@ -196,7 +196,7 @@ struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac)
void cxgbe_smt_release(struct smt_entry *e)
{
if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_sub_fetch(&e->refcnt, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
}
/**