[12/46] net/cxgbe: use rte stdatomic API

Message ID: 1710967892-7046-13-git-send-email-roretzla@linux.microsoft.com (mailing list archive)
State: Superseded, archived
Delegated to: Thomas Monjalon
Series: use stdatomic API

Checks

Context        Check      Description
ci/checkpatch  warning    coding style issues

Commit Message

Tyler Retzlaff March 20, 2024, 8:50 p.m. UTC
  Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
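
The conversion is mechanical. As a minimal sketch (the struct and helper
names below are hypothetical, not taken from this patch), each counter's
type is wrapped in RTE_ATOMIC() and each builtin call is replaced by its
rte_atomic_*_explicit counterpart with the matching rte_memory_order_*
argument:

	#include <stdbool.h>
	#include <stdint.h>

	#include <rte_stdatomic.h>

	/* Hypothetical reference-counted entry, for illustration only.
	 * Wrapping the counter's type in RTE_ATOMIC() makes it a valid
	 * atomic object in both the compatibility and the enforced
	 * stdatomic builds of DPDK.
	 */
	struct example_entry {
		RTE_ATOMIC(uint32_t) refcnt;
	};

	static inline void example_entry_hold(struct example_entry *e)
	{
		/* was: __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED); */
		rte_atomic_fetch_add_explicit(&e->refcnt, 1,
					      rte_memory_order_relaxed);
	}

	/* Returns true when the last reference was dropped. */
	static inline bool example_entry_release(struct example_entry *e)
	{
		/* was: __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED) - 1 == 0 */
		return rte_atomic_fetch_sub_explicit(&e->refcnt, 1,
						     rte_memory_order_relaxed) - 1 == 0;
	}

The relaxed memory ordering used by the original builtins is preserved
unchanged throughout the patch; only the spelling of the atomic
operations and of the memory-order constants changes.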

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 drivers/net/cxgbe/clip_tbl.c   | 12 ++++++------
 drivers/net/cxgbe/clip_tbl.h   |  2 +-
 drivers/net/cxgbe/cxgbe_main.c | 20 ++++++++++----------
 drivers/net/cxgbe/cxgbe_ofld.h |  6 +++---
 drivers/net/cxgbe/l2t.c        | 12 ++++++------
 drivers/net/cxgbe/l2t.h        |  2 +-
 drivers/net/cxgbe/mps_tcam.c   | 21 +++++++++++----------
 drivers/net/cxgbe/mps_tcam.h   |  2 +-
 drivers/net/cxgbe/smt.c        | 12 ++++++------
 drivers/net/cxgbe/smt.h        |  2 +-
 10 files changed, 46 insertions(+), 45 deletions(-)
  

Patch

diff --git a/drivers/net/cxgbe/clip_tbl.c b/drivers/net/cxgbe/clip_tbl.c
index b709e26..8588b88 100644
--- a/drivers/net/cxgbe/clip_tbl.c
+++ b/drivers/net/cxgbe/clip_tbl.c
@@ -55,7 +55,7 @@  void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
 	int ret;
 
 	t4_os_lock(&ce->lock);
-	if (__atomic_fetch_sub(&ce->refcnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+	if (rte_atomic_fetch_sub_explicit(&ce->refcnt, 1, rte_memory_order_relaxed) - 1 == 0) {
 		ret = clip6_release_mbox(dev, ce->addr);
 		if (ret)
 			dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
@@ -79,7 +79,7 @@  static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
 	unsigned int clipt_size = c->clipt_size;
 
 	for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
-		if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+		if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
 			if (!first_free)
 				first_free = e;
 		} else {
@@ -114,12 +114,12 @@  static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
 	ce = find_or_alloc_clipe(ctbl, lip);
 	if (ce) {
 		t4_os_lock(&ce->lock);
-		if (__atomic_load_n(&ce->refcnt, __ATOMIC_RELAXED) == 0) {
+		if (rte_atomic_load_explicit(&ce->refcnt, rte_memory_order_relaxed) == 0) {
 			rte_memcpy(ce->addr, lip, sizeof(ce->addr));
 			if (v6) {
 				ce->type = FILTER_TYPE_IPV6;
-				__atomic_store_n(&ce->refcnt, 1,
-						 __ATOMIC_RELAXED);
+				rte_atomic_store_explicit(&ce->refcnt, 1,
+						 rte_memory_order_relaxed);
 				ret = clip6_get_mbox(dev, lip);
 				if (ret)
 					dev_debug(adap,
@@ -129,7 +129,7 @@  static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
 				ce->type = FILTER_TYPE_IPV4;
 			}
 		} else {
-			__atomic_fetch_add(&ce->refcnt, 1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&ce->refcnt, 1, rte_memory_order_relaxed);
 		}
 		t4_os_unlock(&ce->lock);
 	}
diff --git a/drivers/net/cxgbe/clip_tbl.h b/drivers/net/cxgbe/clip_tbl.h
index 3b2be66..439fcf6 100644
--- a/drivers/net/cxgbe/clip_tbl.h
+++ b/drivers/net/cxgbe/clip_tbl.h
@@ -13,7 +13,7 @@  struct clip_entry {
 	enum filter_type type;       /* entry type */
 	u32 addr[4];                 /* IPV4 or IPV6 address */
 	rte_spinlock_t lock;         /* entry lock */
-	u32 refcnt;                  /* entry reference count */
+	RTE_ATOMIC(u32) refcnt;                  /* entry reference count */
 };
 
 struct clip_tbl {
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index c479454..2ed21f2 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -418,15 +418,15 @@  void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
 
 	if (t->tid_tab[tid]) {
 		t->tid_tab[tid] = NULL;
-		__atomic_fetch_sub(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
 		if (t->hash_base && tid >= t->hash_base) {
 			if (family == FILTER_TYPE_IPV4)
-				__atomic_fetch_sub(&t->hash_tids_in_use, 1,
-						   __ATOMIC_RELAXED);
+				rte_atomic_fetch_sub_explicit(&t->hash_tids_in_use, 1,
+						   rte_memory_order_relaxed);
 		} else {
 			if (family == FILTER_TYPE_IPV4)
-				__atomic_fetch_sub(&t->tids_in_use, 1,
-						   __ATOMIC_RELAXED);
+				rte_atomic_fetch_sub_explicit(&t->tids_in_use, 1,
+						   rte_memory_order_relaxed);
 		}
 	}
 
@@ -448,15 +448,15 @@  void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
 	t->tid_tab[tid] = data;
 	if (t->hash_base && tid >= t->hash_base) {
 		if (family == FILTER_TYPE_IPV4)
-			__atomic_fetch_add(&t->hash_tids_in_use, 1,
-					   __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&t->hash_tids_in_use, 1,
+					   rte_memory_order_relaxed);
 	} else {
 		if (family == FILTER_TYPE_IPV4)
-			__atomic_fetch_add(&t->tids_in_use, 1,
-					   __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&t->tids_in_use, 1,
+					   rte_memory_order_relaxed);
 	}
 
-	__atomic_fetch_add(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
 }
 
 /**
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
index 33697c7..48a5ec0 100644
--- a/drivers/net/cxgbe/cxgbe_ofld.h
+++ b/drivers/net/cxgbe/cxgbe_ofld.h
@@ -60,10 +60,10 @@  struct tid_info {
 	unsigned int atids_in_use;
 
 	/* TIDs in the TCAM */
-	u32 tids_in_use;
+	RTE_ATOMIC(u32) tids_in_use;
 	/* TIDs in the HASH */
-	u32 hash_tids_in_use;
-	u32 conns_in_use;
+	RTE_ATOMIC(u32) hash_tids_in_use;
+	RTE_ATOMIC(u32) conns_in_use;
 
 	rte_spinlock_t atid_lock __rte_cache_aligned;
 	rte_spinlock_t ftid_lock;
diff --git a/drivers/net/cxgbe/l2t.c b/drivers/net/cxgbe/l2t.c
index 21f4019..ecb5fec 100644
--- a/drivers/net/cxgbe/l2t.c
+++ b/drivers/net/cxgbe/l2t.c
@@ -14,8 +14,8 @@ 
  */
 void cxgbe_l2t_release(struct l2t_entry *e)
 {
-	if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
-		__atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+	if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+		rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
 }
 
 /**
@@ -112,7 +112,7 @@  static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
 	struct l2t_entry *first_free = NULL;
 
 	for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
-		if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+		if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
 			if (!first_free)
 				first_free = e;
 		} else {
@@ -151,18 +151,18 @@  static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
 	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
 	if (e) {
 		t4_os_lock(&e->lock);
-		if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+		if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
 			e->state = L2T_STATE_SWITCHING;
 			e->vlan = vlan;
 			e->lport = port;
 			rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
-			__atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
 			ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
 			if (ret < 0)
 				dev_debug(adap, "Failed to write L2T entry: %d",
 					  ret);
 		} else {
-			__atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
 		}
 		t4_os_unlock(&e->lock);
 	}
diff --git a/drivers/net/cxgbe/l2t.h b/drivers/net/cxgbe/l2t.h
index e4c0ebe..67d0197 100644
--- a/drivers/net/cxgbe/l2t.h
+++ b/drivers/net/cxgbe/l2t.h
@@ -30,7 +30,7 @@  struct l2t_entry {
 	u8  lport;                  /* destination port */
 	u8  dmac[RTE_ETHER_ADDR_LEN];   /* destination MAC address */
 	rte_spinlock_t lock;        /* entry lock */
-	u32 refcnt;                 /* entry reference count */
+	RTE_ATOMIC(u32) refcnt;                 /* entry reference count */
 };
 
 struct l2t_data {
diff --git a/drivers/net/cxgbe/mps_tcam.c b/drivers/net/cxgbe/mps_tcam.c
index 8e0da9c..79a7daa 100644
--- a/drivers/net/cxgbe/mps_tcam.c
+++ b/drivers/net/cxgbe/mps_tcam.c
@@ -76,7 +76,7 @@  int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
 	t4_os_write_lock(&mpstcam->lock);
 	entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
 	if (entry) {
-		__atomic_fetch_add(&entry->refcnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
 		t4_os_write_unlock(&mpstcam->lock);
 		return entry->idx;
 	}
@@ -98,7 +98,7 @@  int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
 	entry = &mpstcam->entry[ret];
 	memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
 	memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
-	__atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
 	entry->state = MPS_ENTRY_USED;
 
 	if (cxgbe_update_free_idx(mpstcam))
@@ -147,7 +147,7 @@  int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)
 	 * provided value is -1
 	 */
 	if (entry->state == MPS_ENTRY_UNUSED) {
-		__atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
 		entry->state = MPS_ENTRY_USED;
 	}
 
@@ -165,7 +165,7 @@  static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
 {
 	memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
 	memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
-	__atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
 	entry->state = MPS_ENTRY_UNUSED;
 }
 
@@ -190,12 +190,13 @@  int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx)
 		return -EINVAL;
 	}
 
-	if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+	if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
 		ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
 					   entry->mask, idx, 1, pi->port_id,
 					   false);
 	else
-		ret = __atomic_fetch_sub(&entry->refcnt, 1, __ATOMIC_RELAXED) - 1;
+		ret = rte_atomic_fetch_sub_explicit(&entry->refcnt, 1,
+		    rte_memory_order_relaxed) - 1;
 
 	if (ret == 0) {
 		reset_mpstcam_entry(entry);
@@ -222,7 +223,7 @@  int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
 	t4_os_write_lock(&t->lock);
 	rawf_idx = adap->params.rawf_start + pi->port_id;
 	entry = &t->entry[rawf_idx];
-	if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+	if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
 		goto out_unlock;
 
 	ret = t4_alloc_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -231,7 +232,7 @@  int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
 	if (ret < 0)
 		goto out_unlock;
 
-	__atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
 
 out_unlock:
 	t4_os_write_unlock(&t->lock);
@@ -253,7 +254,7 @@  int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
 	t4_os_write_lock(&t->lock);
 	rawf_idx = adap->params.rawf_start + pi->port_id;
 	entry = &t->entry[rawf_idx];
-	if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) != 1)
+	if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) != 1)
 		goto out_unlock;
 
 	ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -262,7 +263,7 @@  int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
 	if (ret < 0)
 		goto out_unlock;
 
-	__atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
 
 out_unlock:
 	t4_os_write_unlock(&t->lock);
diff --git a/drivers/net/cxgbe/mps_tcam.h b/drivers/net/cxgbe/mps_tcam.h
index 363786b..4b421f7 100644
--- a/drivers/net/cxgbe/mps_tcam.h
+++ b/drivers/net/cxgbe/mps_tcam.h
@@ -29,7 +29,7 @@  struct mps_tcam_entry {
 	u8 mask[RTE_ETHER_ADDR_LEN];
 
 	struct mpstcam_table *mpstcam; /* backptr */
-	u32 refcnt;
+	RTE_ATOMIC(u32) refcnt;
 };
 
 struct mpstcam_table {
diff --git a/drivers/net/cxgbe/smt.c b/drivers/net/cxgbe/smt.c
index 4e14a73..2f961c1 100644
--- a/drivers/net/cxgbe/smt.c
+++ b/drivers/net/cxgbe/smt.c
@@ -119,7 +119,7 @@  static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
 	struct smt_entry *e, *end, *first_free = NULL;
 
 	for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
-		if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+		if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
 			if (!first_free)
 				first_free = e;
 		} else {
@@ -156,7 +156,7 @@  static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
 	e = find_or_alloc_smte(s, smac);
 	if (e) {
 		t4_os_lock(&e->lock);
-		if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+		if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
 			e->pfvf = pfvf;
 			rte_memcpy(e->src_mac, smac, RTE_ETHER_ADDR_LEN);
 			ret = write_smt_entry(dev, e);
@@ -168,9 +168,9 @@  static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
 				goto out_write_unlock;
 			}
 			e->state = SMT_STATE_SWITCHING;
-			__atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
 		} else {
-			__atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+			rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
 		}
 		t4_os_unlock(&e->lock);
 	}
@@ -195,8 +195,8 @@  struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac)
 
 void cxgbe_smt_release(struct smt_entry *e)
 {
-	if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
-		__atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+	if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+		rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
 }
 
 /**
diff --git a/drivers/net/cxgbe/smt.h b/drivers/net/cxgbe/smt.h
index 531810e..8b378ae 100644
--- a/drivers/net/cxgbe/smt.h
+++ b/drivers/net/cxgbe/smt.h
@@ -23,7 +23,7 @@  struct smt_entry {
 	u16 pfvf;
 	u16 hw_idx;
 	u8 src_mac[RTE_ETHER_ADDR_LEN];
-	u32 refcnt;
+	RTE_ATOMIC(u32) refcnt;
 	rte_spinlock_t lock;
 };