[v4,2/6] dma/idxd: replace rte atomics with GCC builtin atomics

Message ID 1685735107-19208-3-git-send-email-roretzla@linux.microsoft.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: replace rte atomics with GCC builtin atomics

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Tyler Retzlaff June 2, 2023, 7:45 p.m. UTC
Replace the use of rte_atomic.h types and functions; instead, use the
GCC-supplied C++11 memory model builtins.
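
For reference, a minimal compilable sketch of the substitution pattern this
patch applies; the helper names and the standalone counter below are
illustrative only, not part of the driver:

    /* A plain uint16_t counter driven through GCC __atomic builtins,
     * mirroring the former rte_atomic16_t usage.
     */
    #include <stdbool.h>
    #include <stdint.h>

    static uint16_t ref_count; /* was: rte_atomic16_t ref_count */

    static void ref_get(void)
    {
            /* was: rte_atomic16_inc(&ref_count) */
            __atomic_fetch_add(&ref_count, 1, __ATOMIC_SEQ_CST);
    }

    static bool ref_put_is_last(void)
    {
            /* was: rte_atomic16_dec_and_test(&ref_count); true when the
             * decrement brings the counter to zero, i.e. when the value
             * returned by the fetch-and-subtract was 1.
             */
            return __atomic_fetch_sub(&ref_count, 1, __ATOMIC_SEQ_CST) == 1;
    }

    static uint16_t ref_read(void)
    {
            /* was: rte_atomic16_read(&ref_count) */
            return __atomic_load_n(&ref_count, __ATOMIC_SEQ_CST);
    }

__ATOMIC_SEQ_CST is used throughout as a deliberately conservative ordering;
the in-line NOTE added in idxd_pci.c flags it as a candidate for later
ordering optimization.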

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Kevin Laatz <kevin.laatz@intel.com>
---
 drivers/dma/idxd/idxd_internal.h |  3 +--
 drivers/dma/idxd/idxd_pci.c      | 11 ++++++-----
 2 files changed, 7 insertions(+), 7 deletions(-)
  

Patch

diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index 180a858..cd41777 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -7,7 +7,6 @@ 
 
 #include <rte_dmadev_pmd.h>
 #include <rte_spinlock.h>
-#include <rte_atomic.h>
 
 #include "idxd_hw_defs.h"
 
@@ -34,7 +33,7 @@  struct idxd_pci_common {
 	rte_spinlock_t lk;
 
 	uint8_t wq_cfg_sz;
-	rte_atomic16_t ref_count;
+	uint16_t ref_count;
 	volatile struct rte_idxd_bar0 *regs;
 	volatile uint32_t *wq_regs_base;
 	volatile struct rte_idxd_grpcfg *grp_regs;
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index 5e56240..3696c7f 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -6,7 +6,6 @@ 
 #include <rte_devargs.h>
 #include <rte_dmadev_pmd.h>
 #include <rte_malloc.h>
-#include <rte_atomic.h>
 
 #include "idxd_internal.h"
 
@@ -136,7 +135,8 @@ 
 	/* if this is the last WQ on the device, disable the device and free
 	 * the PCI struct
 	 */
-	is_last_wq = rte_atomic16_dec_and_test(&idxd->u.pci->ref_count);
+	/* NOTE: review for potential ordering optimization */
+	is_last_wq = (__atomic_fetch_sub(&idxd->u.pci->ref_count, 1, __ATOMIC_SEQ_CST) == 1);
 	if (is_last_wq) {
 		/* disable the device */
 		err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
@@ -322,8 +322,9 @@ 
 			return ret;
 		}
 		qid = rte_dma_get_dev_id_by_name(qname);
-		max_qid = rte_atomic16_read(
-			&((struct idxd_dmadev *)rte_dma_fp_objs[qid].dev_private)->u.pci->ref_count);
+		max_qid = __atomic_load_n(
+			&((struct idxd_dmadev *)rte_dma_fp_objs[qid].dev_private)->u.pci->ref_count,
+			__ATOMIC_SEQ_CST);
 
 		/* we have queue 0 done, now configure the rest of the queues */
 		for (qid = 1; qid < max_qid; qid++) {
@@ -380,7 +381,7 @@ 
 				free(idxd.u.pci);
 			return ret;
 		}
-		rte_atomic16_inc(&idxd.u.pci->ref_count);
+		__atomic_fetch_add(&idxd.u.pci->ref_count, 1, __ATOMIC_SEQ_CST);
 	}
 
 	return 0;