From patchwork Fri Jun 4 09:46:17 2021
X-Patchwork-Id: 93864
From: Joyce Kong <joyce.kong@arm.com>
To: thomas@monjalon.net, david.marchand@redhat.com, olivier.matz@6wind.com, andrew.rybchenko@oktetlabs.ru, harry.van.haaren@intel.com, honnappa.nagarahalli@arm.com, ruifeng.wang@arm.com
Cc: dev@dpdk.org, nd@arm.com
Date: Fri, 4 Jun 2021 04:46:17 -0500
Message-Id: <20210604094624.31308-2-joyce.kong@arm.com>
Subject: [dpdk-dev] [PATCH v1 1/8] test/ticketlock: use GCC atomic builtins for lcores sync

Convert rte_atomic usages to GCC atomic builtins for lcores sync
in ticketlock testcases.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
---
 app/test/test_ticketlock.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c
index 7aab8665b..9aa212fa9 100644
--- a/app/test/test_ticketlock.c
+++ b/app/test/test_ticketlock.c
@@ -9,7 +9,6 @@
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 #include
@@ -49,7 +48,7 @@ static rte_ticketlock_t tl_tab[RTE_MAX_LCORE];
 static rte_ticketlock_recursive_t tlr;
 static unsigned int count;
 
-static rte_atomic32_t synchro;
+static uint32_t synchro;
 
 static int
 test_ticketlock_per_core(__rte_unused void *arg)
@@ -112,7 +111,7 @@ load_loop_fn(void *func_param)
 
         /* wait synchro for workers */
         if (lcore != rte_get_main_lcore())
-                while (rte_atomic32_read(&synchro) == 0)
+                while (__atomic_load_n(&synchro, __ATOMIC_RELAXED) == 0)
                         ;
 
         begin = rte_rdtsc_precise();
@@ -155,11 +154,11 @@ test_ticketlock_perf(void)
         printf("\nTest with lock on %u cores...\n", rte_lcore_count());
 
         /* Clear synchro and start workers */
-        rte_atomic32_set(&synchro, 0);
+        __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
         rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
 
         /* start synchro and launch test on main */
-        rte_atomic32_set(&synchro, 1);
+        __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
         load_loop_fn(&lock);
 
         rte_eal_mp_wait_lcore();
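All of the lock perf tests in this series rely on the same start-flag handshake: the main
lcore clears a plain uint32_t, launches the workers, then stores 1 to release them, while
each worker spins on a relaxed load. Because the flag publishes no other data, relaxed
ordering is sufficient. The following is a minimal, self-contained sketch of that pattern;
it is illustrative only and uses plain POSIX threads plus made-up names (worker(),
NUM_WORKERS) rather than DPDK lcores.

/* Sketch of the "synchro" release pattern, not part of the patch. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_WORKERS 4

static uint32_t synchro; /* 0 = wait, 1 = go */

static void *worker(void *arg)
{
        (void)arg;
        /* spin until the main thread releases the workers */
        while (__atomic_load_n(&synchro, __ATOMIC_RELAXED) == 0)
                ;
        /* ... the timed loop would run here ... */
        return NULL;
}

int main(void)
{
        pthread_t tid[NUM_WORKERS];
        int i;

        __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
        for (i = 0; i < NUM_WORKERS; i++)
                pthread_create(&tid[i], NULL, worker, NULL);

        /* release all workers at (roughly) the same time */
        __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);

        for (i = 0; i < NUM_WORKERS; i++)
                pthread_join(tid[i], NULL);

        printf("all workers released\n");
        return 0;
}

If the flag did guard data written before the release, the store would instead need
__ATOMIC_RELEASE and the spinning loads __ATOMIC_ACQUIRE.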
From patchwork Fri Jun 4 09:46:18 2021
X-Patchwork-Id: 93865
From: Joyce Kong <joyce.kong@arm.com>
To: thomas@monjalon.net, david.marchand@redhat.com, olivier.matz@6wind.com, andrew.rybchenko@oktetlabs.ru, harry.van.haaren@intel.com, honnappa.nagarahalli@arm.com, ruifeng.wang@arm.com
Cc: dev@dpdk.org, nd@arm.com
Date: Fri, 4 Jun 2021 04:46:18 -0500
Message-Id: <20210604094624.31308-3-joyce.kong@arm.com>
Subject: [dpdk-dev] [PATCH v1 2/8] test/spinlock: use GCC atomic builtins for lcores sync

Convert rte_atomic usages to GCC atomic builtins for lcores sync
in spinlock testcases.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 app/test/test_spinlock.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 054fb43a9..77b9b7086 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -17,7 +17,6 @@
 #include
 #include
 #include
-#include <rte_atomic.h>
 
 #include "test.h"
@@ -49,7 +48,7 @@ static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
 static rte_spinlock_recursive_t slr;
 static unsigned count = 0;
 
-static rte_atomic32_t synchro;
+static uint32_t synchro;
 
 static int
 test_spinlock_per_core(__rte_unused void *arg)
@@ -111,7 +110,8 @@ load_loop_fn(void *func_param)
 
         /* wait synchro for workers */
         if (lcore != rte_get_main_lcore())
-                while (rte_atomic32_read(&synchro) == 0);
+                while (__atomic_load_n(&synchro, __ATOMIC_RELAXED) == 0)
+                        ;
 
         begin = rte_get_timer_cycles();
         while (lcount < MAX_LOOP) {
@@ -150,11 +150,11 @@ test_spinlock_perf(void)
         printf("\nTest with lock on %u cores...\n", rte_lcore_count());
 
         /* Clear synchro and start workers */
-        rte_atomic32_set(&synchro, 0);
+        __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
         rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
 
         /* start synchro and launch test on main */
-        rte_atomic32_set(&synchro, 1);
+        __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
         load_loop_fn(&lock);
 
         rte_eal_mp_wait_lcore();
From patchwork Fri Jun 4 09:46:19 2021
X-Patchwork-Id: 93866
From: Joyce Kong <joyce.kong@arm.com>
To: thomas@monjalon.net, david.marchand@redhat.com, olivier.matz@6wind.com, andrew.rybchenko@oktetlabs.ru, harry.van.haaren@intel.com, honnappa.nagarahalli@arm.com, ruifeng.wang@arm.com
Cc: dev@dpdk.org, nd@arm.com
Date: Fri, 4 Jun 2021 04:46:19 -0500
Message-Id: <20210604094624.31308-4-joyce.kong@arm.com>
Subject: [dpdk-dev] [PATCH v1 3/8] test/rwlock: use GCC atomic builtins for lcores sync

Convert rte_atomic usages to GCC atomic builtins for lcores sync
in rwlock testcases.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 app/test/test_rwlock.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index b47150a86..ef89ae44c 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -13,7 +13,6 @@
 #include
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 #include
@@ -36,7 +35,7 @@ static rte_rwlock_t sl;
 static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
 
-static rte_atomic32_t synchro;
+static uint32_t synchro;
 
 enum {
         LC_TYPE_RDLOCK,
@@ -102,7 +101,7 @@ load_loop_fn(__rte_unused void *arg)
 
         /* wait synchro for workers */
         if (lcore != rte_get_main_lcore())
-                while (rte_atomic32_read(&synchro) == 0)
+                while (__atomic_load_n(&synchro, __ATOMIC_RELAXED) == 0)
                         ;
 
         begin = rte_rdtsc_precise();
@@ -136,12 +135,12 @@ test_rwlock_perf(void)
         printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
 
         /* clear synchro and start workers */
-        rte_atomic32_set(&synchro, 0);
+        __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
         if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)
                 return -1;
 
         /* start synchro and launch test on main */
-        rte_atomic32_set(&synchro, 1);
+        __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
         load_loop_fn(NULL);
 
         rte_eal_mp_wait_lcore();
From patchwork Fri Jun 4 09:46:20 2021
X-Patchwork-Id: 93867
From: Joyce Kong <joyce.kong@arm.com>
To: thomas@monjalon.net, david.marchand@redhat.com, olivier.matz@6wind.com, andrew.rybchenko@oktetlabs.ru, harry.van.haaren@intel.com, honnappa.nagarahalli@arm.com, ruifeng.wang@arm.com
Cc: dev@dpdk.org, nd@arm.com
Date: Fri, 4 Jun 2021 04:46:20 -0500
Message-Id: <20210604094624.31308-5-joyce.kong@arm.com>
Subject: [dpdk-dev] [PATCH v1 4/8] test/mcslock: use GCC atomic builtins for lcores sync

Convert rte_atomic usages to GCC atomic builtins for lcores sync
in mcslock testcases.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 app/test/test_mcslock.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
index 80eaecc90..e6bdeb966 100644
--- a/app/test/test_mcslock.c
+++ b/app/test/test_mcslock.c
@@ -17,7 +17,6 @@
 #include
 #include
 #include
-#include <rte_atomic.h>
 
 #include "test.h"
@@ -43,7 +42,7 @@ rte_mcslock_t *p_ml_perf;
 
 static unsigned int count;
 
-static rte_atomic32_t synchro;
+static uint32_t synchro;
 
 static int
 test_mcslock_per_core(__rte_unused void *arg)
@@ -76,7 +75,7 @@ load_loop_fn(void *func_param)
         rte_mcslock_t ml_perf_me;
 
         /* wait synchro */
-        while (rte_atomic32_read(&synchro) == 0)
+        while (__atomic_load_n(&synchro, __ATOMIC_RELAXED) == 0)
                 ;
 
         begin = rte_get_timer_cycles();
@@ -102,15 +101,15 @@ test_mcslock_perf(void)
         const unsigned int lcore = rte_lcore_id();
 
         printf("\nTest with no lock on single core...\n");
-        rte_atomic32_set(&synchro, 1);
+        __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
         load_loop_fn(&lock);
         printf("Core [%u] Cost Time = %"PRIu64" us\n",
                         lcore, time_count[lcore]);
         memset(time_count, 0, sizeof(time_count));
 
         printf("\nTest with lock on single core...\n");
+        __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
         lock = 1;
-        rte_atomic32_set(&synchro, 1);
         load_loop_fn(&lock);
         printf("Core [%u] Cost Time = %"PRIu64" us\n",
                         lcore, time_count[lcore]);
@@ -118,11 +117,11 @@
         printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));
 
-        rte_atomic32_set(&synchro, 0);
+        __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
         rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
 
         /* start synchro and launch test on main */
-        rte_atomic32_set(&synchro, 1);
+        __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
         load_loop_fn(&lock);
 
         rte_eal_mp_wait_lcore();
From patchwork Fri Jun 4 09:46:21 2021
X-Patchwork-Id: 93868
From: Joyce Kong <joyce.kong@arm.com>
To: thomas@monjalon.net, david.marchand@redhat.com, olivier.matz@6wind.com, andrew.rybchenko@oktetlabs.ru, harry.van.haaren@intel.com, honnappa.nagarahalli@arm.com, ruifeng.wang@arm.com
Cc: dev@dpdk.org, nd@arm.com
Date: Fri, 4 Jun 2021 04:46:21 -0500
Message-Id: <20210604094624.31308-6-joyce.kong@arm.com>
Subject: [dpdk-dev] [PATCH v1 5/8] test/mempool: remove unused variable for lcores sync

Remove the unused synchro variable as there is no lcores sync
in mempool function test.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 app/test/test_mempool.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index 3adadd673..7675a3e60 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -20,7 +20,6 @@
 #include
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 #include
@@ -57,8 +56,6 @@
                 goto label;                                     \
         } while (0)
 
-static rte_atomic32_t synchro;
-
 /*
  * save the object number in the first 4 bytes of object data. All
  * other bytes are set to 0.
 */
@@ -491,8 +488,6 @@ test_mempool(void)
         };
         const char *default_pool_ops = rte_mbuf_best_mempool_ops();
 
-        rte_atomic32_init(&synchro);
-
         /* create a mempool (without cache) */
         mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
                 MEMPOOL_ELT_SIZE, 0, 0,
From patchwork Fri Jun 4 09:46:22 2021
X-Patchwork-Id: 93869
From: Joyce Kong <joyce.kong@arm.com>
To: thomas@monjalon.net, david.marchand@redhat.com, olivier.matz@6wind.com, andrew.rybchenko@oktetlabs.ru, harry.van.haaren@intel.com, honnappa.nagarahalli@arm.com, ruifeng.wang@arm.com
Cc: dev@dpdk.org, nd@arm.com
Date: Fri, 4 Jun 2021 04:46:22 -0500
Message-Id: <20210604094624.31308-7-joyce.kong@arm.com>
Subject: [dpdk-dev] [PATCH v1 6/8] test/mempool_perf: use GCC atomic builtins for lcores sync

Convert rte_atomic usages to GCC atomic builtins for lcores sync
in mempool_perf testcases. Meanwhile, remove unnecessary synchro
init as it would be set to 0 when launching cores.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 app/test/test_mempool_perf.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index d7d0aaa33..9271378aa 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -20,7 +20,6 @@
 #include
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 #include
@@ -83,7 +82,7 @@ static int use_external_cache;
 static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
 
-static rte_atomic32_t synchro;
+static uint32_t synchro;
 
 /* number of objects in one bulk operation (get or put) */
 static unsigned n_get_bulk;
@@ -145,7 +144,8 @@ per_lcore_mempool_test(void *arg)
 
         /* wait synchro for workers */
         if (lcore_id != rte_get_main_lcore())
-                while (rte_atomic32_read(&synchro) == 0);
+                while (__atomic_load_n(&synchro, __ATOMIC_RELAXED) == 0)
+                        ;
 
         start_cycles = rte_get_timer_cycles();
@@ -198,7 +198,7 @@ launch_cores(struct rte_mempool *mp, unsigned int cores)
         int ret;
         unsigned cores_save = cores;
 
-        rte_atomic32_set(&synchro, 0);
+        __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
 
         /* reset stats */
         memset(stats, 0, sizeof(stats));
@@ -223,7 +223,7 @@ launch_cores(struct rte_mempool *mp, unsigned int cores)
         }
 
         /* start synchro and launch test on main */
-        rte_atomic32_set(&synchro, 1);
+        __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
 
         ret = per_lcore_mempool_test(mp);
@@ -288,8 +288,6 @@ test_mempool_perf(void)
         const char *default_pool_ops;
         int ret = -1;
 
-        rte_atomic32_init(&synchro);
-
         /* create a mempool (without cache) */
         mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
                 MEMPOOL_ELT_SIZE, 0, 0,
From patchwork Fri Jun 4 09:46:23 2021
X-Patchwork-Id: 93870
From: Joyce Kong <joyce.kong@arm.com>
To: thomas@monjalon.net, david.marchand@redhat.com, olivier.matz@6wind.com, andrew.rybchenko@oktetlabs.ru, harry.van.haaren@intel.com, honnappa.nagarahalli@arm.com, ruifeng.wang@arm.com
Cc: dev@dpdk.org, nd@arm.com
Date: Fri, 4 Jun 2021 04:46:23 -0500
Message-Id: <20210604094624.31308-8-joyce.kong@arm.com>
Subject: [dpdk-dev] [PATCH v1 7/8] test/service_cores: use GCC atomic builtins for lock sync

Convert rte_atomic usages to GCC atomic builtins for lock sync
in service core testcases.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 app/test/test_service_cores.c | 36 +++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/app/test/test_service_cores.c b/app/test/test_service_cores.c
index 37d7172d5..9d908d44e 100644
--- a/app/test/test_service_cores.c
+++ b/app/test/test_service_cores.c
@@ -53,18 +53,20 @@ static int32_t dummy_cb(void *args)
 static int32_t dummy_mt_unsafe_cb(void *args)
 {
         /* before running test, the initialization has set pass_test to 1.
-         * If the cmpset in service-cores is working correctly, the code here
+         * If the cas in service-cores is working correctly, the code here
          * should never fail to take the lock. If the lock *is* taken, fail the
         * test, because two threads are concurrently in a non-MT safe callback.
          */
         uint32_t *test_params = args;
-        uint32_t *atomic_lock = &test_params[0];
+        uint32_t *lock = &test_params[0];
         uint32_t *pass_test = &test_params[1];
-        int lock_taken = rte_atomic32_cmpset(atomic_lock, 0, 1);
+        uint32_t exp = 0;
+        int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
+                        __ATOMIC_RELAXED, __ATOMIC_RELAXED);
         if (lock_taken) {
                 /* delay with the lock held */
                 rte_delay_ms(250);
-                rte_atomic32_clear((rte_atomic32_t *)atomic_lock);
+                __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
         } else {
                 /* 2nd thread will fail to take lock, so set pass flag */
                 *pass_test = 0;
@@ -83,13 +85,15 @@ static int32_t dummy_mt_safe_cb(void *args)
          * that 2 threads are running the callback at the same time: MT safe
          */
         uint32_t *test_params = args;
-        uint32_t *atomic_lock = &test_params[0];
+        uint32_t *lock = &test_params[0];
         uint32_t *pass_test = &test_params[1];
-        int lock_taken = rte_atomic32_cmpset(atomic_lock, 0, 1);
+        uint32_t exp = 0;
+        int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
+                        __ATOMIC_RELAXED, __ATOMIC_RELAXED);
         if (lock_taken) {
                 /* delay with the lock held */
                 rte_delay_ms(250);
-                rte_atomic32_clear((rte_atomic32_t *)atomic_lock);
+                __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
         } else {
                 /* 2nd thread will fail to take lock, so set pass flag */
                 *pass_test = 1;
@@ -622,9 +626,9 @@ service_threaded_test(int mt_safe)
         TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_2),
                         "mt safe lcore add fail");
 
-        /* Use atomic locks to verify that two threads are in the same function
-         * at the same time. These are passed to the unit tests through the
-         * callback userdata parameter
+        /* Use locks to verify that two threads are in the same function
+         * at the same time. These are passed to the unit tests through
+         * the callback userdata parameter.
          */
         uint32_t test_params[2];
         memset(test_params, 0, sizeof(uint32_t) * 2);
@@ -713,7 +717,7 @@ service_mt_safe_poll(void)
 }
 
 /* tests a NON mt safe service with two cores, the callback is serialized
- * using the atomic cmpset.
+ * using the cas.
  */
 static int
 service_mt_unsafe_poll(void)
@@ -735,17 +739,17 @@ delay_as_a_mt_safe_service(void *args)
         RTE_SET_USED(args);
         uint32_t *params = args;
 
-        /* retrieve done flag and atomic lock to inc/dec */
+        /* retrieve done flag and lock to add/sub */
         uint32_t *done = &params[0];
-        rte_atomic32_t *lock = (rte_atomic32_t *)&params[1];
+        uint32_t *lock = &params[1];
 
         while (!*done) {
-                rte_atomic32_inc(lock);
+                __atomic_add_fetch(lock, 1, __ATOMIC_RELAXED);
                 rte_delay_us(500);
-                if (rte_atomic32_read(lock) > 1)
+                if (__atomic_load_n(lock, __ATOMIC_RELAXED) > 1)
                         /* pass: second core has simultaneously incremented */
                         *done = 1;
-                rte_atomic32_dec(lock);
+                __atomic_sub_fetch(lock, 1, __ATOMIC_RELAXED);
         }
 
         return 0;
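The service-cores patch above replaces rte_atomic32_cmpset() with
__atomic_compare_exchange_n(), which is the try-lock idiom the test uses to detect two
threads entering a callback at once. A minimal sketch of that idiom, pulled out into
hypothetical try_lock()/unlock() helpers (these names are not part of the patch), assuming
only the GCC builtins:

#include <stdbool.h>
#include <stdint.h>

static inline bool try_lock(uint32_t *lock)
{
        uint32_t expected = 0;

        /* Atomically move the lock from 0 to 1. On failure "expected" is
         * overwritten with the current value, which we simply discard.
         * The 0 selects a strong CAS; the last two arguments are the
         * success and failure memory orders.
         */
        return __atomic_compare_exchange_n(lock, &expected, 1, 0,
                        __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static inline void unlock(uint32_t *lock)
{
        __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
}

The test only needs to observe contention, so relaxed ordering is enough there; a lock that
actually protected shared data would normally acquire with __ATOMIC_ACQUIRE and release
with __ATOMIC_RELEASE.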
From patchwork Fri Jun 4 09:46:24 2021
X-Patchwork-Id: 93871
From: Joyce Kong <joyce.kong@arm.com>
To: thomas@monjalon.net, david.marchand@redhat.com, olivier.matz@6wind.com, andrew.rybchenko@oktetlabs.ru, harry.van.haaren@intel.com, honnappa.nagarahalli@arm.com, ruifeng.wang@arm.com
Cc: dev@dpdk.org, nd@arm.com
Date: Fri, 4 Jun 2021 04:46:24 -0500
Message-Id: <20210604094624.31308-9-joyce.kong@arm.com>
Subject: [dpdk-dev] [PATCH v1 8/8] test/rcu_perf: use GCC atomic builtins for data sync

Convert rte_atomic usages to GCC atomic builtins in rcu_perf
testcases.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 app/test/test_rcu_qsbr_perf.c | 98 +++++++++++++++++------------------
 1 file changed, 49 insertions(+), 49 deletions(-)

diff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c
index 3017e7112..cf7b158d2 100644
--- a/app/test/test_rcu_qsbr_perf.c
+++ b/app/test/test_rcu_qsbr_perf.c
@@ -30,8 +30,8 @@ static volatile uint32_t thr_id;
 static struct rte_rcu_qsbr *t[RTE_MAX_LCORE];
 static struct rte_hash *h;
 static char hash_name[8];
-static rte_atomic64_t updates, checks;
-static rte_atomic64_t update_cycles, check_cycles;
+static uint64_t updates, checks;
+static uint64_t update_cycles, check_cycles;
 
 /* Scale down results to 1000 operations to support lower
  * granularity clocks.
@@ -81,8 +81,8 @@ test_rcu_qsbr_reader_perf(void *arg)
         }
 
         cycles = rte_rdtsc_precise() - begin;
-        rte_atomic64_add(&update_cycles, cycles);
-        rte_atomic64_add(&updates, loop_cnt);
+        __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
+        __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
 
         /* Make the thread offline */
         rte_rcu_qsbr_thread_offline(t[0], thread_id);
@@ -113,8 +113,8 @@ test_rcu_qsbr_writer_perf(void *arg)
         } while (loop_cnt < 20000000);
 
         cycles = rte_rdtsc_precise() - begin;
-        rte_atomic64_add(&check_cycles, cycles);
-        rte_atomic64_add(&checks, loop_cnt);
+        __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
+        __atomic_fetch_add(&checks, loop_cnt, __ATOMIC_RELAXED);
 
         return 0;
 }
@@ -130,10 +130,10 @@ test_rcu_qsbr_perf(void)
 
         writer_done = 0;
 
-        rte_atomic64_clear(&updates);
-        rte_atomic64_clear(&update_cycles);
-        rte_atomic64_clear(&checks);
-        rte_atomic64_clear(&check_cycles);
+        __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
 
         printf("\nPerf Test: %d Readers/1 Writer('wait' in qsbr_check == true)\n",
                 num_cores - 1);
@@ -168,15 +168,15 @@ test_rcu_qsbr_perf(void)
         rte_eal_mp_wait_lcore();
 
         printf("Total quiescent state updates = %"PRIi64"\n",
-                rte_atomic64_read(&updates));
+                __atomic_load_n(&updates, __ATOMIC_RELAXED));
         printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
                 RCU_SCALE_DOWN,
-                rte_atomic64_read(&update_cycles) /
-                (rte_atomic64_read(&updates) / RCU_SCALE_DOWN));
-        printf("Total RCU checks = %"PRIi64"\n", rte_atomic64_read(&checks));
+                __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
+                (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+        printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
         printf("Cycles per %d checks: %"PRIi64"\n",
                 RCU_SCALE_DOWN,
-                rte_atomic64_read(&check_cycles) /
-                (rte_atomic64_read(&checks) / RCU_SCALE_DOWN));
+                __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
+                (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
 
         rte_free(t[0]);
@@ -193,8 +193,8 @@ test_rcu_qsbr_rperf(void)
         size_t sz;
         unsigned int i, tmp_num_cores;
 
-        rte_atomic64_clear(&updates);
-        rte_atomic64_clear(&update_cycles);
+        __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
 
         __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
@@ -220,11 +220,11 @@ test_rcu_qsbr_rperf(void)
         rte_eal_mp_wait_lcore();
 
         printf("Total quiescent state updates = %"PRIi64"\n",
-                rte_atomic64_read(&updates));
+                __atomic_load_n(&updates, __ATOMIC_RELAXED));
         printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
                 RCU_SCALE_DOWN,
-                rte_atomic64_read(&update_cycles) /
-                (rte_atomic64_read(&updates) / RCU_SCALE_DOWN));
+                __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
+                (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
 
         rte_free(t[0]);
@@ -241,8 +241,8 @@ test_rcu_qsbr_wperf(void)
         size_t sz;
         unsigned int i;
 
-        rte_atomic64_clear(&checks);
-        rte_atomic64_clear(&check_cycles);
+        __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
 
         __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
@@ -266,10 +266,10 @@ test_rcu_qsbr_wperf(void)
         /* Wait until all readers have exited */
         rte_eal_mp_wait_lcore();
 
-        printf("Total RCU checks = %"PRIi64"\n", rte_atomic64_read(&checks));
+        printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks,
+                        __ATOMIC_RELAXED));
         printf("Cycles per %d checks: %"PRIi64"\n",
                 RCU_SCALE_DOWN,
-                rte_atomic64_read(&check_cycles) /
-                (rte_atomic64_read(&checks) / RCU_SCALE_DOWN));
+                __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
+                (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
 
         rte_free(t[0]);
@@ -317,8 +317,8 @@ test_rcu_qsbr_hash_reader(void *arg)
         } while (!writer_done);
 
         cycles = rte_rdtsc_precise() - begin;
-        rte_atomic64_add(&update_cycles, cycles);
-        rte_atomic64_add(&updates, loop_cnt);
+        __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
+        __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
 
         rte_rcu_qsbr_thread_unregister(temp, thread_id);
@@ -389,10 +389,10 @@ test_rcu_qsbr_sw_sv_1qs(void)
 
         writer_done = 0;
 
-        rte_atomic64_clear(&updates);
-        rte_atomic64_clear(&update_cycles);
-        rte_atomic64_clear(&checks);
-        rte_atomic64_clear(&check_cycles);
+        __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
 
         __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
@@ -453,8 +453,8 @@
         }
 
         cycles = rte_rdtsc_precise() - begin;
-        rte_atomic64_add(&check_cycles, cycles);
-        rte_atomic64_add(&checks, i);
+        __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
+        __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
 
         writer_done = 1;
@@ -467,12 +467,12 @@
 
         printf("Following numbers include calls to rte_hash functions\n");
         printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
-                rte_atomic64_read(&update_cycles) /
-                rte_atomic64_read(&updates));
+                __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
+                __atomic_load_n(&updates, __ATOMIC_RELAXED));
 
         printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
-                rte_atomic64_read(&check_cycles) /
-                rte_atomic64_read(&checks));
+                __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
+                __atomic_load_n(&checks, __ATOMIC_RELAXED));
 
         rte_free(t[0]);
@@ -511,7 +511,7 @@ test_rcu_qsbr_sw_sv_1qs_non_blocking(void)
         printf("Perf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Non-Blocking QSBR check\n",
                 num_cores);
 
-        __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+        __atomic_store_n(&thr_id, 0, __ATOMIC_RELAXED);
 
         if (all_registered == 1)
                 tmp_num_cores = num_cores;
@@ -570,8 +570,8 @@
         }
 
         cycles = rte_rdtsc_precise() - begin;
-        rte_atomic64_add(&check_cycles, cycles);
-        rte_atomic64_add(&checks, i);
+        __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
+        __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
 
         writer_done = 1;
         /* Wait and check return value from reader threads */
@@ -583,12 +583,12 @@
 
         printf("Following numbers include calls to rte_hash functions\n");
         printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
-                rte_atomic64_read(&update_cycles) /
-                rte_atomic64_read(&updates));
+                __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
+                __atomic_load_n(&updates, __ATOMIC_RELAXED));
 
         printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
-                rte_atomic64_read(&check_cycles) /
-                rte_atomic64_read(&checks));
+                __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
+                __atomic_load_n(&checks, __ATOMIC_RELAXED));
 
         rte_free(t[0]);
@@ -619,10 +619,10 @@ test_rcu_qsbr_main(void)
                 return TEST_SKIPPED;
         }
 
-        rte_atomic64_init(&updates);
-        rte_atomic64_init(&update_cycles);
-        rte_atomic64_init(&checks);
-        rte_atomic64_init(&check_cycles);
+        __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
+        __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
 
         num_cores = 0;
         RTE_LCORE_FOREACH_WORKER(core_id) {
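The rcu_perf counters converted above (updates, checks, update_cycles, check_cycles) are
pure statistics: each thread adds its local totals once with a relaxed fetch-add, and the
main lcore only reads them after rte_eal_mp_wait_lcore(), which already orders the
accesses. Below is a small stand-alone sketch of that accumulation pattern, illustrative
only, using plain POSIX threads and made-up names (bench_thread, total_ops) instead of the
DPDK lcore API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t total_ops;      /* shared statistic, no lock needed */

static void *bench_thread(void *arg)
{
        uint64_t local_ops = 0;
        (void)arg;

        for (int i = 0; i < 1000000; i++)
                local_ops++;    /* stand-in for the measured work */

        /* publish the per-thread total exactly once; relaxed is enough */
        __atomic_fetch_add(&total_ops, local_ops, __ATOMIC_RELAXED);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        __atomic_store_n(&total_ops, 0, __ATOMIC_RELAXED);
        pthread_create(&t1, NULL, bench_thread, NULL);
        pthread_create(&t2, NULL, bench_thread, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        /* the joins synchronize, so a relaxed load reads the final value */
        printf("total ops: %llu\n",
               (unsigned long long)__atomic_load_n(&total_ops, __ATOMIC_RELAXED));
        return 0;
}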