test/mcslock: remove unneeded per-lcore copy
diff mbox series

Message ID 20201104170425.8882-1-olivier.matz@6wind.com
State Deferred
Delegated to: David Marchand
Headers show
Series
  • test/mcslock: remove unneeded per-lcore copy
Related show

Checks

Context Check Description
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-intel-Functional fail Functional Testing issues
ci/iol-testing success Testing PASS
ci/iol-broadcom-Functional fail Functional Testing issues
ci/travis-robot success Travis build: passed
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Olivier Matz Nov. 4, 2020, 5:04 p.m. UTC
Each core already comes with its local storage for mcslock (in its
stack), therefore there is no need to define an additional per-lcore
mcslock.

Fixes: 32dcb9fd2a22 ("test/mcslock: add MCS queued lock unit test")

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
---
 app/test/test_mcslock.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

Comments

Honnappa Nagarahalli Nov. 4, 2020, 5:57 p.m. UTC | #1
<snip>

> 
> Each core already comes with its local storage for mcslock (in its stack),
> therefore there is no need to define an additional per-lcore mcslock.
> 
> Fixes: 32dcb9fd2a22 ("test/mcslock: add MCS queued lock unit test")
> 
> Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
> ---
>  app/test/test_mcslock.c | 16 ++++++----------
>  1 file changed, 6 insertions(+), 10 deletions(-)
> 
> diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
> index fbca78707d..80eaecc90a 100644
> --- a/app/test/test_mcslock.c
> +++ b/app/test/test_mcslock.c
> @@ -37,10 +37,6 @@
>   *   lock multiple times.
>   */
> 
> -RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_me);
> -RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_try_me);
> -RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_perf_me);
> -
>  rte_mcslock_t *p_ml;
>  rte_mcslock_t *p_ml_try;
>  rte_mcslock_t *p_ml_perf;
> @@ -53,7 +49,7 @@ static int
>  test_mcslock_per_core(__rte_unused void *arg)
>  {
>  	/* Per core me node. */
> -	rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
> +	rte_mcslock_t ml_me;
These variables are modified by other threads. IMO, it is better to keep them global (and not on the stack). From that perspective, I think we should be taking the address of the per lcore variable. For ex:
rte_mcslock_t *ml_me = &RTE_PER_LCORE(_ml_me);

> 
>  	rte_mcslock_lock(&p_ml, &ml_me);
>  	printf("MCS lock taken on core %u\n", rte_lcore_id());
> @@ -77,7 +73,7 @@ load_loop_fn(void *func_param)
>  	const unsigned int lcore = rte_lcore_id();
> 
>  	/**< Per core me node. */
> -	rte_mcslock_t ml_perf_me = RTE_PER_LCORE(_ml_perf_me);
> +	rte_mcslock_t ml_perf_me;
> 
>  	/* wait synchro */
>  	while (rte_atomic32_read(&synchro) == 0)
> @@ -151,8 +147,8 @@ static int
>  test_mcslock_try(__rte_unused void *arg)
>  {
>  	/**< Per core me node. */
> -	rte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);
> -	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
> +	rte_mcslock_t ml_me;
> +	rte_mcslock_t ml_try_me;
> 
>  	/* Locked ml_try in the main lcore, so it should fail
>  	 * when trying to lock it in the worker lcore.
> @@ -178,8 +174,8 @@ test_mcslock(void)
>  	int i;
> 
>  	/* Define per core me node. */
> -	rte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);
> -	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
> +	rte_mcslock_t ml_me;
> +	rte_mcslock_t ml_try_me;
> 
>  	/*
>  	 * Test mcs lock & unlock on each core
> --
> 2.25.1
Olivier Matz Nov. 4, 2020, 9:03 p.m. UTC | #2
Hi Honnappa,

On Wed, Nov 04, 2020 at 05:57:19PM +0000, Honnappa Nagarahalli wrote:
> <snip>
> 
> > 
> > Each core already comes with its local storage for mcslock (in its stack),
> > therefore there is no need to define an additional per-lcore mcslock.
> > 
> > Fixes: 32dcb9fd2a22 ("test/mcslock: add MCS queued lock unit test")
> > 
> > Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
> > ---
> >  app/test/test_mcslock.c | 16 ++++++----------
> >  1 file changed, 6 insertions(+), 10 deletions(-)
> > 
> > diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
> > index fbca78707d..80eaecc90a 100644
> > --- a/app/test/test_mcslock.c
> > +++ b/app/test/test_mcslock.c
> > @@ -37,10 +37,6 @@
> >   *   lock multiple times.
> >   */
> > 
> > -RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_me);
> > -RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_try_me);
> > -RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_perf_me);
> > -
> >  rte_mcslock_t *p_ml;
> >  rte_mcslock_t *p_ml_try;
> >  rte_mcslock_t *p_ml_perf;
> > @@ -53,7 +49,7 @@ static int
> >  test_mcslock_per_core(__rte_unused void *arg)
> >  {
> >  	/* Per core me node. */
> > -	rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
> > +	rte_mcslock_t ml_me;
> These variables are modified by other threads. IMO, it is better to keep them global (and not on the stack). From that perspective, I think we should be taking the address of the per lcore variable. For ex:
> rte_mcslock_t *ml_me = &RTE_PER_LCORE(_ml_me);

In my understanding, the only case where another thread modifies our
local variable is when the other thread releases the lock we are waiting
for.  I can't see how it could cause an issue to have the locks in the
stack. Am I missing something?

Thanks,
Olivier


> 
> > 
> >  	rte_mcslock_lock(&p_ml, &ml_me);
> >  	printf("MCS lock taken on core %u\n", rte_lcore_id());
> > @@ -77,7 +73,7 @@ load_loop_fn(void *func_param)
> >  	const unsigned int lcore = rte_lcore_id();
> > 
> >  	/**< Per core me node. */
> > -	rte_mcslock_t ml_perf_me = RTE_PER_LCORE(_ml_perf_me);
> > +	rte_mcslock_t ml_perf_me;
> > 
> >  	/* wait synchro */
> >  	while (rte_atomic32_read(&synchro) == 0)
> > @@ -151,8 +147,8 @@ static int
> >  test_mcslock_try(__rte_unused void *arg)
> >  {
> >  	/**< Per core me node. */
> > -	rte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);
> > -	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
> > +	rte_mcslock_t ml_me;
> > +	rte_mcslock_t ml_try_me;
> > 
> >  	/* Locked ml_try in the main lcore, so it should fail
> >  	 * when trying to lock it in the worker lcore.
> > @@ -178,8 +174,8 @@ test_mcslock(void)
> >  	int i;
> > 
> >  	/* Define per core me node. */
> > -	rte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);
> > -	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
> > +	rte_mcslock_t ml_me;
> > +	rte_mcslock_t ml_try_me;
> > 
> >  	/*
> >  	 * Test mcs lock & unlock on each core
> > --
> > 2.25.1
>
Honnappa Nagarahalli Nov. 4, 2020, 9:20 p.m. UTC | #3
<snip>

> > >
> > > Each core already comes with its local storage for mcslock (in its
> > > stack), therefore there is no need to define an additional per-lcore
> > > mcslock.
> > >
> > > Fixes: 32dcb9fd2a22 ("test/mcslock: add MCS queued lock unit test")
> > >
> > > Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>

> > > ---
> > >  app/test/test_mcslock.c | 16 ++++++----------
> > >  1 file changed, 6 insertions(+), 10 deletions(-)
> > >
> > > diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
> > > index fbca78707d..80eaecc90a 100644
> > > --- a/app/test/test_mcslock.c
> > > +++ b/app/test/test_mcslock.c
> > > @@ -37,10 +37,6 @@
> > >   *   lock multiple times.
> > >   */
> > >
> > > -RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_me);
> > > -RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_try_me);
> > > -RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_perf_me);
> > > -
> > >  rte_mcslock_t *p_ml;
> > >  rte_mcslock_t *p_ml_try;
> > >  rte_mcslock_t *p_ml_perf;
> > > @@ -53,7 +49,7 @@ static int
> > >  test_mcslock_per_core(__rte_unused void *arg)
> > >  {
> > >  	/* Per core me node. */
> > > -	rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
> > > +	rte_mcslock_t ml_me;
> > These variables are modified by other threads. IMO, it is better to keep
> > them global (and not on the stack). From that perspective, I think we should
> > be taking the address of the per lcore variable. For ex:
> > rte_mcslock_t *ml_me = &RTE_PER_LCORE(_ml_me);
> 
> In my understanding, the only case where another thread modifies our local
> variable is when the other thread releases the lock we are waiting for.  I can't
> see how it could cause an issue to have the locks in the stack. Am I missing
> something?
Agree, it was just my personal preference. I am fine with the patch.

> 
> Thanks,
> Olivier
> 
> 
> >
> > >
> > >  	rte_mcslock_lock(&p_ml, &ml_me);
> > >  	printf("MCS lock taken on core %u\n", rte_lcore_id());
> > > @@ -77,7 +73,7 @@ load_loop_fn(void *func_param)
> > >  	const unsigned int lcore = rte_lcore_id();
> > >
> > >  	/**< Per core me node. */
> > > -	rte_mcslock_t ml_perf_me = RTE_PER_LCORE(_ml_perf_me);
> > > +	rte_mcslock_t ml_perf_me;
> > >
> > >  	/* wait synchro */
> > >  	while (rte_atomic32_read(&synchro) == 0)
> > > @@ -151,8 +147,8 @@ static int
> > >  test_mcslock_try(__rte_unused void *arg)
> > >  {
> > >  	/**< Per core me node. */
> > > -	rte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);
> > > -	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
> > > +	rte_mcslock_t ml_me;
> > > +	rte_mcslock_t ml_try_me;
> > >
> > >  	/* Locked ml_try in the main lcore, so it should fail
> > >  	 * when trying to lock it in the worker lcore.
> > > @@ -178,8 +174,8 @@ test_mcslock(void)
> > >  	int i;
> > >
> > >  	/* Define per core me node. */
> > > -	rte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);
> > > -	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
> > > +	rte_mcslock_t ml_me;
> > > +	rte_mcslock_t ml_try_me;
> > >
> > >  	/*
> > >  	 * Test mcs lock & unlock on each core
> > > --
> > > 2.25.1
> >

Patch
diff mbox series

diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
index fbca78707d..80eaecc90a 100644
--- a/app/test/test_mcslock.c
+++ b/app/test/test_mcslock.c
@@ -37,10 +37,6 @@ 
  *   lock multiple times.
  */
 
-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_me);
-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_try_me);
-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_perf_me);
-
 rte_mcslock_t *p_ml;
 rte_mcslock_t *p_ml_try;
 rte_mcslock_t *p_ml_perf;
@@ -53,7 +49,7 @@  static int
 test_mcslock_per_core(__rte_unused void *arg)
 {
 	/* Per core me node. */
-	rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
+	rte_mcslock_t ml_me;
 
 	rte_mcslock_lock(&p_ml, &ml_me);
 	printf("MCS lock taken on core %u\n", rte_lcore_id());
@@ -77,7 +73,7 @@  load_loop_fn(void *func_param)
 	const unsigned int lcore = rte_lcore_id();
 
 	/**< Per core me node. */
-	rte_mcslock_t ml_perf_me = RTE_PER_LCORE(_ml_perf_me);
+	rte_mcslock_t ml_perf_me;
 
 	/* wait synchro */
 	while (rte_atomic32_read(&synchro) == 0)
@@ -151,8 +147,8 @@  static int
 test_mcslock_try(__rte_unused void *arg)
 {
 	/**< Per core me node. */
-	rte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);
-	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
+	rte_mcslock_t ml_me;
+	rte_mcslock_t ml_try_me;
 
 	/* Locked ml_try in the main lcore, so it should fail
 	 * when trying to lock it in the worker lcore.
@@ -178,8 +174,8 @@  test_mcslock(void)
 	int i;
 
 	/* Define per core me node. */
-	rte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);
-	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
+	rte_mcslock_t ml_me;
+	rte_mcslock_t ml_try_me;
 
 	/*
 	 * Test mcs lock & unlock on each core