[dpdk-dev,v4,3/6] ip_pipeline: fix lcore mapping for varying SMT threads as in ppc64

Message ID 000101d1f21d$82985610$87c90230$@linux.vnet.ibm.com (mailing list archive)
State Not Applicable, archived

Commit Message

Chao Zhu Aug. 9, 2016, 9:07 a.m. UTC
  Gowrishankar,

Can you give more description about this patch? 
Thank you!

-----Original Message-----
From: Gowrishankar Muthukrishnan [mailto:gowrishankar.m@linux.vnet.ibm.com] 
Sent: August 6, 2016 20:33
To: dev@dpdk.org
Cc: Chao Zhu <chaozhu@linux.vnet.ibm.com>; Bruce Richardson
<bruce.richardson@intel.com>; Konstantin Ananyev
<konstantin.ananyev@intel.com>; Thomas Monjalon <thomas.monjalon@6wind.com>;
Cristian Dumitrescu <cristian.dumitrescu@intel.com>; Pradeep
<pradeep@us.ibm.com>; gowrishankar <gowrishankar.m@linux.vnet.ibm.com>
Subject: [PATCH v4 3/6] ip_pipeline: fix lcore mapping for varying SMT
threads as in ppc64

From: gowrishankar <gowrishankar.m@linux.vnet.ibm.com>

An offline lcore would still refer to its original core id, and this has to be
considered while creating the CPU core mask.

Signed-off-by: Gowrishankar <gowrishankar.m@linux.vnet.ibm.com>
---
 config/defconfig_ppc_64-power8-linuxapp-gcc |  3 ---
 examples/ip_pipeline/cpu_core_map.c         | 12 +-----------
 examples/ip_pipeline/init.c                 |  4 ++++
 3 files changed, 5 insertions(+), 14 deletions(-)

 		rte_panic("Cannot create CPU core map\n");
--
1.9.1
  

Comments

Gowrishankar Aug. 9, 2016, 11:13 a.m. UTC | #1
Hi Chao,
Sure. Please find the description below.

This patch fixes an ip_pipeline panic in app_init_core_map while preparing the
CPU core map on powerpc with SMT off. cpu_core_map_compute_linux currently
prepares the core mapping based on the existence of the following sysfs files:

/sys/devices/system/cpu/cpu<LCORE_NUM>/topology/physical_package_id
/sys/devices/system/cpu/cpu<LCORE_NUM>/topology/core_id

These files do not exist for lcores which are offline for any reason (as on
powerpc while SMT is off). In this situation, the function should continue
preparing the map for the remaining online lcores instead of returning -1 at
the first unavailable lcore.

Also, in the SMT=off case on powerpc, lcore ids are not always indexed from
0 up to 'number of cores present' (/sys/devices/system/cpu/present). For
example, for online lcore 32 the core_id reported in sysfs is 112 while only
10 lcores are online (in one configuration), so the sysfs lcore id cannot be
checked against the lcore index before positioning it in the lcore map array.
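
As a rough illustration of the skip-and-continue behaviour described above
(only a sketch, not the patch itself; the helper name is invented for the
example, the sysfs paths are the ones listed above):

#include <stdio.h>
#include <stdint.h>

/* Read one integer from a sysfs topology attribute; return -1 when the
 * file is missing, which is the case for offline lcores (e.g. ppc64 with
 * SMT off). */
static int
sysfs_topology_read(uint32_t lcore_id, const char *attr)
{
	char path[256];
	FILE *f;
	int val;

	snprintf(path, sizeof(path),
		"/sys/devices/system/cpu/cpu%u/topology/%s", lcore_id, attr);
	f = fopen(path, "r");
	if (f == NULL)
		return -1;	/* offline lcore: no topology entry */
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int
main(void)
{
	uint32_t lcore_id;

	for (lcore_id = 0; lcore_id < 128; lcore_id++) {
		int socket_id = sysfs_topology_read(lcore_id,
			"physical_package_id");
		int core_id = sysfs_topology_read(lcore_id, "core_id");

		/* Skip offline lcores and keep scanning instead of
		 * returning -1 at the first missing entry. */
		if (socket_id < 0 || core_id < 0)
			continue;

		printf("lcore %u -> socket %d, core %d\n",
			lcore_id, socket_id, core_id);
	}
	return 0;
}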

Thanks,
Gowrishankar

On Tuesday 09 August 2016 02:37 PM, Chao Zhu wrote:
> Gowrishankar,
>
> Can you give more description about this patch?
> Thank you!
>
> -----Original Message-----
> From: Gowrishankar Muthukrishnan [mailto:gowrishankar.m@linux.vnet.ibm.com]
> Sent: August 6, 2016 20:33
> To: dev@dpdk.org
> Cc: Chao Zhu <chaozhu@linux.vnet.ibm.com>; Bruce Richardson
> <bruce.richardson@intel.com>; Konstantin Ananyev
> <konstantin.ananyev@intel.com>; Thomas Monjalon <thomas.monjalon@6wind.com>;
> Cristian Dumitrescu <cristian.dumitrescu@intel.com>; Pradeep
> <pradeep@us.ibm.com>; gowrishankar <gowrishankar.m@linux.vnet.ibm.com>
> Subject: [PATCH v4 3/6] ip_pipeline: fix lcore mapping for varying SMT
> threads as in ppc64
>
> From: gowrishankar <gowrishankar.m@linux.vnet.ibm.com>
>
> offline lcore would still refer to original core id and this has to be
> considered while creating cpu core mask.
>
> Signed-off-by: Gowrishankar <gowrishankar.m@linux.vnet.ibm.com>
> ---
>   config/defconfig_ppc_64-power8-linuxapp-gcc |  3 ---
>   examples/ip_pipeline/cpu_core_map.c         | 12 +-----------
>   examples/ip_pipeline/init.c                 |  4 ++++
>   3 files changed, 5 insertions(+), 14 deletions(-)
>
> diff --git a/config/defconfig_ppc_64-power8-linuxapp-gcc b/config/defconfig_ppc_64-power8-linuxapp-gcc
> index dede34f..a084672 100644
> --- a/config/defconfig_ppc_64-power8-linuxapp-gcc
> +++ b/config/defconfig_ppc_64-power8-linuxapp-gcc
> @@ -58,6 +58,3 @@ CONFIG_RTE_LIBRTE_FM10K_PMD=n
>
>   # This following libraries are not available on Power. So they're turned off.
>   CONFIG_RTE_LIBRTE_SCHED=n
> -CONFIG_RTE_LIBRTE_PORT=n
> -CONFIG_RTE_LIBRTE_TABLE=n
> -CONFIG_RTE_LIBRTE_PIPELINE=n
> diff --git a/examples/ip_pipeline/cpu_core_map.c b/examples/ip_pipeline/cpu_core_map.c
> index cb088b1..482e68e 100644
> --- a/examples/ip_pipeline/cpu_core_map.c
> +++ b/examples/ip_pipeline/cpu_core_map.c
> @@ -351,9 +351,6 @@ cpu_core_map_compute_linux(struct cpu_core_map *map)
>   			int lcore_socket_id =
>   				cpu_core_map_get_socket_id_linux(lcore_id);
>
> -			if (lcore_socket_id < 0)
> -				return -1;
> -
>   			if (((uint32_t) lcore_socket_id) == socket_id)
>   				n_detected++;
>   		}
> @@ -368,18 +365,11 @@ cpu_core_map_compute_linux(struct cpu_core_map *map)
>   					cpu_core_map_get_socket_id_linux(
>   					lcore_id);
>
> -				if (lcore_socket_id < 0)
> -					return -1;
> -
> Why to remove the lcore_socket_id check?
>
>   				int lcore_core_id =
>   					cpu_core_map_get_core_id_linux(
>   						lcore_id);
>
> -				if (lcore_core_id < 0)
> -					return -1;
> -
> -				if (((uint32_t) lcore_socket_id == socket_id) &&
> -					((uint32_t) lcore_core_id == core_id)) {
> +				if ((uint32_t) lcore_socket_id == socket_id) {
>   					uint32_t pos = cpu_core_map_pos(map,
>   						socket_id,
>   						core_id_contig,
> diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
> index cd167f6..60c931f 100644
> --- a/examples/ip_pipeline/init.c
> +++ b/examples/ip_pipeline/init.c
> @@ -61,7 +61,11 @@ static void
>   app_init_core_map(struct app_params *app)  {
>   	APP_LOG(app, HIGH, "Initializing CPU core map ...");
> +#if defined(RTE_ARCH_PPC_64)
> +	app->core_map = cpu_core_map_init(2, 5, 1, 0);
> +#else
>
> This value seems quite strange. Can you give more detail?
>
>   	app->core_map = cpu_core_map_init(4, 32, 4, 0);
> +#endif
>
>   	if (app->core_map == NULL)
>   		rte_panic("Cannot create CPU core map\n");
> --
> 1.9.1
>
>
>
  
Chao Zhu Aug. 11, 2016, 10:29 a.m. UTC | #2
Gowrishankar,

Thanks for the detail.
If my understanding is correct, Power8 comes with different chips: some of the OpenPOWER chips have 8 cores per socket, and the max threads per core is 8. Should we support this in cpu_core_map_init()?

Here's a dump from the OpenPOWER system.
======================================
# lscpu
Architecture:          ppc64le
Byte Order:            Little Endian
CPU(s):                64
On-line CPU(s) list:   0,8,16,24,32,40,48,56
Off-line CPU(s) list:  1-7,9-15,17-23,25-31,33-39,41-47,49-55,57-63
Thread(s) per core:    1
Core(s) per socket:    8
Socket(s):             1
NUMA node(s):          1
Model:                 unknown
L1d cache:             64K
L1i cache:             32K
L2 cache:              512K
L3 cache:              8192K
NUMA node0 CPU(s):     0,8,16,24,32,40,48,56
=========================================


> +#if defined(RTE_ARCH_PPC_64)
> +	app->core_map = cpu_core_map_init(2, 5, 1, 0);
> +#else
>
> This value seems quite strange. Can you give more detail?
>
>   	app->core_map = cpu_core_map_init(4, 32, 4, 0);
> +#endif
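
For reference, assuming cpu_core_map_init() keeps the argument order used in
this thread (max sockets, max cores per socket, max hardware threads per core,
plus an offset), the lscpu output above would translate into upper bounds
roughly like the sketch below; the function name is invented and the values
are only an illustration:

#include "cpu_core_map.h"	/* ip_pipeline example header */

/* Illustrative bounds for the dump above: 1 socket, 8 cores per socket,
 * and up to SMT8 (8 hardware threads per core) on POWER8, even though SMT
 * is currently off on this box (the map only needs maxima). */
static struct cpu_core_map *
core_map_for_this_openpower_box(void)
{
	return cpu_core_map_init(1, 8, 8, 0);
}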


  
Gowrishankar Aug. 11, 2016, 12:01 p.m. UTC | #3
On Thursday 11 August 2016 03:59 PM, Chao Zhu wrote:
> Gowrishankar,
>
> Thanks for the detail.
> If my understanding is correct, Power8 has different chips. Some of the OpenPOWER chips have 8 cores per socket. And the max threads per core is 8. Should we support this in cpu_core_map_init()?
>
> Here's a dump from the OpenPOWER system.
> ======================================
> # lscpu
> Architecture:          ppc64le
> Byte Order:            Little Endian
> CPU(s):                64
> On-line CPU(s) list:   0,8,16,24,32,40,48,56
> Off-line CPU(s) list:  1-7,9-15,17-23,25-31,33-39,41-47,49-55,57-63
> Thread(s) per core:    1
> Core(s) per socket:    8
> Socket(s):             1
> NUMA node(s):          1
> Model:                 unknown
> L1d cache:             64K
> L1i cache:             32K
> L2 cache:              512K
> L3 cache:              8192K
> NUMA node0 CPU(s):     0,8,16,24,32,40,48,56
> =========================================
>
>
>> +#if defined(RTE_ARCH_PPC_64)
>> +	app->core_map = cpu_core_map_init(2, 5, 1, 0);
>> +#else
>>
>> This value seems quite strange. Can you give more detail?

Based on the config of the tested server (output below),

CPU(s):                80
On-line CPU(s) list:   0,8,16,24,32,40,48,56,64,72
Off-line CPU(s) list:  1-7,9-15,17-23,25-31,33-39,41-47,49-55,57-63,65-71,73-79
Thread(s) per core:    1   <<<
Core(s) per socket:    5   <<<
Socket(s):             2   <<<
NUMA node(s):          2

the cpu_core_map_init parameters (2, 5, 1, 0) were prepared. Instead, we can
cap the max socket/core/HT counts at the maximum supported today.

Regards,
Gowrishankar
>>
>>    	app->core_map = cpu_core_map_init(4, 32, 4, 0);
>> +#endif
  
Chao Zhu Aug. 12, 2016, 8:44 a.m. UTC | #4
Gowrishankar,

I suggest setting the following values:

n_max_cores_per_socket = 8
n_max_ht_per_core = 8

This will cover most of the Power8 servers. 
Any comments?
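
A sketch of how the ppc64 branch in app_init_core_map() might look with these
caps (assuming the example's app.h and cpu_core_map.h; the socket count kept
from the generic call is only a placeholder here, not the final patch):

#include <rte_debug.h>
#include "app.h"		/* ip_pipeline application types */
#include "cpu_core_map.h"

static void
app_init_core_map_sketch(struct app_params *app)
{
	APP_LOG(app, HIGH, "Initializing CPU core map ...");
#if defined(RTE_ARCH_PPC_64)
	/* Cap at 8 cores per socket and SMT8 as suggested above. */
	app->core_map = cpu_core_map_init(4, 8, 8, 0);
#else
	app->core_map = cpu_core_map_init(4, 32, 4, 0);
#endif
	if (app->core_map == NULL)
		rte_panic("Cannot create CPU core map\n");
}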

  
Gowrishankar Aug. 12, 2016, 8:59 a.m. UTC | #5
On Friday 12 August 2016 02:14 PM, Chao Zhu wrote:
> Gowrishankar,
>
> I suggest to set the following value:
>
> n_max_cores_per_socket = 8
> n_max_ht_per_core = 8
>
> This will cover most of the Power8 servers.
> Any comments?
Sure, Chao. I will include this change in v5. If there are no other comments,
I can spin out v5 with the changes in this patch.

Regards,
Gowrishankar
  
Chao Zhu Aug. 12, 2016, 10:15 a.m. UTC | #6
Another comment: commenting out the lcore_socket_id check will influence other architectures. If possible, I would like to make this change Power-specific.
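
One way to keep the stricter behaviour on other architectures while skipping
offline lcores only on Power could look like the toy example below (purely
illustrative, not the submitted change; the stub stands in for
cpu_core_map_get_socket_id_linux() and the fake topology is made up):

#include <stdio.h>

/* Stand-in for cpu_core_map_get_socket_id_linux(): returns -1 when the
 * lcore's sysfs topology entry is missing (offline lcore). The layout is
 * faked: only every 8th lcore is online, as on a ppc64 box with SMT off. */
static int
get_socket_id(unsigned int lcore_id)
{
	return (lcore_id % 8 == 0) ? 0 : -1;
}

static int
compute_map(void)
{
	unsigned int lcore_id;

	for (lcore_id = 0; lcore_id < 16; lcore_id++) {
		int socket_id = get_socket_id(lcore_id);

		if (socket_id < 0) {
#if defined(RTE_ARCH_PPC_64)
			continue;	/* offline lcore: skip and go on */
#else
			return -1;	/* keep the strict check elsewhere */
#endif
		}
		printf("lcore %u is on socket %d\n", lcore_id, socket_id);
	}
	return 0;
}

int
main(void)
{
	return compute_map();
}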

  
Gowrishankar Aug. 12, 2016, 10:34 a.m. UTC | #7
On Friday 12 August 2016 03:45 PM, Chao Zhu wrote:
> Another comment is, comment out lcore_socket_id check will influence other architectures. If possible, I would like to make this change to Power specific.

Hi Chao,
I am revisiting the cpu_core_map_init() function. I realize that all we handle
there are the maximum values for sockets/cores/HT, so ideally the function
should not break on any arch.

In a quick test without adjusting the previous max values (4, 32, 4, 0), the
CPU map creation panics on ppc only for ht > 1, i.e. (4, 32, 1, 0) is able to
create the core map and the app runs. I will continue to debug this and fix
this example app separately.

So, I will be sending the powerpc-specific enablement of lpm, acl, port, table
and sched in v5. Example ip_pipeline will be fixed in a separate patch.

Thanks,
Gowrishankar
  
Gowrishankar Aug. 12, 2016, 12:05 p.m. UTC | #8
Hi Chao,
I have simplified the approach for this patch in v5:
* including the ppc64le-specific changes;
* the app panic while creating the core map occurs only in the SMT=off case,
  so that will be addressed separately.

Hopefully the new patch set v5 will make your review easier.

Regards,
Gowrishankar

On Friday 12 August 2016 03:45 PM, Chao Zhu wrote:
> Another comment is, comment out lcore_socket_id check will influence other architectures. If possible, I would like to make this change to Power specific.
>
> -----Original Message-----
> From: gowrishankar muthukrishnan [mailto:gowrishankar.m@linux.vnet.ibm.com]
> Sent: 2016年8月12日 17:00
> To: Chao Zhu <chaozhu@linux.vnet.ibm.com>
> Cc: dev@dpdk.org; 'Bruce Richardson' <bruce.richardson@intel.com>; 'Konstantin Ananyev' <konstantin.ananyev@intel.com>; 'Thomas Monjalon' <thomas.monjalon@6wind.com>; 'Cristian Dumitrescu' <cristian.dumitrescu@intel.com>; 'Pradeep' <pradeep@us.ibm.com>
> Subject: Re: [PATCH v4 3/6] ip_pipeline: fix lcore mapping for varying SMT threads as in ppc64
>
> On Friday 12 August 2016 02:14 PM, Chao Zhu wrote:
>> Gowrishankar,
>>
>> I suggest to set the following value:
>>
>> n_max_cores_per_socket = 8
>> n_max_ht_per_core = 8
>>
>> This will cover most of the Power8 servers.
>> Any comments?
> Sure Chao. I will include this change in v5. If there are no other comments, I can spin out v5, with changes in this patch.
>
> Regards,
> Gowrishankar
>> -----Original Message-----
>> From: gowrishankar muthukrishnan
>> [mailto:gowrishankar.m@linux.vnet.ibm.com]
>> Sent: 2016年8月11日 20:02
>> To: Chao Zhu <chaozhu@linux.vnet.ibm.com>
>> Cc: dev@dpdk.org; 'Bruce Richardson' <bruce.richardson@intel.com>;
>> 'Konstantin Ananyev' <konstantin.ananyev@intel.com>; 'Thomas Monjalon'
>> <thomas.monjalon@6wind.com>; 'Cristian Dumitrescu'
>> <cristian.dumitrescu@intel.com>; 'Pradeep' <pradeep@us.ibm.com>
>> Subject: Re: [PATCH v4 3/6] ip_pipeline: fix lcore mapping for varying
>> SMT threads as in ppc64
>>
>> On Thursday 11 August 2016 03:59 PM, Chao Zhu wrote:
>>> Gowrishankar,
>>>
>>> Thanks for the detail.
>>> If my understanding is correct, Power8 has different chips. Some of the OpenPOWER chips have 8 cores per socket. And the max threads per core is 8. Should we support this in cpu_core_map_init()?
>>>
>>> Here's a dump from the OpenPOWER system.
>>> ======================================
>>> # lscpu
>>> Architecture:          ppc64le
>>> Byte Order:            Little Endian
>>> CPU(s):                64
>>> On-line CPU(s) list:   0,8,16,24,32,40,48,56
>>> Off-line CPU(s) list:  1-7,9-15,17-23,25-31,33-39,41-47,49-55,57-63
>>> Thread(s) per core:    1
>>> Core(s) per socket:    8
>>> Socket(s):             1
>>> NUMA node(s):          1
>>> Model:                 unknown
>>> L1d cache:             64K
>>> L1i cache:             32K
>>> L2 cache:              512K
>>> L3 cache:              8192K
>>> NUMA node0 CPU(s):     0,8,16,24,32,40,48,56
>>> =========================================
>>>
>>>
>>>> +#if defined(RTE_ARCH_PPC_64)
>>>> +	app->core_map = cpu_core_map_init(2, 5, 1, 0); #else
>>>>
>>>> This value seems quite strange. Can you give more detail?
>> Based on config of tested server (as below output),
>>
>> CPU(s):                80
>> On-line CPU(s) list:   0,8,16,24,32,40,48,56,64,72
>> Off-line CPU(s) list:
>> 1-7,9-15,17-23,25-31,33-39,41-47,49-55,57-63,65-71,73-79
>> Thread(s) per core:    1  <<<
>> Core(s) per socket:    5   <<<
>> Socket(s):             2       <<<
>> NUMA node(s):          2
>>
>> cpu_core_map_init parameters (2,5,1,0) were prepared. Instead, we can cap max sockets/core/ht counts to possible maximum supported today.
>>
>> Regards,
>> Gowrishankar
>>>>      	app->core_map = cpu_core_map_init(4, 32, 4, 0);
>>>> +#endif
>>> -----Original Message-----
>>> From: gowrishankar muthukrishnan
>>> [mailto:gowrishankar.m@linux.vnet.ibm.com]
>>> Sent: 2016年8月9日 19:14
>>> To: Chao Zhu <chaozhu@linux.vnet.ibm.com>; dev@dpdk.org
>>> Cc: 'Bruce Richardson' <bruce.richardson@intel.com>; 'Konstantin
>>> Ananyev' <konstantin.ananyev@intel.com>; 'Thomas Monjalon'
>>> <thomas.monjalon@6wind.com>; 'Cristian Dumitrescu'
>>> <cristian.dumitrescu@intel.com>; 'Pradeep' <pradeep@us.ibm.com>
>>> Subject: Re: [PATCH v4 3/6] ip_pipeline: fix lcore mapping for
>>> varying SMT threads as in ppc64
>>>
>>> Hi Chao,
>>> Sure. Please find below one.
>>>
>>> This patch fixes ip_pipeline panic in app_init_core_map while preparing cpu core map in powerpc with SMT off. cpu_core_map_compute_linux currently prepares core mapping based on file existence in sysfs ie.
>>>
>>> /sys/devices/system/cpu/cpu<LCORE_NUM>/topology/physical_package_id
>>>       /sys/devices/system/cpu/cpu<LCORE_NUM>/topology/core_id
>>>
>>> These files do not exist for lcores which are offline for any reason (as in powerpc, while SMT is off). In this situation, this function should further continue preparing map for other online lcores instead of returning with -1 for a first unavailable lcore.
>>>
>>> Also, in SMT=off scenario for powerpc, lcore ids can not be always
>>> indexed from
>>> 0 upto 'number of cores present' (/sys/devices/system/cpu/present).
>>> For eg, for an online lcore 32, core_id returned in sysfs is 112
>>> where online lcores are
>>> 10 (as in one configuration), hence sysfs lcore id can not be checked with indexing lcore number before positioning lcore map array.
>>>
>>> Thanks,
>>> Gowrishankar
>>>
>>> On Tuesday 09 August 2016 02:37 PM, Chao Zhu wrote:
>>>> Gowrishankar,
>>>>
>>>> Can you give more description about this patch?
>>>> Thank you!
>>>>
>>>> -----Original Message-----
>>>> From: Gowrishankar Muthukrishnan
>>>> [mailto:gowrishankar.m@linux.vnet.ibm.com]
>>>> Sent: 2016年8月6日 20:33
>>>> To: dev@dpdk.org
>>>> Cc: Chao Zhu <chaozhu@linux.vnet.ibm.com>; Bruce Richardson
>>>> <bruce.richardson@intel.com>; Konstantin Ananyev
>>>> <konstantin.ananyev@intel.com>; Thomas Monjalon
>>>> <thomas.monjalon@6wind.com>; Cristian Dumitrescu
>>>> <cristian.dumitrescu@intel.com>; Pradeep <pradeep@us.ibm.com>;
>>>> gowrishankar <gowrishankar.m@linux.vnet.ibm.com>
>>>> Subject: [PATCH v4 3/6] ip_pipeline: fix lcore mapping for varying
>>>> SMT threads as in ppc64
>>>>
>>>> From: gowrishankar <gowrishankar.m@linux.vnet.ibm.com>
>>>>
>>>> offline lcore would still refer to original core id and this has to
>>>> be considered while creating cpu core mask.
>>>>
>>>> Signed-off-by: Gowrishankar <gowrishankar.m@linux.vnet.ibm.com>
>>>> ---
>>>>      config/defconfig_ppc_64-power8-linuxapp-gcc |  3 ---
>>>>      examples/ip_pipeline/cpu_core_map.c         | 12 +-----------
>>>>      examples/ip_pipeline/init.c                 |  4 ++++
>>>>      3 files changed, 5 insertions(+), 14 deletions(-)
>>>>
>>>> diff --git a/config/defconfig_ppc_64-power8-linuxapp-gcc
>>>> b/config/defconfig_ppc_64-power8-linuxapp-gcc
>>>> index dede34f..a084672 100644
>>>> --- a/config/defconfig_ppc_64-power8-linuxapp-gcc
>>>> +++ b/config/defconfig_ppc_64-power8-linuxapp-gcc
>>>> @@ -58,6 +58,3 @@ CONFIG_RTE_LIBRTE_FM10K_PMD=n
>>>>
>>>>      # This following libraries are not available on Power. So they're turned off.
>>>>      CONFIG_RTE_LIBRTE_SCHED=n
>>>> -CONFIG_RTE_LIBRTE_PORT=n
>>>> -CONFIG_RTE_LIBRTE_TABLE=n
>>>> -CONFIG_RTE_LIBRTE_PIPELINE=n
>>>> diff --git a/examples/ip_pipeline/cpu_core_map.c
>>>> b/examples/ip_pipeline/cpu_core_map.c
>>>> index cb088b1..482e68e 100644
>>>> --- a/examples/ip_pipeline/cpu_core_map.c
>>>> +++ b/examples/ip_pipeline/cpu_core_map.c
>>>> @@ -351,9 +351,6 @@ cpu_core_map_compute_linux(struct cpu_core_map *map)
>>>>      			int lcore_socket_id =
>>>>      				cpu_core_map_get_socket_id_linux(lcore_id);
>>>>
>>>> -			if (lcore_socket_id < 0)
>>>> -				return -1;
>>>> -
>>>>      			if (((uint32_t) lcore_socket_id) == socket_id)
>>>>      				n_detected++;
>>>>      		}
>>>> @@ -368,18 +365,11 @@ cpu_core_map_compute_linux(struct cpu_core_map *map)
>>>>      					cpu_core_map_get_socket_id_linux(
>>>>      					lcore_id);
>>>>
>>>> -				if (lcore_socket_id < 0)
>>>> -					return -1;
>>>> -
>>>> Why remove the lcore_socket_id check?
>>>>
>>>>      				int lcore_core_id =
>>>>      					cpu_core_map_get_core_id_linux(
>>>>      						lcore_id);
>>>>
>>>> -				if (lcore_core_id < 0)
>>>> -					return -1;
>>>> -
>>>> -				if (((uint32_t) lcore_socket_id == socket_id) &&
>>>> -					((uint32_t) lcore_core_id == core_id)) {
>>>> +				if ((uint32_t) lcore_socket_id == socket_id) {
>>>>      					uint32_t pos = cpu_core_map_pos(map,
>>>>      						socket_id,
>>>>      						core_id_contig,
>>>> diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
>>>> index cd167f6..60c931f 100644
>>>> --- a/examples/ip_pipeline/init.c
>>>> +++ b/examples/ip_pipeline/init.c
>>>> @@ -61,7 +61,11 @@ static void
>>>>      app_init_core_map(struct app_params *app)
>>>>      {
>>>>      	APP_LOG(app, HIGH, "Initializing CPU core map ...");
>>>> +#if defined(RTE_ARCH_PPC_64)
>>>> +	app->core_map = cpu_core_map_init(2, 5, 1, 0);
>>>> +#else
>>>>
>>>> This value seems quite strange. Can you give more detail?
>>>>
>>>>      	app->core_map = cpu_core_map_init(4, 32, 4, 0);
>>>> +#endif
>>>>
>>>>      	if (app->core_map == NULL)
>>>>      		rte_panic("Cannot create CPU core map\n");
>>>> --
>>>> 1.9.1
>>>>
>>>>
>>>>
>>>
>>
>
>
  
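
For clarity, the init change discussed above for v5 would look roughly as
follows. This is only a sketch: the 8 cores per socket and 8 threads per core
are the values Chao suggested, the 2 sockets are assumed from the tested
servers, and the final v5 patch may differ.

#if defined(RTE_ARCH_PPC_64)
	/* POWER8 sizing discussed for v5: up to 2 sockets, 8 cores per
	 * socket, 8 SMT threads per core (assumed values, not the final
	 * patch). */
	app->core_map = cpu_core_map_init(2, 8, 8, 0);
#else
	app->core_map = cpu_core_map_init(4, 32, 4, 0);
#endif

	if (app->core_map == NULL)
		rte_panic("Cannot create CPU core map\n");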

Patch

diff --git a/config/defconfig_ppc_64-power8-linuxapp-gcc
b/config/defconfig_ppc_64-power8-linuxapp-gcc
index dede34f..a084672 100644
--- a/config/defconfig_ppc_64-power8-linuxapp-gcc
+++ b/config/defconfig_ppc_64-power8-linuxapp-gcc
@@ -58,6 +58,3 @@  CONFIG_RTE_LIBRTE_FM10K_PMD=n

 # This following libraries are not available on Power. So they're turned off.
 CONFIG_RTE_LIBRTE_SCHED=n
-CONFIG_RTE_LIBRTE_PORT=n
-CONFIG_RTE_LIBRTE_TABLE=n
-CONFIG_RTE_LIBRTE_PIPELINE=n
diff --git a/examples/ip_pipeline/cpu_core_map.c
b/examples/ip_pipeline/cpu_core_map.c
index cb088b1..482e68e 100644
--- a/examples/ip_pipeline/cpu_core_map.c
+++ b/examples/ip_pipeline/cpu_core_map.c
@@ -351,9 +351,6 @@  cpu_core_map_compute_linux(struct cpu_core_map *map)
 			int lcore_socket_id =
 				cpu_core_map_get_socket_id_linux(lcore_id);

-			if (lcore_socket_id < 0)
-				return -1;
-
 			if (((uint32_t) lcore_socket_id) == socket_id)
 				n_detected++;
 		}
@@ -368,18 +365,11 @@  cpu_core_map_compute_linux(struct cpu_core_map *map)
 					cpu_core_map_get_socket_id_linux(
 					lcore_id);

-				if (lcore_socket_id < 0)
-					return -1;
-
 				int lcore_core_id =
 					cpu_core_map_get_core_id_linux(
 						lcore_id);

-				if (lcore_core_id < 0)
-					return -1;
-
-				if (((uint32_t) lcore_socket_id == socket_id) &&
-					((uint32_t) lcore_core_id == core_id)) {
+				if ((uint32_t) lcore_socket_id == socket_id) {
 					uint32_t pos = cpu_core_map_pos(map,
 						socket_id,
 						core_id_contig,
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index cd167f6..60c931f 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -61,7 +61,11 @@  static void
 app_init_core_map(struct app_params *app)
 {
 	APP_LOG(app, HIGH, "Initializing CPU core map ...");
+#if defined(RTE_ARCH_PPC_64)
+	app->core_map = cpu_core_map_init(2, 5, 1, 0);
+#else
 	app->core_map = cpu_core_map_init(4, 32, 4, 0);
+#endif

 	if (app->core_map == NULL)