[v2] hash: fix thash gfni implementation

Message ID 20211112141719.232932-1-vladimir.medvedkin@intel.com (mailing list archive)
State Superseded, archived
Delegated to: David Marchand
Headers
Series [v2] hash: fix thash gfni implementation |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/github-robot: build success github build: passed
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS

Commit Message

Vladimir Medvedkin Nov. 12, 2021, 2:17 p.m. UTC
  1. This patch replaces _mm512_set_epi8 with _mm512_set_epi32
due to the lack of support by some compilers.
2. This patch checks if AVX512F is supported along with GFNI.
This is done if the code is built on a platform that supports GFNI,
but does not support AVX512.
3. Also this patch fixes compilation problems on 32bit arch due to
lack of support for _mm_extract_epi64() by implementing XOR folding
with _mm_extract_epi32() on 32-bit arch.

Fixes: 4fd8c4cb0de1 ("hash: add new Toeplitz hash implementation")
Cc: vladimir.medvedkin@intel.com

Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Acked-by: Lance Richardson <lance.richardson@broadcom.com>
Acked-by: Ji, Kai <kai.ji@intel.com>
---
 lib/hash/rte_thash_x86_gfni.h | 44 ++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 19 deletions(-)
  

Comments

David Marchand Nov. 16, 2021, 1:53 p.m. UTC | #1
On Fri, Nov 12, 2021 at 3:17 PM Vladimir Medvedkin
<vladimir.medvedkin@intel.com> wrote:
>
> 1. This patch replaces _mm512_set_epi8 with _mm512_set_epi32
> due to the lack of support by some compilers.

Ok, it was the initial report from Lance.

> 2. This patch checks if AVX512F is supported along with GFNI.
> This is done if the code is built on a platform that supports GFNI,
> but does not support AVX512.

Ok.

> 3. Also this patch fixes compilation problems on 32bit arch due to
> lack of support for _mm_extract_epi64() by implementing XOR folding
> with _mm_extract_epi32() on 32-bit arch.

This code is under a #if defined(__GFNI__) && defined(__AVX512F__).

Does such a 32 bits processor exist, that supports AVX512 and GFNI?


>
> Fixes: 4fd8c4cb0de1 ("hash: add new Toeplitz hash implementation")
> Cc: vladimir.medvedkin@intel.com
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> Acked-by: Lance Richardson <lance.richardson@broadcom.com>
> Acked-by: Ji, Kai <kai.ji@intel.com>
> ---
>  lib/hash/rte_thash_x86_gfni.h | 44 ++++++++++++++++++++---------------
>  1 file changed, 25 insertions(+), 19 deletions(-)
>
> diff --git a/lib/hash/rte_thash_x86_gfni.h b/lib/hash/rte_thash_x86_gfni.h
> index c2889c3734..987dec4988 100644
> --- a/lib/hash/rte_thash_x86_gfni.h
> +++ b/lib/hash/rte_thash_x86_gfni.h
> @@ -18,7 +18,7 @@
>  extern "C" {
>  #endif
>
> -#ifdef __GFNI__
> +#if defined(__GFNI__) && defined(__AVX512F__)

Please update the #endif comments accordingly, or remove the invalid/obsolete
comment about __GFNI__.


>  #define RTE_THASH_GFNI_DEFINED
>
>  #define RTE_THASH_FIRST_ITER_MSK       0x0f0f0f0f0f0e0c08
> @@ -33,7 +33,6 @@ __rte_thash_xor_reduce(__m512i xor_acc, uint32_t *val_1, uint32_t *val_2)
>  {
>         __m256i tmp_256_1, tmp_256_2;
>         __m128i tmp128_1, tmp128_2;
> -       uint64_t tmp_1, tmp_2;
>
>         tmp_256_1 = _mm512_castsi512_si256(xor_acc);
>         tmp_256_2 = _mm512_extracti32x8_epi32(xor_acc, 1);
> @@ -43,12 +42,24 @@ __rte_thash_xor_reduce(__m512i xor_acc, uint32_t *val_1, uint32_t *val_2)
>         tmp128_2 = _mm256_extracti32x4_epi32(tmp_256_1, 1);
>         tmp128_1 = _mm_xor_si128(tmp128_1, tmp128_2);
>
> +#ifdef RTE_ARCH_X86_64
> +       uint64_t tmp_1, tmp_2;
>         tmp_1 = _mm_extract_epi64(tmp128_1, 0);
>         tmp_2 = _mm_extract_epi64(tmp128_1, 1);
>         tmp_1 ^= tmp_2;
>
>         *val_1 = (uint32_t)tmp_1;
>         *val_2 = (uint32_t)(tmp_1 >> 32);
> +#else
> +       uint32_t tmp_1, tmp_2;
> +       tmp_1 = _mm_extract_epi32(tmp128_1, 0);
> +       tmp_2 = _mm_extract_epi32(tmp128_1, 1);
> +       tmp_1 ^= _mm_extract_epi32(tmp128_1, 2);
> +       tmp_2 ^= _mm_extract_epi32(tmp128_1, 3);
> +
> +       *val_1 = tmp_1;
> +       *val_2 = tmp_2;
> +#endif
>  }
>
>  __rte_internal
> @@ -56,23 +67,18 @@ static inline __m512i
>  __rte_thash_gfni(const uint64_t *mtrx, const uint8_t *tuple,
>         const uint8_t *secondary_tuple, int len)
>  {
> -       __m512i permute_idx = _mm512_set_epi8(7, 6, 5, 4, 7, 6, 5, 4,
> -                                               6, 5, 4, 3, 6, 5, 4, 3,
> -                                               5, 4, 3, 2, 5, 4, 3, 2,
> -                                               4, 3, 2, 1, 4, 3, 2, 1,
> -                                               3, 2, 1, 0, 3, 2, 1, 0,
> -                                               2, 1, 0, -1, 2, 1, 0, -1,
> -                                               1, 0, -1, -2, 1, 0, -1, -2,
> -                                               0, -1, -2, -3, 0, -1, -2, -3);
> -
> -       const __m512i rewind_idx = _mm512_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
> -                                               0, 0, 0, 0, 0, 0, 0, 0,
> -                                               0, 0, 0, 0, 0, 0, 0, 0,
> -                                               0, 0, 0, 0, 0, 0, 0, 0,
> -                                               0, 0, 0, 0, 0, 0, 0, 0,
> -                                               0, 0, 0, 59, 0, 0, 0, 59,
> -                                               0, 0, 59, 58, 0, 0, 59, 58,
> -                                               0, 59, 58, 57, 0, 59, 58, 57);
> +       __m512i permute_idx = _mm512_set_epi32(0x7060504, 0x7060504,

Nit: it is easier to read fully expanded 32 bits values, like
0x07060504 instead of 0x7060504
Etc...


> +                                               0x6050403, 0x6050403,
> +                                               0x5040302, 0x5040302,
> +                                               0x4030201, 0x4030201,
> +                                               0x3020100, 0x3020100,
> +                                               0x20100FF, 0x20100FF,
> +                                               0x100FFFE, 0x100FFFE,
> +                                               0xFFFEFD, 0xFFFEFD);
> +       const __m512i rewind_idx = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0,
> +                                                       0, 0, 0x3B, 0x3B,
> +                                                       0x3B3A, 0x3B3A,
> +                                                       0x3B3A39, 0x3B3A39);
>         const __mmask64 rewind_mask = RTE_THASH_REWIND_MSK;
>         const __m512i shift_8 = _mm512_set1_epi8(8);
>         __m512i xor_acc = _mm512_setzero_si512();
> --
> 2.25.1
>
  
Conor Walsh Nov. 16, 2021, 2:08 p.m. UTC | #2
> From: David Marchand <david.marchand@redhat.com>
> Sent: Tuesday 16 November 2021 13:54
> To: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> Cc: dev <dev@dpdk.org>; Thomas Monjalon <thomas@monjalon.net>;
> Ananyev, Konstantin <konstantin.ananyev@intel.com>; Lance Richardson
> <lance.richardson@broadcom.com>; Ji@dpdk.org; Ji, Kai <kai.ji@intel.com>;
> Wang, Yipeng1 <yipeng1.wang@intel.com>; Gobriel, Sameh
> <sameh.gobriel@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>
> Subject: Re: [PATCH v2] hash: fix thash gfni implementation
> 
> On Fri, Nov 12, 2021 at 3:17 PM Vladimir Medvedkin
> <vladimir.medvedkin@intel.com> wrote:
> >
> > 1. This patch replaces _mm512_set_epi8 with _mm512_set_epi32
> > due to the lack of support by some compilers.
> 
> Ok, it was the initial report from Lance.
> 
> > 2. This patch checks if AVX512F is supported along with GFNI.
> > This is done if the code is built on a platform that supports GFNI,
> > but does not support AVX512.
> 
> Ok.
> 
> > 3. Also this patch fixes compilation problems on 32bit arch due to
> > lack of support for _mm_extract_epi64() by implementing XOR folding
> > with _mm_extract_epi32() on 32-bit arch.
> 
> This code is under a #if defined(__GFNI__) && defined(__AVX512F__).
> 
> Does such a 32 bits processor exist, that supports AVX512 and GFNI?


Hi David,

I ran into this issue and reported it to Vladimir.
Currently, when you try to cross-compile main for 32-bit (e.g. with test-meson-builds.sh), it's broken because of this issue, which affects our ability to test.
So in that scenario it sees that GFNI and AVX-512 are on the system, but it can't build because it's trying to cross-compile 64-bit-only functions for a 32-bit build.

Thanks,
Conor.


> 
> 
> >
> > Fixes: 4fd8c4cb0de1 ("hash: add new Toeplitz hash implementation")
> > Cc: vladimir.medvedkin@intel.com
> >
> > Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> > Acked-by: Lance Richardson <lance.richardson@broadcom.com>
> > Acked-by: Ji, Kai <kai.ji@intel.com>
> > ---
> >  lib/hash/rte_thash_x86_gfni.h | 44 ++++++++++++++++++++---------------
> >  1 file changed, 25 insertions(+), 19 deletions(-)
> >
> > diff --git a/lib/hash/rte_thash_x86_gfni.h b/lib/hash/rte_thash_x86_gfni.h
> > index c2889c3734..987dec4988 100644
> > --- a/lib/hash/rte_thash_x86_gfni.h
> > +++ b/lib/hash/rte_thash_x86_gfni.h
> > @@ -18,7 +18,7 @@
> >  extern "C" {
> >  #endif
> >
> > -#ifdef __GFNI__
> > +#if defined(__GFNI__) && defined(__AVX512F__)
> 
> Please update the #endif comments accordingly, or remove the invalid/obsolete
> comment about __GFNI__.
> 
> 
> >  #define RTE_THASH_GFNI_DEFINED
> >
> >  #define RTE_THASH_FIRST_ITER_MSK       0x0f0f0f0f0f0e0c08
> > @@ -33,7 +33,6 @@ __rte_thash_xor_reduce(__m512i xor_acc, uint32_t
> *val_1, uint32_t *val_2)
> >  {
> >         __m256i tmp_256_1, tmp_256_2;
> >         __m128i tmp128_1, tmp128_2;
> > -       uint64_t tmp_1, tmp_2;
> >
> >         tmp_256_1 = _mm512_castsi512_si256(xor_acc);
> >         tmp_256_2 = _mm512_extracti32x8_epi32(xor_acc, 1);
> > @@ -43,12 +42,24 @@ __rte_thash_xor_reduce(__m512i xor_acc,
> uint32_t *val_1, uint32_t *val_2)
> >         tmp128_2 = _mm256_extracti32x4_epi32(tmp_256_1, 1);
> >         tmp128_1 = _mm_xor_si128(tmp128_1, tmp128_2);
> >
> > +#ifdef RTE_ARCH_X86_64
> > +       uint64_t tmp_1, tmp_2;
> >         tmp_1 = _mm_extract_epi64(tmp128_1, 0);
> >         tmp_2 = _mm_extract_epi64(tmp128_1, 1);
> >         tmp_1 ^= tmp_2;
> >
> >         *val_1 = (uint32_t)tmp_1;
> >         *val_2 = (uint32_t)(tmp_1 >> 32);
> > +#else
> > +       uint32_t tmp_1, tmp_2;
> > +       tmp_1 = _mm_extract_epi32(tmp128_1, 0);
> > +       tmp_2 = _mm_extract_epi32(tmp128_1, 1);
> > +       tmp_1 ^= _mm_extract_epi32(tmp128_1, 2);
> > +       tmp_2 ^= _mm_extract_epi32(tmp128_1, 3);
> > +
> > +       *val_1 = tmp_1;
> > +       *val_2 = tmp_2;
> > +#endif
> >  }
> >
> >  __rte_internal
> > @@ -56,23 +67,18 @@ static inline __m512i
> >  __rte_thash_gfni(const uint64_t *mtrx, const uint8_t *tuple,
> >         const uint8_t *secondary_tuple, int len)
> >  {
> > -       __m512i permute_idx = _mm512_set_epi8(7, 6, 5, 4, 7, 6, 5, 4,
> > -                                               6, 5, 4, 3, 6, 5, 4, 3,
> > -                                               5, 4, 3, 2, 5, 4, 3, 2,
> > -                                               4, 3, 2, 1, 4, 3, 2, 1,
> > -                                               3, 2, 1, 0, 3, 2, 1, 0,
> > -                                               2, 1, 0, -1, 2, 1, 0, -1,
> > -                                               1, 0, -1, -2, 1, 0, -1, -2,
> > -                                               0, -1, -2, -3, 0, -1, -2, -3);
> > -
> > -       const __m512i rewind_idx = _mm512_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
> > -                                               0, 0, 0, 0, 0, 0, 0, 0,
> > -                                               0, 0, 0, 0, 0, 0, 0, 0,
> > -                                               0, 0, 0, 0, 0, 0, 0, 0,
> > -                                               0, 0, 0, 0, 0, 0, 0, 0,
> > -                                               0, 0, 0, 59, 0, 0, 0, 59,
> > -                                               0, 0, 59, 58, 0, 0, 59, 58,
> > -                                               0, 59, 58, 57, 0, 59, 58, 57);
> > +       __m512i permute_idx = _mm512_set_epi32(0x7060504, 0x7060504,
> 
> Nit: it is easier to read fully expanded 32 bits values, like
> 0x07060504 instead of 0x7060504
> Etc...
> 
> 
> > +                                               0x6050403, 0x6050403,
> > +                                               0x5040302, 0x5040302,
> > +                                               0x4030201, 0x4030201,
> > +                                               0x3020100, 0x3020100,
> > +                                               0x20100FF, 0x20100FF,
> > +                                               0x100FFFE, 0x100FFFE,
> > +                                               0xFFFEFD, 0xFFFEFD);
> > +       const __m512i rewind_idx = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0,
> > +                                                       0, 0, 0x3B, 0x3B,
> > +                                                       0x3B3A, 0x3B3A,
> > +                                                       0x3B3A39, 0x3B3A39);
> >         const __mmask64 rewind_mask = RTE_THASH_REWIND_MSK;
> >         const __m512i shift_8 = _mm512_set1_epi8(8);
> >         __m512i xor_acc = _mm512_setzero_si512();
> > --
> > 2.25.1
> >
> 
> 
> --
> David Marchand
  
Bruce Richardson Nov. 16, 2021, 2:10 p.m. UTC | #3
On Tue, Nov 16, 2021 at 02:53:49PM +0100, David Marchand wrote:
> On Fri, Nov 12, 2021 at 3:17 PM Vladimir Medvedkin
> <vladimir.medvedkin@intel.com> wrote:
> >
> > 1. This patch replaces _mm512_set_epi8 with _mm512_set_epi32
> > due to the lack of support by some compilers.
> 
> Ok, it was the initial report from Lance.
> 
> > 2. This patch checks if AVX512F is supported along with GFNI.
> > This is done if the code is built on a platform that supports GFNI,
> > but does not support AVX512.
> 
> Ok.
> 
> > 3. Also this patch fixes compilation problems on 32bit arch due to
> > lack of support for _mm_extract_epi64() by implementing XOR folding
> > with _mm_extract_epi32() on 32-bit arch.
> 
> This code is under a #if defined(__GFNI__) && defined(__AVX512F__).
> 
> Does such a 32 bits processor exist, that supports AVX512 and GFNI?
> 
The processor would be 64-bit but the code could well be 32-bit, and even
then can still use these AVX512 instruction sets.

/Bruce
  
David Marchand Nov. 16, 2021, 2:17 p.m. UTC | #4
On Tue, Nov 16, 2021 at 3:10 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Tue, Nov 16, 2021 at 02:53:49PM +0100, David Marchand wrote:
> > On Fri, Nov 12, 2021 at 3:17 PM Vladimir Medvedkin
> > <vladimir.medvedkin@intel.com> wrote:
> > >
> > > 1. This patch replaces _mm512_set_epi8 with _mm512_set_epi32
> > > due to the lack of support by some compilers.
> >
> > Ok, it was the initial report from Lance.
> >
> > > 2. This patch checks if AVX512F is supported along with GFNI.
> > > This is done if the code is built on a platform that supports GFNI,
> > > but does not support AVX512.
> >
> > Ok.
> >
> > > 3. Also this patch fixes compilation problems on 32bit arch due to
> > > lack of support for _mm_extract_epi64() by implementing XOR folding
> > > with _mm_extract_epi32() on 32-bit arch.
> >
> > This code is under a #if defined(__GFNI__) && defined(__AVX512F__).
> >
> > Does such a 32 bits processor exist, that supports AVX512 and GFNI?
> >
> The processor would be 64-bit but the code could well be 32-bit, and even
> then can still use these AVX512 instruction sets.

Oh, thanks for the explanation.
  
Vladimir Medvedkin Nov. 16, 2021, 2:18 p.m. UTC | #5
Hi David,

On 16/11/2021 14:53, David Marchand wrote:
> On Fri, Nov 12, 2021 at 3:17 PM Vladimir Medvedkin
> <vladimir.medvedkin@intel.com> wrote:
>>
>> 1. This patch replaces _mm512_set_epi8 with _mm512_set_epi32
>> due to the lack of support by some compilers.
> 
> Ok, it was the initial report from Lance.
> 
>> 2. This patch checks if AVX512F is supported along with GFNI.
>> This is done if the code is built on a platform that supports GFNI,
>> but does not support AVX512.
> 
> Ok.
> 
>> 3. Also this patch fixes compilation problems on 32bit arch due to
>> lack of support for _mm_extract_epi64() by implementing XOR folding
>> with _mm_extract_epi32() on 32-bit arch.
> 
> This code is under a #if defined(__GFNI__) && defined(__AVX512F__).
> 
> Does such a 32 bits processor exist, that supports AVX512 and GFNI?
> 
> 

This breaks the 32 bit build.

>>
>> Fixes: 4fd8c4cb0de1 ("hash: add new Toeplitz hash implementation")
>> Cc: vladimir.medvedkin@intel.com
>>
>> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
>> Acked-by: Lance Richardson <lance.richardson@broadcom.com>
>> Acked-by: Ji, Kai <kai.ji@intel.com>
>> ---
>>   lib/hash/rte_thash_x86_gfni.h | 44 ++++++++++++++++++++---------------
>>   1 file changed, 25 insertions(+), 19 deletions(-)
>>
>> diff --git a/lib/hash/rte_thash_x86_gfni.h b/lib/hash/rte_thash_x86_gfni.h
>> index c2889c3734..987dec4988 100644
>> --- a/lib/hash/rte_thash_x86_gfni.h
>> +++ b/lib/hash/rte_thash_x86_gfni.h
>> @@ -18,7 +18,7 @@
>>   extern "C" {
>>   #endif
>>
>> -#ifdef __GFNI__
>> +#if defined(__GFNI__) && defined(__AVX512F__)
> 
> Please update the #endif comments accordingly, or remove the invalid/obsolete
> comment about __GFNI__.
> 

Sure, will do.

> 
>>   #define RTE_THASH_GFNI_DEFINED
>>
>>   #define RTE_THASH_FIRST_ITER_MSK       0x0f0f0f0f0f0e0c08
>> @@ -33,7 +33,6 @@ __rte_thash_xor_reduce(__m512i xor_acc, uint32_t *val_1, uint32_t *val_2)
>>   {
>>          __m256i tmp_256_1, tmp_256_2;
>>          __m128i tmp128_1, tmp128_2;
>> -       uint64_t tmp_1, tmp_2;
>>
>>          tmp_256_1 = _mm512_castsi512_si256(xor_acc);
>>          tmp_256_2 = _mm512_extracti32x8_epi32(xor_acc, 1);
>> @@ -43,12 +42,24 @@ __rte_thash_xor_reduce(__m512i xor_acc, uint32_t *val_1, uint32_t *val_2)
>>          tmp128_2 = _mm256_extracti32x4_epi32(tmp_256_1, 1);
>>          tmp128_1 = _mm_xor_si128(tmp128_1, tmp128_2);
>>
>> +#ifdef RTE_ARCH_X86_64
>> +       uint64_t tmp_1, tmp_2;
>>          tmp_1 = _mm_extract_epi64(tmp128_1, 0);
>>          tmp_2 = _mm_extract_epi64(tmp128_1, 1);
>>          tmp_1 ^= tmp_2;
>>
>>          *val_1 = (uint32_t)tmp_1;
>>          *val_2 = (uint32_t)(tmp_1 >> 32);
>> +#else
>> +       uint32_t tmp_1, tmp_2;
>> +       tmp_1 = _mm_extract_epi32(tmp128_1, 0);
>> +       tmp_2 = _mm_extract_epi32(tmp128_1, 1);
>> +       tmp_1 ^= _mm_extract_epi32(tmp128_1, 2);
>> +       tmp_2 ^= _mm_extract_epi32(tmp128_1, 3);
>> +
>> +       *val_1 = tmp_1;
>> +       *val_2 = tmp_2;
>> +#endif
>>   }
>>
>>   __rte_internal
>> @@ -56,23 +67,18 @@ static inline __m512i
>>   __rte_thash_gfni(const uint64_t *mtrx, const uint8_t *tuple,
>>          const uint8_t *secondary_tuple, int len)
>>   {
>> -       __m512i permute_idx = _mm512_set_epi8(7, 6, 5, 4, 7, 6, 5, 4,
>> -                                               6, 5, 4, 3, 6, 5, 4, 3,
>> -                                               5, 4, 3, 2, 5, 4, 3, 2,
>> -                                               4, 3, 2, 1, 4, 3, 2, 1,
>> -                                               3, 2, 1, 0, 3, 2, 1, 0,
>> -                                               2, 1, 0, -1, 2, 1, 0, -1,
>> -                                               1, 0, -1, -2, 1, 0, -1, -2,
>> -                                               0, -1, -2, -3, 0, -1, -2, -3);
>> -
>> -       const __m512i rewind_idx = _mm512_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
>> -                                               0, 0, 0, 0, 0, 0, 0, 0,
>> -                                               0, 0, 0, 0, 0, 0, 0, 0,
>> -                                               0, 0, 0, 0, 0, 0, 0, 0,
>> -                                               0, 0, 0, 0, 0, 0, 0, 0,
>> -                                               0, 0, 0, 59, 0, 0, 0, 59,
>> -                                               0, 0, 59, 58, 0, 0, 59, 58,
>> -                                               0, 59, 58, 57, 0, 59, 58, 57);
>> +       __m512i permute_idx = _mm512_set_epi32(0x7060504, 0x7060504,
> 
> Nit: it is easier to read fully expanded 32 bits values, like
> 0x07060504 instead of 0x7060504
> Etc...
> 

Will fix in v3.

> 
>> +                                               0x6050403, 0x6050403,
>> +                                               0x5040302, 0x5040302,
>> +                                               0x4030201, 0x4030201,
>> +                                               0x3020100, 0x3020100,
>> +                                               0x20100FF, 0x20100FF,
>> +                                               0x100FFFE, 0x100FFFE,
>> +                                               0xFFFEFD, 0xFFFEFD);
>> +       const __m512i rewind_idx = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0,
>> +                                                       0, 0, 0x3B, 0x3B,
>> +                                                       0x3B3A, 0x3B3A,
>> +                                                       0x3B3A39, 0x3B3A39);
>>          const __mmask64 rewind_mask = RTE_THASH_REWIND_MSK;
>>          const __m512i shift_8 = _mm512_set1_epi8(8);
>>          __m512i xor_acc = _mm512_setzero_si512();
>> --
>> 2.25.1
>>
> 
>
  

Patch

diff --git a/lib/hash/rte_thash_x86_gfni.h b/lib/hash/rte_thash_x86_gfni.h
index c2889c3734..987dec4988 100644
--- a/lib/hash/rte_thash_x86_gfni.h
+++ b/lib/hash/rte_thash_x86_gfni.h
@@ -18,7 +18,7 @@ 
 extern "C" {
 #endif
 
-#ifdef __GFNI__
+#if defined(__GFNI__) && defined(__AVX512F__)
 #define RTE_THASH_GFNI_DEFINED
 
 #define RTE_THASH_FIRST_ITER_MSK	0x0f0f0f0f0f0e0c08
@@ -33,7 +33,6 @@  __rte_thash_xor_reduce(__m512i xor_acc, uint32_t *val_1, uint32_t *val_2)
 {
 	__m256i tmp_256_1, tmp_256_2;
 	__m128i tmp128_1, tmp128_2;
-	uint64_t tmp_1, tmp_2;
 
 	tmp_256_1 = _mm512_castsi512_si256(xor_acc);
 	tmp_256_2 = _mm512_extracti32x8_epi32(xor_acc, 1);
@@ -43,12 +42,24 @@  __rte_thash_xor_reduce(__m512i xor_acc, uint32_t *val_1, uint32_t *val_2)
 	tmp128_2 = _mm256_extracti32x4_epi32(tmp_256_1, 1);
 	tmp128_1 = _mm_xor_si128(tmp128_1, tmp128_2);
 
+#ifdef RTE_ARCH_X86_64
+	uint64_t tmp_1, tmp_2;
 	tmp_1 = _mm_extract_epi64(tmp128_1, 0);
 	tmp_2 = _mm_extract_epi64(tmp128_1, 1);
 	tmp_1 ^= tmp_2;
 
 	*val_1 = (uint32_t)tmp_1;
 	*val_2 = (uint32_t)(tmp_1 >> 32);
+#else
+	uint32_t tmp_1, tmp_2;
+	tmp_1 = _mm_extract_epi32(tmp128_1, 0);
+	tmp_2 = _mm_extract_epi32(tmp128_1, 1);
+	tmp_1 ^= _mm_extract_epi32(tmp128_1, 2);
+	tmp_2 ^= _mm_extract_epi32(tmp128_1, 3);
+
+	*val_1 = tmp_1;
+	*val_2 = tmp_2;
+#endif
 }
 
 __rte_internal
@@ -56,23 +67,18 @@  static inline __m512i
 __rte_thash_gfni(const uint64_t *mtrx, const uint8_t *tuple,
 	const uint8_t *secondary_tuple, int len)
 {
-	__m512i permute_idx = _mm512_set_epi8(7, 6, 5, 4, 7, 6, 5, 4,
-						6, 5, 4, 3, 6, 5, 4, 3,
-						5, 4, 3, 2, 5, 4, 3, 2,
-						4, 3, 2, 1, 4, 3, 2, 1,
-						3, 2, 1, 0, 3, 2, 1, 0,
-						2, 1, 0, -1, 2, 1, 0, -1,
-						1, 0, -1, -2, 1, 0, -1, -2,
-						0, -1, -2, -3, 0, -1, -2, -3);
-
-	const __m512i rewind_idx = _mm512_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0,
-						0, 0, 0, 59, 0, 0, 0, 59,
-						0, 0, 59, 58, 0, 0, 59, 58,
-						0, 59, 58, 57, 0, 59, 58, 57);
+	__m512i permute_idx = _mm512_set_epi32(0x7060504, 0x7060504,
+						0x6050403, 0x6050403,
+						0x5040302, 0x5040302,
+						0x4030201, 0x4030201,
+						0x3020100, 0x3020100,
+						0x20100FF, 0x20100FF,
+						0x100FFFE, 0x100FFFE,
+						0xFFFEFD, 0xFFFEFD);
+	const __m512i rewind_idx = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0,
+							0, 0, 0x3B, 0x3B,
+							0x3B3A, 0x3B3A,
+							0x3B3A39, 0x3B3A39);
 	const __mmask64 rewind_mask = RTE_THASH_REWIND_MSK;
 	const __m512i shift_8 = _mm512_set1_epi8(8);
 	__m512i xor_acc = _mm512_setzero_si512();