[v3] hash: fix thash gfni implementation

Message ID 20211116143330.305220-1-vladimir.medvedkin@intel.com (mailing list archive)
State Accepted, archived
Delegated to: David Marchand
Series [v3] hash: fix thash gfni implementation

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/github-robot: build success github build: passed
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-intel-Performance fail Performance Testing issues
ci/iol-aarch64-compile-testing success Testing PASS

Commit Message

Vladimir Medvedkin Nov. 16, 2021, 2:33 p.m. UTC
  1. This patch replaces _mm512_set_epi8 with _mm512_set_epi32
because some compilers lack support for the former.
2. This patch checks that AVX512F is supported along with GFNI.
The check is needed when the code is built on a platform that
supports GFNI but does not support AVX512.
3. This patch also fixes compilation problems on 32-bit arch caused
by the missing _mm_extract_epi64() there, by implementing the XOR
folding with _mm_extract_epi32() on 32-bit arch.

Fixes: 4fd8c4cb0de1 ("hash: add new Toeplitz hash implementation")
Cc: vladimir.medvedkin@intel.com

Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Acked-by: Lance Richardson <lance.richardson@broadcom.com>
Acked-by: Ji Kai <kai.ji@intel.com>
---
 lib/hash/rte_thash_x86_gfni.h | 47 ++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 20 deletions(-)
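
The first and third points above rest on two simple equivalences that can be checked without AVX-512 hardware: four signed _mm512_set_epi8 arguments pack into one _mm512_set_epi32 argument most-significant byte first, and XOR-folding the four 32-bit lanes pairwise yields the same two words as XOR-folding the two 64-bit halves. The scalar sketch below is purely illustrative (plain C, not part of the patch or of DPDK; pack_epi8_lane and the lane values are made up for the demonstration).

/*
 * Minimal scalar sketch: checks the epi8 -> epi32 constant packing and the
 * 32-bit XOR fold used by the patch against what they replace.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack four signed bytes into one 32-bit lane, most significant byte first,
 * mirroring how groups of _mm512_set_epi8 arguments become _mm512_set_epi32
 * arguments. */
static uint32_t
pack_epi8_lane(int8_t b3, int8_t b2, int8_t b1, int8_t b0)
{
	return ((uint32_t)(uint8_t)b3 << 24) | ((uint32_t)(uint8_t)b2 << 16) |
	       ((uint32_t)(uint8_t)b1 << 8) | (uint32_t)(uint8_t)b0;
}

int
main(void)
{
	/* Point 1: byte groups from the old permute_idx/rewind_idx map to
	 * the new dwords. */
	assert(pack_epi8_lane(7, 6, 5, 4) == 0x07060504);
	assert(pack_epi8_lane(0, -1, -2, -3) == 0x00FFFEFD);
	assert(pack_epi8_lane(0, 59, 58, 57) == 0x003B3A39);

	/* Point 3: fold four 32-bit lanes of a 128-bit value into two words.
	 * 64-bit path: XOR the two 64-bit halves, then split. */
	uint32_t lane[4] = {0xdeadbeef, 0x12345678, 0xcafebabe, 0x87654321};
	uint64_t lo = ((uint64_t)lane[1] << 32) | lane[0];
	uint64_t hi = ((uint64_t)lane[3] << 32) | lane[2];
	uint64_t fold64 = lo ^ hi;
	uint32_t v1_64 = (uint32_t)fold64;
	uint32_t v2_64 = (uint32_t)(fold64 >> 32);

	/* 32-bit path: XOR the lanes pairwise, as the new #else branch does. */
	uint32_t v1_32 = lane[0] ^ lane[2];
	uint32_t v2_32 = lane[1] ^ lane[3];

	assert(v1_64 == v1_32 && v2_64 == v2_32);
	printf("packing and folding equivalences hold\n");
	return 0;
}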
  

Comments

David Marchand Nov. 17, 2021, 9:20 a.m. UTC | #1
On Tue, Nov 16, 2021 at 3:33 PM Vladimir Medvedkin
<vladimir.medvedkin@intel.com> wrote:
>
> 1. This patch replaces _mm512_set_epi8 with _mm512_set_epi32
> because some compilers lack support for the former.
> 2. This patch checks that AVX512F is supported along with GFNI.
> The check is needed when the code is built on a platform that
> supports GFNI but does not support AVX512.
> 3. This patch also fixes compilation problems on 32-bit arch caused
> by the missing _mm_extract_epi64() there, by implementing the XOR
> folding with _mm_extract_epi32() on 32-bit arch.
>
> Fixes: 4fd8c4cb0de1 ("hash: add new Toeplitz hash implementation")
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> Acked-by: Lance Richardson <lance.richardson@broadcom.com>
> Acked-by: Ji Kai <kai.ji@intel.com>

Afaics in git history, that's "Kai Ji".

I updated indentation and applied, thanks.
  

Patch

diff --git a/lib/hash/rte_thash_x86_gfni.h b/lib/hash/rte_thash_x86_gfni.h
index c2889c3734..8e3da51ecd 100644
--- a/lib/hash/rte_thash_x86_gfni.h
+++ b/lib/hash/rte_thash_x86_gfni.h
@@ -18,7 +18,7 @@ 
 extern "C" {
 #endif
 
-#ifdef __GFNI__
+#if defined(__GFNI__) && defined(__AVX512F__)
 #define RTE_THASH_GFNI_DEFINED
 
 #define RTE_THASH_FIRST_ITER_MSK	0x0f0f0f0f0f0e0c08
@@ -33,7 +33,6 @@  __rte_thash_xor_reduce(__m512i xor_acc, uint32_t *val_1, uint32_t *val_2)
 {
 	__m256i tmp_256_1, tmp_256_2;
 	__m128i tmp128_1, tmp128_2;
-	uint64_t tmp_1, tmp_2;
 
 	tmp_256_1 = _mm512_castsi512_si256(xor_acc);
 	tmp_256_2 = _mm512_extracti32x8_epi32(xor_acc, 1);
@@ -43,12 +42,24 @@  __rte_thash_xor_reduce(__m512i xor_acc, uint32_t *val_1, uint32_t *val_2)
 	tmp128_2 = _mm256_extracti32x4_epi32(tmp_256_1, 1);
 	tmp128_1 = _mm_xor_si128(tmp128_1, tmp128_2);
 
+#ifdef RTE_ARCH_X86_64
+	uint64_t tmp_1, tmp_2;
 	tmp_1 = _mm_extract_epi64(tmp128_1, 0);
 	tmp_2 = _mm_extract_epi64(tmp128_1, 1);
 	tmp_1 ^= tmp_2;
 
 	*val_1 = (uint32_t)tmp_1;
 	*val_2 = (uint32_t)(tmp_1 >> 32);
+#else
+	uint32_t tmp_1, tmp_2;
+	tmp_1 = _mm_extract_epi32(tmp128_1, 0);
+	tmp_2 = _mm_extract_epi32(tmp128_1, 1);
+	tmp_1 ^= _mm_extract_epi32(tmp128_1, 2);
+	tmp_2 ^= _mm_extract_epi32(tmp128_1, 3);
+
+	*val_1 = tmp_1;
+	*val_2 = tmp_2;
+#endif
 }
 
 __rte_internal
@@ -56,23 +67,19 @@  static inline __m512i
 __rte_thash_gfni(const uint64_t *mtrx, const uint8_t *tuple,
 	const uint8_t *secondary_tuple, int len)
 {
-	__m512i permute_idx = _mm512_set_epi8(7, 6, 5, 4, 7, 6, 5, 4,
-						6, 5, 4, 3, 6, 5, 4, 3,
-						5, 4, 3, 2, 5, 4, 3, 2,
-						4, 3, 2, 1, 4, 3, 2, 1,
-						3, 2, 1, 0, 3, 2, 1, 0,
-						2, 1, 0, -1, 2, 1, 0, -1,
-						1, 0, -1, -2, 1, 0, -1, -2,
-						0, -1, -2, -3, 0, -1, -2, -3);
-
-	const __m512i rewind_idx = _mm512_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0,
-						0, 0, 0, 0, 0, 0, 0, 0,
-						0, 0, 0, 59, 0, 0, 0, 59,
-						0, 0, 59, 58, 0, 0, 59, 58,
-						0, 59, 58, 57, 0, 59, 58, 57);
+	__m512i permute_idx = _mm512_set_epi32(0x07060504, 0x07060504,
+						0x06050403, 0x06050403,
+						0x05040302, 0x05040302,
+						0x04030201, 0x04030201,
+						0x03020100, 0x03020100,
+						0x020100FF, 0x020100FF,
+						0x0100FFFE, 0x0100FFFE,
+						0x00FFFEFD, 0x00FFFEFD);
+	const __m512i rewind_idx = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0,
+						0, 0,
+						0x0000003B, 0x0000003B,
+						0x00003B3A, 0x00003B3A,
+						0x003B3A39, 0x003B3A39);
 	const __mmask64 rewind_mask = RTE_THASH_REWIND_MSK;
 	const __m512i shift_8 = _mm512_set1_epi8(8);
 	__m512i xor_acc = _mm512_setzero_si512();
@@ -214,7 +221,7 @@  rte_thash_gfni_bulk(const uint64_t *mtrx, int len, uint8_t *tuple[],
 	}
 }
 
-#endif /* _GFNI_ */
+#endif /* __GFNI__ && __AVX512F__ */
 
 #ifdef __cplusplus
 }
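
With this change RTE_THASH_GFNI_DEFINED is only defined when the compiler targets both GFNI and AVX512F, so it doubles as the caller's compile-time gate; the running CPU still has to be checked before taking the vector path. The sketch below shows only that guard pattern. It assumes <rte_thash.h> pulls in the GFNI inline helpers on x86 builds and that RTE_CPUFLAG_GFNI and RTE_CPUFLAG_AVX512F are available in <rte_cpuflags.h> on the DPDK release in use, and it leaves the scalar fallback as a placeholder rather than picking a specific rte_softrss() variant.

#include <stdint.h>

#include <rte_cpuflags.h>
#include <rte_thash.h>	/* assumed to expose the GFNI inlines on x86 builds */

uint32_t
tuple_hash(const uint64_t *gfni_mtrx, const uint8_t *tuple, int len)
{
#ifdef RTE_THASH_GFNI_DEFINED
	/* Compiled with GFNI + AVX512F support: confirm the running CPU
	 * has both before using the vector implementation. */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_GFNI) &&
			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		return rte_thash_gfni(gfni_mtrx, tuple, len);
#endif
	/* Scalar fallback for builds or CPUs without GFNI/AVX512F,
	 * e.g. one of the rte_softrss() helpers from <rte_thash.h>. */
	return 0; /* placeholder for the scalar Toeplitz path */
}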