[v11,4/8] fib: introduce AVX512 lookup
diff mbox series

Message ID 0fdfe93a2fc1af144efc962ef87a8984df383eb0.1602862172.git.vladimir.medvedkin@intel.com
State Superseded
Delegated to: David Marchand
Headers show
Series
  • fib: implement AVX512 vector lookup
Related show

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Medvedkin, Vladimir Oct. 16, 2020, 3:42 p.m. UTC
Add new lookup implementation for DIR24_8 algorithm using
AVX512 instruction set

Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 doc/guides/rel_notes/release_20_11.rst |   3 +
 lib/librte_fib/dir24_8.c               |  38 ++++++++
 lib/librte_fib/dir24_8_avx512.c        | 165 +++++++++++++++++++++++++++++++++
 lib/librte_fib/dir24_8_avx512.h        |  24 +++++
 lib/librte_fib/meson.build             |  34 +++++++
 lib/librte_fib/rte_fib.c               |   2 +-
 lib/librte_fib/rte_fib.h               |   6 +-
 7 files changed, 270 insertions(+), 2 deletions(-)
 create mode 100644 lib/librte_fib/dir24_8_avx512.c
 create mode 100644 lib/librte_fib/dir24_8_avx512.h

Patch
diff mbox series

diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index 57e3edc..8c2a89f 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -148,6 +148,9 @@  New Features
   * Extern objects and functions can be plugged into the pipeline.
   * Transaction-oriented table updates.
 
+* **Added AVX512 lookup implementation for FIB.**
+
+  Added an implementation of AVX512 vector lookup functions to the FIB library.
 
 Removed Items
 -------------
diff --git a/lib/librte_fib/dir24_8.c b/lib/librte_fib/dir24_8.c
index b5f2363..b96d810 100644
--- a/lib/librte_fib/dir24_8.c
+++ b/lib/librte_fib/dir24_8.c
@@ -18,6 +18,12 @@ 
 #include <rte_fib.h>
 #include "dir24_8.h"
 
+#ifdef CC_DIR24_8_AVX512_SUPPORT
+
+#include "dir24_8_avx512.h"
+
+#endif /* CC_DIR24_8_AVX512_SUPPORT */
+
 #define DIR24_8_NAMESIZE	64
 
 #define ROUNDUP(x, y)	 RTE_ALIGN_CEIL(x, (1 << (32 - y)))
@@ -56,11 +62,38 @@  get_scalar_fn_inlined(enum rte_fib_dir24_8_nh_sz nh_sz)
 	}
 }
 
+static inline rte_fib_lookup_fn_t
+get_vector_fn(enum rte_fib_dir24_8_nh_sz nh_sz)
+{
+#ifdef CC_DIR24_8_AVX512_SUPPORT
+	if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0) ||
+			(rte_get_max_simd_bitwidth() < RTE_SIMD_512))
+		return NULL;
+
+	switch (nh_sz) {
+	case RTE_FIB_DIR24_8_1B:
+		return rte_dir24_8_vec_lookup_bulk_1b;
+	case RTE_FIB_DIR24_8_2B:
+		return rte_dir24_8_vec_lookup_bulk_2b;
+	case RTE_FIB_DIR24_8_4B:
+		return rte_dir24_8_vec_lookup_bulk_4b;
+	case RTE_FIB_DIR24_8_8B:
+		return rte_dir24_8_vec_lookup_bulk_8b;
+	default:
+		return NULL;
+	}
+#else
+	RTE_SET_USED(nh_sz);
+#endif
+	return NULL;
+}
+
 rte_fib_lookup_fn_t
 dir24_8_get_lookup_fn(void *p, enum rte_fib_dir24_8_lookup_type type)
 {
 	enum rte_fib_dir24_8_nh_sz nh_sz;
 	struct dir24_8_tbl *dp = p;
+	rte_fib_lookup_fn_t ret_fn = NULL;
 
 	if (dp == NULL)
 		return NULL;
@@ -74,6 +107,11 @@  dir24_8_get_lookup_fn(void *p, enum rte_fib_dir24_8_lookup_type type)
 		return get_scalar_fn_inlined(nh_sz);
 	case RTE_FIB_DIR24_8_SCALAR_UNI:
 		return dir24_8_lookup_bulk_uni;
+	case RTE_FIB_DIR24_8_VECTOR_AVX512:
+		return get_vector_fn(nh_sz);
+	case RTE_FIB_DIR24_8_ANY:
+		ret_fn = get_vector_fn(nh_sz);
+		return (ret_fn) ? ret_fn : get_scalar_fn(nh_sz);
 	default:
 		return NULL;
 	}
diff --git a/lib/librte_fib/dir24_8_avx512.c b/lib/librte_fib/dir24_8_avx512.c
new file mode 100644
index 0000000..43dba28
--- /dev/null
+++ b/lib/librte_fib/dir24_8_avx512.c
@@ -0,0 +1,165 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_vect.h>
+#include <rte_fib.h>
+
+#include "dir24_8.h"
+#include "dir24_8_avx512.h"
+
+static __rte_always_inline void
+dir24_8_vec_lookup_x16(void *p, const uint32_t *ips,
+	uint64_t *next_hops, int size)
+{
+	struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;
+	__mmask16 msk_ext;
+	__mmask16 exp_msk = 0x5555;
+	__m512i ip_vec, idxes, res, bytes;
+	const __m512i zero = _mm512_set1_epi32(0);
+	const __m512i lsb = _mm512_set1_epi32(1);
+	const __m512i lsbyte_msk = _mm512_set1_epi32(0xff);
+	__m512i tmp1, tmp2, res_msk;
+	__m256i tmp256;
+	/* used to mask gather values if size is 1/2 (8/16 bit next hops) */
+	if (size == sizeof(uint8_t))
+		res_msk = _mm512_set1_epi32(UINT8_MAX);
+	else if (size == sizeof(uint16_t))
+		res_msk = _mm512_set1_epi32(UINT16_MAX);
+
+	ip_vec = _mm512_loadu_si512(ips);
+	/* shift off the low byte: the 24 most significant bits index tbl24 */
+	idxes = _mm512_srli_epi32(ip_vec, 8);
+
+	/**
+	 * lookup in tbl24
+	 * Put it inside branch to make compiler happy with -O0
+	 */
+	if (size == sizeof(uint8_t)) {
+		res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 1);
+		res = _mm512_and_epi32(res, res_msk);
+	} else if (size == sizeof(uint16_t)) {
+		res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 2);
+		res = _mm512_and_epi32(res, res_msk);
+	} else
+		res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 4);
+
+	/* get extended entries indexes */
+	msk_ext = _mm512_test_epi32_mask(res, lsb);
+
+	if (msk_ext != 0) {
+		idxes = _mm512_srli_epi32(res, 1);
+		idxes = _mm512_slli_epi32(idxes, 8);
+		bytes = _mm512_and_epi32(ip_vec, lsbyte_msk);
+		idxes = _mm512_maskz_add_epi32(msk_ext, idxes, bytes);
+		if (size == sizeof(uint8_t)) {
+			idxes = _mm512_mask_i32gather_epi32(zero, msk_ext,
+				idxes, (const int *)dp->tbl8, 1);
+			idxes = _mm512_and_epi32(idxes, res_msk);
+		} else if (size == sizeof(uint16_t)) {
+			idxes = _mm512_mask_i32gather_epi32(zero, msk_ext,
+				idxes, (const int *)dp->tbl8, 2);
+			idxes = _mm512_and_epi32(idxes, res_msk);
+		} else
+			idxes = _mm512_mask_i32gather_epi32(zero, msk_ext,
+				idxes, (const int *)dp->tbl8, 4);
+
+		res = _mm512_mask_blend_epi32(msk_ext, res, idxes);
+	}
+
+	res = _mm512_srli_epi32(res, 1);
+	tmp1 = _mm512_maskz_expand_epi32(exp_msk, res);
+	tmp256 = _mm512_extracti32x8_epi32(res, 1);
+	tmp2 = _mm512_maskz_expand_epi32(exp_msk,
+		_mm512_castsi256_si512(tmp256));
+	_mm512_storeu_si512(next_hops, tmp1);
+	_mm512_storeu_si512(next_hops + 8, tmp2);
+}
+
+static __rte_always_inline void
+dir24_8_vec_lookup_x8_8b(void *p, const uint32_t *ips,
+	uint64_t *next_hops)
+{
+	struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;
+	const __m512i zero = _mm512_set1_epi32(0);
+	const __m512i lsbyte_msk = _mm512_set1_epi64(0xff);
+	const __m512i lsb = _mm512_set1_epi64(1);
+	__m512i res, idxes, bytes;
+	__m256i idxes_256, ip_vec;
+	__mmask8 msk_ext;
+
+	ip_vec = _mm256_loadu_si256((const void *)ips);
+	/* shift off the low byte: the 24 most significant bits index tbl24 */
+	idxes_256 = _mm256_srli_epi32(ip_vec, 8);
+
+	/* lookup in tbl24 */
+	res = _mm512_i32gather_epi64(idxes_256, (const void *)dp->tbl24, 8);
+
+	/* get extended entries indexes */
+	msk_ext = _mm512_test_epi64_mask(res, lsb);
+
+	if (msk_ext != 0) {
+		bytes = _mm512_cvtepi32_epi64(ip_vec);
+		idxes = _mm512_srli_epi64(res, 1);
+		idxes = _mm512_slli_epi64(idxes, 8);
+		bytes = _mm512_and_epi64(bytes, lsbyte_msk);
+		idxes = _mm512_maskz_add_epi64(msk_ext, idxes, bytes);
+		idxes = _mm512_mask_i64gather_epi64(zero, msk_ext, idxes,
+			(const void *)dp->tbl8, 8);
+
+		res = _mm512_mask_blend_epi64(msk_ext, res, idxes);
+	}
+
+	res = _mm512_srli_epi64(res, 1);
+	_mm512_storeu_si512(next_hops, res);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_1b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	uint32_t i;
+	for (i = 0; i < (n / 16); i++)
+		dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16,
+			sizeof(uint8_t));
+
+	dir24_8_lookup_bulk_1b(p, ips + i * 16, next_hops + i * 16,
+		n - i * 16);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_2b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	uint32_t i;
+	for (i = 0; i < (n / 16); i++)
+		dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16,
+			sizeof(uint16_t));
+
+	dir24_8_lookup_bulk_2b(p, ips + i * 16, next_hops + i * 16,
+		n - i * 16);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_4b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	uint32_t i;
+	for (i = 0; i < (n / 16); i++)
+		dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16,
+			sizeof(uint32_t));
+
+	dir24_8_lookup_bulk_4b(p, ips + i * 16, next_hops + i * 16,
+		n - i * 16);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_8b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	uint32_t i;
+	for (i = 0; i < (n / 8); i++)
+		dir24_8_vec_lookup_x8_8b(p, ips + i * 8, next_hops + i * 8);
+
+	dir24_8_lookup_bulk_8b(p, ips + i * 8, next_hops + i * 8, n - i * 8);
+}
diff --git a/lib/librte_fib/dir24_8_avx512.h b/lib/librte_fib/dir24_8_avx512.h
new file mode 100644
index 0000000..1d3c2b9
--- /dev/null
+++ b/lib/librte_fib/dir24_8_avx512.h
@@ -0,0 +1,24 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _DIR248_AVX512_H_
+#define _DIR248_AVX512_H_
+
+void
+rte_dir24_8_vec_lookup_bulk_1b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n);
+
+void
+rte_dir24_8_vec_lookup_bulk_2b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n);
+
+void
+rte_dir24_8_vec_lookup_bulk_4b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n);
+
+void
+rte_dir24_8_vec_lookup_bulk_8b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n);
+
+#endif /* _DIR248_AVX512_H_ */
diff --git a/lib/librte_fib/meson.build b/lib/librte_fib/meson.build
index 771828f..0a8adef 100644
--- a/lib/librte_fib/meson.build
+++ b/lib/librte_fib/meson.build
@@ -5,3 +5,37 @@ 
 sources = files('rte_fib.c', 'rte_fib6.c', 'dir24_8.c', 'trie.c')
 headers = files('rte_fib.h', 'rte_fib6.h')
 deps += ['rib']
+
+# compile AVX512 version if:
+# we are building 64-bit binary AND binutils can generate proper code
+if dpdk_conf.has('RTE_ARCH_X86_64') and binutils_ok.returncode() == 0
+	# compile AVX512 version if either:
+	# a. we have AVX512F supported in minimum instruction set baseline
+	# b. it's not minimum instruction set, but supported by compiler
+	#
+	# in former case, just add avx512 C file to files list
+	# in latter case, compile c file to static lib, using correct
+	# compiler flags, and then have the .o file from static lib
+	# linked into main lib.
+
+	# check if all required flags already enabled (variant a).
+	acl_avx512_flags = ['__AVX512F__','__AVX512DQ__']
+	acl_avx512_on = true
+	foreach f:acl_avx512_flags
+		if cc.get_define(f, args: machine_args) == ''
+			acl_avx512_on = false
+		endif
+	endforeach
+
+	if acl_avx512_on == true
+		cflags += ['-DCC_DIR24_8_AVX512_SUPPORT']
+		sources += files('dir24_8_avx512.c')
+	elif cc.has_multi_arguments('-mavx512f', '-mavx512dq')
+		dir24_8_avx512_tmp = static_library('dir24_8_avx512_tmp',
+				'dir24_8_avx512.c',
+				dependencies: static_rte_eal,
+				c_args: cflags + ['-mavx512f', '-mavx512dq'])
+		objs += dir24_8_avx512_tmp.extract_objects('dir24_8_avx512.c')
+		cflags += '-DCC_DIR24_8_AVX512_SUPPORT'
+	endif
+endif
diff --git a/lib/librte_fib/rte_fib.c b/lib/librte_fib/rte_fib.c
index b9f6efb..1af2a5f 100644
--- a/lib/librte_fib/rte_fib.c
+++ b/lib/librte_fib/rte_fib.c
@@ -108,7 +108,7 @@  init_dataplane(struct rte_fib *fib, __rte_unused int socket_id,
 		if (fib->dp == NULL)
 			return -rte_errno;
 		fib->lookup = dir24_8_get_lookup_fn(fib->dp,
-			RTE_FIB_DIR24_8_SCALAR_MACRO);
+			RTE_FIB_DIR24_8_ANY);
 		fib->modify = dir24_8_modify;
 		return 0;
 	default:
diff --git a/lib/librte_fib/rte_fib.h b/lib/librte_fib/rte_fib.h
index 2097ee5..d4e5d91 100644
--- a/lib/librte_fib/rte_fib.h
+++ b/lib/librte_fib/rte_fib.h
@@ -67,10 +67,14 @@  enum rte_fib_dir24_8_lookup_type {
 	 * Lookup implementation using inlined functions
 	 * for different next hop sizes
 	 */
-	RTE_FIB_DIR24_8_SCALAR_UNI
+	RTE_FIB_DIR24_8_SCALAR_UNI,
 	/**<
 	 * Unified lookup function for all next hop sizes
 	 */
+	RTE_FIB_DIR24_8_VECTOR_AVX512,
+	/**< Vector implementation using AVX512 */
+	RTE_FIB_DIR24_8_ANY = UINT32_MAX
+	/**< Selects the best implementation based on the max simd bitwidth */
 };
 
 /** FIB configuration structure */