[dpdk-dev,1/3] eal/x86: run-time dispatch over memcpy
Commit Message
This patch dynamically selects the memcpy functions at run-time based
on the CPU flags that the current machine supports. It uses function
pointers which are bound to the appropriate implementations at
constructor time. To make the AVX512 instructions pass compilation,
enable the switch in the makefile.
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
---
.../common/include/arch/x86/rte_memcpy.h | 305 ++++++++++++---------
mk/machine/native/rte.vars.mk | 2 +
2 files changed, 181 insertions(+), 126 deletions(-)
Comments
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Xiaoyun Li
> Sent: Friday, August 25, 2017 3:06 AM
> To: Richardson, Bruce <bruce.richardson@intel.com>
> Cc: dev@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Li, Xiaoyun <xiaoyun.li@intel.com>
> Subject: [dpdk-dev] [PATCH 1/3] eal/x86: run-time dispatch over memcpy
>
> This patch dynamically selects the memcpy functions at run-time based
> on the CPU flags that the current machine supports. It uses function
> pointers which are bound to the appropriate implementations at
> constructor time. To make the AVX512 instructions pass compilation,
> enable the switch in the makefile.
It seems quite an overhead to add an extra function call for each 16B movement...
Wouldn't it be better to have one func_ptr per implementation, i.e.:
rte_memcpy_sse(), rte_memcpy_avx2(), rte_memcpy_avx512(), etc.?
Konstantin
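
(For illustration, a minimal sketch of that alternative: a single
indirect call at the top level, while the rte_mov* helpers stay plain
inlined functions inside each implementation. The names below are
hypothetical, not from the patch.)

typedef void * (*rte_memcpy_t)(void *dst, const void *src, size_t n);

/* One complete, self-contained copy routine per ISA level. */
static void *rte_memcpy_sse(void *dst, const void *src, size_t n);
static void *rte_memcpy_avx2(void *dst, const void *src, size_t n);
static void *rte_memcpy_avx512(void *dst, const void *src, size_t n);

static rte_memcpy_t rte_memcpy_ptr = rte_memcpy_sse;

static void __attribute__((constructor))
rte_memcpy_select(void)
{
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		rte_memcpy_ptr = rte_memcpy_avx512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		rte_memcpy_ptr = rte_memcpy_avx2;
}

static inline void *
rte_memcpy(void *dst, const void *src, size_t n)
{
	/* The only indirect call; everything inside the chosen
	 * implementation can still be inlined. */
	return (*rte_memcpy_ptr)(dst, src, n);
}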
>
> Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
> ---
> .../common/include/arch/x86/rte_memcpy.h | 305 ++++++++++++---------
> mk/machine/native/rte.vars.mk | 2 +
> 2 files changed, 181 insertions(+), 126 deletions(-)
>
> diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> index 74c280c..f68ebd2 100644
> --- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> +++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> @@ -45,11 +45,37 @@
> #include <string.h>
> #include <rte_vect.h>
> #include <rte_common.h>
> +#include <rte_cpuflags.h>
> +#include <rte_log.h>
>
> #ifdef __cplusplus
> extern "C" {
> #endif
>
> +/*
> + * Select SSE/AVX memory copy method as default one.
> + */
> +
> +static uint16_t alignment_mask = 0x0F;
> +
> +typedef void (*rte_mov16_t)(uint8_t *dst, const uint8_t *src);
> +typedef void (*rte_mov32_t)(uint8_t *dst, const uint8_t *src);
> +typedef void (*rte_mov64_t)(uint8_t *dst, const uint8_t *src);
> +typedef void (*rte_mov128_t)(uint8_t *dst, const uint8_t *src);
> +typedef void (*rte_mov256_t)(uint8_t *dst, const uint8_t *src);
> +typedef void (*rte_mov128blocks_t)(uint8_t *dst, const uint8_t *src, size_t n);
> +typedef void (*rte_mov512blocks_t)(uint8_t *dst, const uint8_t *src, size_t n);
> +typedef void * (*rte_memcpy_generic_t)(void *dst, const void *src, size_t n);
> +
> +static rte_mov16_t rte_mov16;
> +static rte_mov32_t rte_mov32;
> +static rte_mov64_t rte_mov64;
> +static rte_mov128_t rte_mov128;
> +static rte_mov256_t rte_mov256;
> +static rte_mov128blocks_t rte_mov128blocks;
> +static rte_mov512blocks_t rte_mov512blocks;
> +static rte_memcpy_generic_t rte_memcpy_generic;
> +
> /**
> * Copy bytes from one location to another. The locations must not overlap.
> *
> @@ -68,10 +94,6 @@ extern "C" {
> static __rte_always_inline void *
> rte_memcpy(void *dst, const void *src, size_t n);
>
> -#ifdef RTE_MACHINE_CPUFLAG_AVX512F
> -
> -#define ALIGNMENT_MASK 0x3F
> -
> /**
> * AVX512 implementation below
> */
> @@ -81,7 +103,7 @@ rte_memcpy(void *dst, const void *src, size_t n);
> * locations should not overlap.
> */
> static inline void
> -rte_mov16(uint8_t *dst, const uint8_t *src)
> +rte_mov16_AVX512F(uint8_t *dst, const uint8_t *src)
> {
> __m128i xmm0;
>
> @@ -94,7 +116,7 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov32(uint8_t *dst, const uint8_t *src)
> +rte_mov32_AVX512F(uint8_t *dst, const uint8_t *src)
> {
> __m256i ymm0;
>
> @@ -107,7 +129,7 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov64(uint8_t *dst, const uint8_t *src)
> +rte_mov64_AVX512F(uint8_t *dst, const uint8_t *src)
> {
> __m512i zmm0;
>
> @@ -120,10 +142,10 @@ rte_mov64(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov128(uint8_t *dst, const uint8_t *src)
> +rte_mov128_AVX512F(uint8_t *dst, const uint8_t *src)
> {
> - rte_mov64(dst + 0 * 64, src + 0 * 64);
> - rte_mov64(dst + 1 * 64, src + 1 * 64);
> + (*rte_mov64)(dst + 0 * 64, src + 0 * 64);
> + (*rte_mov64)(dst + 1 * 64, src + 1 * 64);
> }
>
> /**
> @@ -131,12 +153,12 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov256(uint8_t *dst, const uint8_t *src)
> +rte_mov256_AVX512F(uint8_t *dst, const uint8_t *src)
> {
> - rte_mov64(dst + 0 * 64, src + 0 * 64);
> - rte_mov64(dst + 1 * 64, src + 1 * 64);
> - rte_mov64(dst + 2 * 64, src + 2 * 64);
> - rte_mov64(dst + 3 * 64, src + 3 * 64);
> + (*rte_mov64)(dst + 0 * 64, src + 0 * 64);
> + (*rte_mov64)(dst + 1 * 64, src + 1 * 64);
> + (*rte_mov64)(dst + 2 * 64, src + 2 * 64);
> + (*rte_mov64)(dst + 3 * 64, src + 3 * 64);
> }
>
> /**
> @@ -144,7 +166,7 @@ rte_mov256(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
> +rte_mov128blocks_AVX512F(uint8_t *dst, const uint8_t *src, size_t n)
> {
> __m512i zmm0, zmm1;
>
> @@ -164,7 +186,7 @@ rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
> * locations should not overlap.
> */
> static inline void
> -rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
> +rte_mov512blocks_AVX512F(uint8_t *dst, const uint8_t *src, size_t n)
> {
> __m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;
>
> @@ -192,7 +214,7 @@ rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
> }
>
> static inline void *
> -rte_memcpy_generic(void *dst, const void *src, size_t n)
> +rte_memcpy_generic_AVX512F(void *dst, const void *src, size_t n)
> {
> uintptr_t dstu = (uintptr_t)dst;
> uintptr_t srcu = (uintptr_t)src;
> @@ -228,39 +250,39 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> * Fast way when copy size doesn't exceed 512 bytes
> */
> if (n <= 32) {
> - rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov16((uint8_t *)dst - 16 + n,
> + (*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov16)((uint8_t *)dst - 16 + n,
> (const uint8_t *)src - 16 + n);
> return ret;
> }
> if (n <= 64) {
> - rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov32((uint8_t *)dst - 32 + n,
> + (*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov32)((uint8_t *)dst - 32 + n,
> (const uint8_t *)src - 32 + n);
> return ret;
> }
> if (n <= 512) {
> if (n >= 256) {
> n -= 256;
> - rte_mov256((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov256)((uint8_t *)dst, (const uint8_t *)src);
> src = (const uint8_t *)src + 256;
> dst = (uint8_t *)dst + 256;
> }
> if (n >= 128) {
> n -= 128;
> - rte_mov128((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov128)((uint8_t *)dst, (const uint8_t *)src);
> src = (const uint8_t *)src + 128;
> dst = (uint8_t *)dst + 128;
> }
> COPY_BLOCK_128_BACK63:
> if (n > 64) {
> - rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov64((uint8_t *)dst - 64 + n,
> + (*rte_mov64)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov64)((uint8_t *)dst - 64 + n,
> (const uint8_t *)src - 64 + n);
> return ret;
> }
> if (n > 0)
> - rte_mov64((uint8_t *)dst - 64 + n,
> + (*rte_mov64)((uint8_t *)dst - 64 + n,
> (const uint8_t *)src - 64 + n);
> return ret;
> }
> @@ -272,7 +294,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> if (dstofss > 0) {
> dstofss = 64 - dstofss;
> n -= dstofss;
> - rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov64)((uint8_t *)dst, (const uint8_t *)src);
> src = (const uint8_t *)src + dstofss;
> dst = (uint8_t *)dst + dstofss;
> }
> @@ -282,7 +304,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> * Use copy block function for better instruction order control,
> * which is important when load is unaligned.
> */
> - rte_mov512blocks((uint8_t *)dst, (const uint8_t *)src, n);
> + (*rte_mov512blocks)((uint8_t *)dst, (const uint8_t *)src, n);
> bits = n;
> n = n & 511;
> bits -= n;
> @@ -295,7 +317,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> * which is important when load is unaligned.
> */
> if (n >= 128) {
> - rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
> + (*rte_mov128blocks)((uint8_t *)dst, (const uint8_t *)src, n);
> bits = n;
> n = n & 127;
> bits -= n;
> @@ -309,10 +331,6 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> goto COPY_BLOCK_128_BACK63;
> }
>
> -#elif defined RTE_MACHINE_CPUFLAG_AVX2
> -
> -#define ALIGNMENT_MASK 0x1F
> -
> /**
> * AVX2 implementation below
> */
> @@ -322,7 +340,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> * locations should not overlap.
> */
> static inline void
> -rte_mov16(uint8_t *dst, const uint8_t *src)
> +rte_mov16_AVX2(uint8_t *dst, const uint8_t *src)
> {
> __m128i xmm0;
>
> @@ -335,7 +353,7 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov32(uint8_t *dst, const uint8_t *src)
> +rte_mov32_AVX2(uint8_t *dst, const uint8_t *src)
> {
> __m256i ymm0;
>
> @@ -348,10 +366,10 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov64(uint8_t *dst, const uint8_t *src)
> +rte_mov64_AVX2(uint8_t *dst, const uint8_t *src)
> {
> - rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> - rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
> + (*rte_mov32)((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> + (*rte_mov32)((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
> }
>
> /**
> @@ -359,12 +377,12 @@ rte_mov64(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov128(uint8_t *dst, const uint8_t *src)
> +rte_mov128_AVX2(uint8_t *dst, const uint8_t *src)
> {
> - rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> - rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
> - rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
> - rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
> + (*rte_mov32)((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> + (*rte_mov32)((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
> + (*rte_mov32)((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
> + (*rte_mov32)((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
> }
>
> /**
> @@ -372,7 +390,7 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
> +rte_mov128blocks_AVX2(uint8_t *dst, const uint8_t *src, size_t n)
> {
> __m256i ymm0, ymm1, ymm2, ymm3;
>
> @@ -392,7 +410,7 @@ rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
> }
>
> static inline void *
> -rte_memcpy_generic(void *dst, const void *src, size_t n)
> +rte_memcpy_generic_AVX2(void *dst, const void *src, size_t n)
> {
> uintptr_t dstu = (uintptr_t)dst;
> uintptr_t srcu = (uintptr_t)src;
> @@ -429,46 +447,46 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> * Fast way when copy size doesn't exceed 256 bytes
> */
> if (n <= 32) {
> - rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov16((uint8_t *)dst - 16 + n,
> + (*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov16)((uint8_t *)dst - 16 + n,
> (const uint8_t *)src - 16 + n);
> return ret;
> }
> if (n <= 48) {
> - rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
> - rte_mov16((uint8_t *)dst - 16 + n,
> + (*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov16)((uint8_t *)dst + 16, (const uint8_t *)src + 16);
> + (*rte_mov16)((uint8_t *)dst - 16 + n,
> (const uint8_t *)src - 16 + n);
> return ret;
> }
> if (n <= 64) {
> - rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov32((uint8_t *)dst - 32 + n,
> + (*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov32)((uint8_t *)dst - 32 + n,
> (const uint8_t *)src - 32 + n);
> return ret;
> }
> if (n <= 256) {
> if (n >= 128) {
> n -= 128;
> - rte_mov128((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov128)((uint8_t *)dst, (const uint8_t *)src);
> src = (const uint8_t *)src + 128;
> dst = (uint8_t *)dst + 128;
> }
> COPY_BLOCK_128_BACK31:
> if (n >= 64) {
> n -= 64;
> - rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov64)((uint8_t *)dst, (const uint8_t *)src);
> src = (const uint8_t *)src + 64;
> dst = (uint8_t *)dst + 64;
> }
> if (n > 32) {
> - rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov32((uint8_t *)dst - 32 + n,
> + (*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov32)((uint8_t *)dst - 32 + n,
> (const uint8_t *)src - 32 + n);
> return ret;
> }
> if (n > 0) {
> - rte_mov32((uint8_t *)dst - 32 + n,
> + (*rte_mov32)((uint8_t *)dst - 32 + n,
> (const uint8_t *)src - 32 + n);
> }
> return ret;
> @@ -481,7 +499,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> if (dstofss > 0) {
> dstofss = 32 - dstofss;
> n -= dstofss;
> - rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> src = (const uint8_t *)src + dstofss;
> dst = (uint8_t *)dst + dstofss;
> }
> @@ -489,7 +507,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> /**
> * Copy 128-byte blocks
> */
> - rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
> + (*rte_mov128blocks)((uint8_t *)dst, (const uint8_t *)src, n);
> bits = n;
> n = n & 127;
> bits -= n;
> @@ -502,10 +520,6 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> goto COPY_BLOCK_128_BACK31;
> }
>
> -#else /* RTE_MACHINE_CPUFLAG */
> -
> -#define ALIGNMENT_MASK 0x0F
> -
> /**
> * SSE & AVX implementation below
> */
> @@ -515,7 +529,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> * locations should not overlap.
> */
> static inline void
> -rte_mov16(uint8_t *dst, const uint8_t *src)
> +rte_mov16_DEFAULT(uint8_t *dst, const uint8_t *src)
> {
> __m128i xmm0;
>
> @@ -528,10 +542,10 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov32(uint8_t *dst, const uint8_t *src)
> +rte_mov32_DEFAULT(uint8_t *dst, const uint8_t *src)
> {
> - rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> - rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> + (*rte_mov16)((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> + (*rte_mov16)((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> }
>
> /**
> @@ -539,12 +553,12 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov64(uint8_t *dst, const uint8_t *src)
> +rte_mov64_DEFAULT(uint8_t *dst, const uint8_t *src)
> {
> - rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> - rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> - rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> - rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> + (*rte_mov16)((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> + (*rte_mov16)((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> + (*rte_mov16)((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> + (*rte_mov16)((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> }
>
> /**
> @@ -552,16 +566,16 @@ rte_mov64(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov128(uint8_t *dst, const uint8_t *src)
> +rte_mov128_DEFAULT(uint8_t *dst, const uint8_t *src)
> {
> - rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> - rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> - rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> - rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> - rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
> - rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
> - rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
> - rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
> + (*rte_mov16)((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> + (*rte_mov16)((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> + (*rte_mov16)((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> + (*rte_mov16)((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> + (*rte_mov16)((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
> + (*rte_mov16)((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
> + (*rte_mov16)((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
> + (*rte_mov16)((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
> }
>
> /**
> @@ -569,24 +583,24 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
> * locations should not overlap.
> */
> static inline void
> -rte_mov256(uint8_t *dst, const uint8_t *src)
> +rte_mov256_DEFAULT(uint8_t *dst, const uint8_t *src)
> {
> - rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> - rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> - rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> - rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> - rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
> - rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
> - rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
> - rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
> - rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
> - rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
> - rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
> - rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
> - rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
> - rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
> - rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
> - rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
> + (*rte_mov16)((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> + (*rte_mov16)((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> + (*rte_mov16)((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> + (*rte_mov16)((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> + (*rte_mov16)((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
> + (*rte_mov16)((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
> + (*rte_mov16)((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
> + (*rte_mov16)((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
> + (*rte_mov16)((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
> + (*rte_mov16)((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
> + (*rte_mov16)((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
> + (*rte_mov16)((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
> + (*rte_mov16)((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
> + (*rte_mov16)((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
> + (*rte_mov16)((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
> + (*rte_mov16)((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
> }
>
> /**
> @@ -684,7 +698,7 @@ __extension__ ({ \
> })
>
> static inline void *
> -rte_memcpy_generic(void *dst, const void *src, size_t n)
> +rte_memcpy_generic_DEFAULT(void *dst, const void *src, size_t n)
> {
> __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
> uintptr_t dstu = (uintptr_t)dst;
> @@ -722,19 +736,22 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> * Fast way when copy size doesn't exceed 512 bytes
> */
> if (n <= 32) {
> - rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
> + (*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov16)((uint8_t *)dst - 16 + n,
> + (const uint8_t *)src - 16 + n);
> return ret;
> }
> if (n <= 48) {
> - rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
> + (*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov16)((uint8_t *)dst - 16 + n,
> + (const uint8_t *)src - 16 + n);
> return ret;
> }
> if (n <= 64) {
> - rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
> - rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
> + (*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov16)((uint8_t *)dst + 32, (const uint8_t *)src + 32);
> + (*rte_mov16)((uint8_t *)dst - 16 + n,
> + (const uint8_t *)src - 16 + n);
> return ret;
> }
> if (n <= 128) {
> @@ -743,39 +760,42 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> if (n <= 512) {
> if (n >= 256) {
> n -= 256;
> - rte_mov128((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
> + (*rte_mov128)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov128)((uint8_t *)dst + 128,
> + (const uint8_t *)src + 128);
> src = (const uint8_t *)src + 256;
> dst = (uint8_t *)dst + 256;
> }
> COPY_BLOCK_255_BACK15:
> if (n >= 128) {
> n -= 128;
> - rte_mov128((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov128)((uint8_t *)dst, (const uint8_t *)src);
> src = (const uint8_t *)src + 128;
> dst = (uint8_t *)dst + 128;
> }
> COPY_BLOCK_128_BACK15:
> if (n >= 64) {
> n -= 64;
> - rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov64)((uint8_t *)dst, (const uint8_t *)src);
> src = (const uint8_t *)src + 64;
> dst = (uint8_t *)dst + 64;
> }
> COPY_BLOCK_64_BACK15:
> if (n >= 32) {
> n -= 32;
> - rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> src = (const uint8_t *)src + 32;
> dst = (uint8_t *)dst + 32;
> }
> if (n > 16) {
> - rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
> + (*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov16)((uint8_t *)dst - 16 + n,
> + (const uint8_t *)src - 16 + n);
> return ret;
> }
> if (n > 0) {
> - rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
> + (*rte_mov16)((uint8_t *)dst - 16 + n,
> + (const uint8_t *)src - 16 + n);
> }
> return ret;
> }
> @@ -790,7 +810,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> if (dstofss > 0) {
> dstofss = 16 - dstofss + 16;
> n -= dstofss;
> - rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> src = (const uint8_t *)src + dstofss;
> dst = (uint8_t *)dst + dstofss;
> }
> @@ -804,7 +824,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> * Copy 256-byte blocks
> */
> for (; n >= 256; n -= 256) {
> - rte_mov256((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov256)((uint8_t *)dst, (const uint8_t *)src);
> dst = (uint8_t *)dst + 256;
> src = (const uint8_t *)src + 256;
> }
> @@ -826,7 +846,40 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> goto COPY_BLOCK_64_BACK15;
> }
>
> -#endif /* RTE_MACHINE_CPUFLAG */
> +static void __attribute__((constructor))
> +rte_memcpy_init(void)
> +{
> + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) {
> + alignment_mask = 0x3F;
> + rte_mov16 = rte_mov16_AVX512F;
> + rte_mov32 = rte_mov32_AVX512F;
> + rte_mov64 = rte_mov64_AVX512F;
> + rte_mov128 = rte_mov128_AVX512F;
> + rte_mov256 = rte_mov256_AVX512F;
> + rte_mov128blocks = rte_mov128blocks_AVX512F;
> + rte_mov512blocks = rte_mov512blocks_AVX512F;
> + rte_memcpy_generic = rte_memcpy_generic_AVX512F;
> + RTE_LOG(INFO, EAL, "AVX512 implementation of memcpy() is being used!\n");
> + } else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
> + alignment_mask = 0x1F;
> + rte_mov16 = rte_mov16_AVX2;
> + rte_mov32 = rte_mov32_AVX2;
> + rte_mov64 = rte_mov64_AVX2;
> + rte_mov128 = rte_mov128_AVX2;
> + rte_mov128blocks = rte_mov128blocks_AVX2;
> + rte_memcpy_generic = rte_memcpy_generic_AVX2;
> + RTE_LOG(INFO, EAL, "AVX2 implementation of memcpy() is being used!\n");
> + } else {
> + alignment_mask = 0x0F;
> + rte_mov16 = rte_mov16_DEFAULT;
> + rte_mov32 = rte_mov32_DEFAULT;
> + rte_mov64 = rte_mov64_DEFAULT;
> + rte_mov128 = rte_mov128_DEFAULT;
> + rte_mov256 = rte_mov256_DEFAULT;
> + rte_memcpy_generic = rte_memcpy_generic_DEFAULT;
> + RTE_LOG(INFO, EAL, "Default SSE/AVX implementation of memcpy() is being used!\n");
> + }
> +}
>
> static inline void *
> rte_memcpy_aligned(void *dst, const void *src, size_t n)
> @@ -858,8 +911,8 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
>
> /* Copy 16 <= size <= 32 bytes */
> if (n <= 32) {
> - rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov16((uint8_t *)dst - 16 + n,
> + (*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov16)((uint8_t *)dst - 16 + n,
> (const uint8_t *)src - 16 + n);
>
> return ret;
> @@ -867,8 +920,8 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
>
> /* Copy 32 < size <= 64 bytes */
> if (n <= 64) {
> - rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> - rte_mov32((uint8_t *)dst - 32 + n,
> + (*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov32)((uint8_t *)dst - 32 + n,
> (const uint8_t *)src - 32 + n);
>
> return ret;
> @@ -876,13 +929,13 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
>
> /* Copy 64 bytes blocks */
> for (; n >= 64; n -= 64) {
> - rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> + (*rte_mov64)((uint8_t *)dst, (const uint8_t *)src);
> dst = (uint8_t *)dst + 64;
> src = (const uint8_t *)src + 64;
> }
>
> /* Copy whatever left */
> - rte_mov64((uint8_t *)dst - 64 + n,
> + (*rte_mov64)((uint8_t *)dst - 64 + n,
> (const uint8_t *)src - 64 + n);
>
> return ret;
> @@ -891,10 +944,10 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
> static inline void *
> rte_memcpy(void *dst, const void *src, size_t n)
> {
> - if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
> + if (!(((uintptr_t)dst | (uintptr_t)src) & alignment_mask))
> return rte_memcpy_aligned(dst, src, n);
> else
> - return rte_memcpy_generic(dst, src, n);
> + return (*rte_memcpy_generic)(dst, src, n);
> }
>
> #ifdef __cplusplus
> diff --git a/mk/machine/native/rte.vars.mk b/mk/machine/native/rte.vars.mk
> index f7d98d0..cdcf6c6 100644
> --- a/mk/machine/native/rte.vars.mk
> +++ b/mk/machine/native/rte.vars.mk
> @@ -65,3 +65,5 @@ SSE42_SUPPORT=$(shell $(CC) -march=native -dM -E - </dev/null | grep SSE4_2)
> ifeq ($(SSE42_SUPPORT),)
> MACHINE_CFLAGS = -march=corei7
> endif
> +
> +MACHINE_CFLAGS += -mavx512f
> --
> 2.7.4
On Wed, Aug 30, 2017 at 03:56:35PM +0100, Ananyev, Konstantin wrote:
>
>
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Xiaoyun Li
> > Sent: Friday, August 25, 2017 3:06 AM
> > To: Richardson, Bruce <bruce.richardson@intel.com>
> > Cc: dev@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>; Zhang, Qi Z
> > <qi.z.zhang@intel.com>; Li, Xiaoyun <xiaoyun.li@intel.com>
> > Subject: [dpdk-dev] [PATCH 1/3] eal/x86: run-time dispatch over memcpy
> >
> > This patch dynamically selects the memcpy functions at run-time based
> > on the CPU flags that the current machine supports. It uses function
> > pointers which are bound to the appropriate implementations at
> > constructor time. To make the AVX512 instructions pass compilation,
> > enable the switch in the makefile.
>
> It seems quite an overhead to add an extra function call for each 16B movement...
> Wouldn't it be better to have one func_ptr per implementation, i.e.:
> rte_memcpy_sse(), rte_memcpy_avx2(), rte_memcpy_avx512(), etc.?
> Konstantin
>
+1 to this.
Also, how big of a benefit is there for this implementation over
standard libc memcpy (in a reasonably bleeding edge distro like e.g.
Fedora 26)?
/Bruce
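
(For anyone wanting to quantify that, a minimal, hypothetical
micro-benchmark sketch; rte_rdtsc() comes from rte_cycles.h, and both
memcpy and rte_memcpy can be passed through the same pointer type.)

#include <stdint.h>
#include <string.h>
#include <rte_cycles.h>
#include <rte_memcpy.h>

/* Average cycles per copy for a given routine and size. */
static uint64_t
copy_cycles(void *(*cpy)(void *, const void *, size_t),
	    uint8_t *dst, const uint8_t *src, size_t n, unsigned int iters)
{
	uint64_t start = rte_rdtsc();
	unsigned int i;

	for (i = 0; i < iters; i++)
		cpy(dst, src, n);
	return (rte_rdtsc() - start) / iters;
}

/* e.g. copy_cycles(memcpy, dst, src, 128, 1 << 20) vs.
 *      copy_cycles(rte_memcpy, dst, src, 128, 1 << 20) */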
On Fri, 25 Aug 2017 10:06:11 +0800
Xiaoyun Li <xiaoyun.li@intel.com> wrote:
> This patch dynamically selects the memcpy functions at run-time based
> on the CPU flags that the current machine supports. It uses function
> pointers which are bound to the appropriate implementations at
> constructor time. To make the AVX512 instructions pass compilation,
> enable the switch in the makefile.
>
> Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Recent versions of GCC also have better ways to handle this.
Hi Bruce,
> -----Original Message-----
> From: Richardson, Bruce
> Sent: Thursday, August 31, 2017 1:52 AM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: Li, Xiaoyun <xiaoyun.li@intel.com>; dev@dpdk.org; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>; Zhang,
> Qi Z <qi.z.zhang@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 1/3] eal/x86: run-time dispatch over memcpy
>
> On Wed, Aug 30, 2017 at 03:56:35PM +0100, Ananyev, Konstantin wrote:
> >
> >
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Xiaoyun Li
> > > Sent: Friday, August 25, 2017 3:06 AM
> > > To: Richardson, Bruce <bruce.richardson@intel.com>
> > > Cc: dev@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Wang,
> Zhihong
> > > <zhihong.wang@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Li,
> > > Xiaoyun <xiaoyun.li@intel.com>
> > > Subject: [dpdk-dev] [PATCH 1/3] eal/x86: run-time dispatch over
> > > memcpy
> > >
> > > This patch dynamically selects the memcpy functions at run-time based
> > > on the CPU flags that the current machine supports. It uses function
> > > pointers which are bound to the appropriate implementations at
> > > constructor time. To make the AVX512 instructions pass compilation,
> > > enable the switch in the makefile.
> >
> > It seems quite an overhead to add an extra function call for each 16B
> > movement...
> > Wouldn't it be better to have one func_ptr per implementation, i.e.:
> > rte_memcpy_sse(), rte_memcpy_avx2(), rte_memcpy_avx512(), etc.?
> > Konstantin
> >
> +1 to this.
>
> Also, how big of a benefit is there for this implementation over standard libc
> memcpy (in a reasonably bleeding edge distro like e.g.
> Fedora 26)?
This patch is not an optimization. It is only meant to make the code easier to use, so the performance is the same as before.
I'm also curious about the benefit. I suppose it's better than the standard libc memcpy; if not, maybe we should just use libc and this patch is not worthwhile.
+ Sergio, the maintainer of this module, for more suggestions. Thanks.
>
> /Bruce
Hi Stephen,
> -----Original Message-----
> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> Sent: Thursday, August 31, 2017 2:01 AM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>
> Cc: Richardson, Bruce <bruce.richardson@intel.com>; dev@dpdk.org; Lu,
> Wenzhuo <wenzhuo.lu@intel.com>; Wang, Zhihong
> <zhihong.wang@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 1/3] eal/x86: run-time dispatch over memcpy
>
> On Fri, 25 Aug 2017 10:06:11 +0800
> Xiaoyun Li <xiaoyun.li@intel.com> wrote:
>
> > This patch dynamically selects the memcpy functions at run-time based
> > on the CPU flags that the current machine supports. It uses function
> > pointers which are bound to the appropriate implementations at
> > constructor time. To make the AVX512 instructions pass compilation,
> > enable the switch in the makefile.
> >
> > Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
>
> Recent versions of GCC also have better ways to handle this.
I think the assumption behind using these instructions is that we believe we can do better than the compiler. If it turns out we can't, maybe we need to change the instruction-based code back to plain C. But that's another story.
I was referring to gcc multiversion
https://gcc.gnu.org/wiki/FunctionMultiVersioning
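
(A minimal sketch of the C-compatible form of that, using GCC 6+'s
target_clones attribute; the function name is illustrative only.)

#include <string.h>

/* GCC builds one clone of the function per listed target and emits an
 * ifunc resolver that picks the best clone at load time, so no
 * hand-written dispatch is needed. */
__attribute__((target_clones("avx512f", "avx2", "default")))
void *
my_memcpy(void *dst, const void *src, size_t n)
{
	return memcpy(dst, src, n);
}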
On Aug 30, 2017 6:24 PM, "Lu, Wenzhuo" <wenzhuo.lu@intel.com> wrote:
> Hi Stephen,
>
>
> > -----Original Message-----
> > From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> > Sent: Thursday, August 31, 2017 2:01 AM
> > To: Li, Xiaoyun <xiaoyun.li@intel.com>
> > Cc: Richardson, Bruce <bruce.richardson@intel.com>; dev@dpdk.org; Lu,
> > Wenzhuo <wenzhuo.lu@intel.com>; Wang, Zhihong
> > <zhihong.wang@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> > Subject: Re: [dpdk-dev] [PATCH 1/3] eal/x86: run-time dispatch over
> memcpy
> >
> > On Fri, 25 Aug 2017 10:06:11 +0800
> > Xiaoyun Li <xiaoyun.li@intel.com> wrote:
> >
> > > This patch dynamically selects the memcpy functions at run-time based
> > > on the CPU flags that the current machine supports. It uses function
> > > pointers which are bound to the appropriate implementations at
> > > constructor time. To make the AVX512 instructions pass compilation,
> > > enable the switch in the makefile.
> > >
> > > Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
> >
> > Recent versions of GCC also have better ways to handle this.
> I think the assumption behind using these instructions is that we believe
> we can do better than the compiler. If it turns out we can't, maybe we
> need to change the instruction-based code back to plain C. But that's
> another story.
>
>
About gcc FMV, I tried it several days ago.
But the approach of using the same function name with different target attributes only works in C++.
I then tried GCC 6, since it is said that GCC 6 supports this for both C and C++, but it doesn't work either.
However, using different function names with target attributes does work.
A function with the AVX512 target attribute is compiled with AVX512 enabled.
So I added an attribute to each function (see the sketch below) and deleted -mavx512f from the makefile. But I haven't sent the patch yet because there are some compilation issues.
Before, the AVX512 code was compiled only if both the compiler and the CPU supported AVX512 and the user wanted it, guarded by the macro RTE_MACHINE_CPUFLAG_AVX512F.
Now we hope to compile all the variants and choose one at run-time based on the CPU.
But only GCC 4.9 and above, and the newest clang, support AVX512.
So I am thinking of adding a macro switch in mk which determines whether the compiler supports AVX512 and whether users want to use the 512-bit code (CPU support is not needed at build time because it is detected at run-time).
The 512-bit code would be compiled only if the compiler supports AVX512 and users want it.
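
A rough sketch of the per-function attribute idea (GCC syntax;
illustrative only, not the final patch):

#include <immintrin.h>
#include <stdint.h>

/* The target attribute compiles each variant with its ISA enabled,
 * without passing -mavx512f/-mavx2 globally; the run-time CPU flag
 * check still decides which variant is actually called. */
static __attribute__((target("avx512f"))) void
rte_mov64_AVX512F(uint8_t *dst, const uint8_t *src)
{
	__m512i zmm0 = _mm512_loadu_si512((const void *)src);

	_mm512_storeu_si512((void *)dst, zmm0);
}

static __attribute__((target("avx2"))) void
rte_mov32_AVX2(uint8_t *dst, const uint8_t *src)
{
	__m256i ymm0 = _mm256_loadu_si256((const __m256i *)src);

	_mm256_storeu_si256((__m256i *)dst, ymm0);
}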
Best Regards,
Xiaoyun Li