@@ -106,8 +106,10 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
- rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
- rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+ size_t i;
+
+ for (i = 0; i < 2; i++)
+ rte_mov32(dst + i * 32, src + i * 32);
}
/**
@@ -117,10 +119,10 @@ rte_mov64(uint8_t *dst, const uint8_t *src)
static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
- rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
- rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
- rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
- rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
+ size_t i;
+
+ for (i = 0; i < 4; i++)
+ rte_mov32(dst + i * 32, src + i * 32);
}
/**
@@ -130,14 +132,10 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
- rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
- rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
- rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
- rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
- rte_mov32((uint8_t *)dst + 4 * 32, (const uint8_t *)src + 4 * 32);
- rte_mov32((uint8_t *)dst + 5 * 32, (const uint8_t *)src + 5 * 32);
- rte_mov32((uint8_t *)dst + 6 * 32, (const uint8_t *)src + 6 * 32);
- rte_mov32((uint8_t *)dst + 7 * 32, (const uint8_t *)src + 7 * 32);
+ size_t i;
+
+ for (i = 0; i < 8; i++)
+ rte_mov32(dst + i * 32, src + i * 32);
}
/**
@@ -147,16 +145,19 @@ rte_mov256(uint8_t *dst, const uint8_t *src)
static inline void
rte_mov64blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
- __m256i ymm0, ymm1;
+ __m256i ymm[2];
+ size_t i;
while (n >= 64) {
- ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
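+ /* Issue both loads before the stores; this preserves the
+ * load/store ordering that matters when the source is unaligned.
+ */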
+ for (i = 0; i < 2; i++)
+ ymm[i] = _mm256_loadu_si256((const __m256i *)(src + i * 32));
+ for (i = 0; i < 2; i++)
+ _mm256_storeu_si256((__m256i *)(dst + i * 32), ymm[i]);
+
n -= 64;
- ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
- src = (const uint8_t *)src + 64;
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
- dst = (uint8_t *)dst + 64;
+ src += 64;
+ dst += 64;
}
}
@@ -167,37 +168,30 @@ rte_mov64blocks(uint8_t *dst, const uint8_t *src, size_t n)
static inline void
rte_mov256blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
- __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;
+ __m256i ymm[8];
+ size_t i;
while (n >= 256) {
- ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
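+ /* As in rte_mov64blocks, issue all loads before the stores. */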
+ for (i = 0; i < 8; i++)
+ ymm[i] = _mm256_loadu_si256((const __m256i *)(src + i * 32));
+ for (i = 0; i < 8; i++)
+ _mm256_storeu_si256((__m256i *)(dst + i * 32), ymm[i]);
+
n -= 256;
- ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
- ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
- ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
- ymm4 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 4 * 32));
- ymm5 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 5 * 32));
- ymm6 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 6 * 32));
- ymm7 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 7 * 32));
- src = (const uint8_t *)src + 256;
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 4 * 32), ymm4);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 5 * 32), ymm5);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 6 * 32), ymm6);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 7 * 32), ymm7);
- dst = (uint8_t *)dst + 256;
+ src += 256;
+ dst += 256;
}
}
static inline void *
-rte_memcpy(void *dst, const void *src, size_t n)
+rte_memcpy(void *_dst, const void *_src, size_t n)
{
- uintptr_t dstu = (uintptr_t)dst;
- uintptr_t srcu = (uintptr_t)src;
- void *ret = dst;
+ const uint8_t *src = (const uint8_t *)_src;
+ uint8_t *dst = (uint8_t *)_dst;
+ uintptr_t dstu = (uintptr_t)_dst;
+ uintptr_t srcu = (uintptr_t)_src;
+ void *ret = _dst;
size_t dstofss;
size_t bits;
@@ -230,43 +224,44 @@ rte_memcpy(void *dst, const void *src, size_t n)
* Fast way when copy size doesn't exceed 512 bytes
*/
if (n <= 32) {
- rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
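+ /* n is at least 16 here (smaller sizes are handled earlier), so a
+ * 16-byte head copy plus a possibly overlapping 16-byte tail copy
+ * covers the whole range.
+ */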
+ rte_mov16(dst, src);
+ rte_mov16(dst - 16 + n, src - 16 + n);
return ret;
}
if (n <= 64) {
- rte_mov32((uint8_t *)dst, (const uint8_t *)src);
- rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ rte_mov32(dst, src);
+ rte_mov32(dst - 32 + n, src - 32 + n);
return ret;
}
if (n <= 512) {
if (n >= 256) {
n -= 256;
- rte_mov256((uint8_t *)dst, (const uint8_t *)src);
- src = (const uint8_t *)src + 256;
- dst = (uint8_t *)dst + 256;
+ rte_mov256(dst, src);
+ src += 256;
+ dst += 256;
}
if (n >= 128) {
n -= 128;
- rte_mov128((uint8_t *)dst, (const uint8_t *)src);
- src = (const uint8_t *)src + 128;
- dst = (uint8_t *)dst + 128;
+ rte_mov128(dst, src);
+ src += 128;
+ dst += 128;
}
if (n >= 64) {
n -= 64;
- rte_mov64((uint8_t *)dst, (const uint8_t *)src);
- src = (const uint8_t *)src + 64;
- dst = (uint8_t *)dst + 64;
+ rte_mov64(dst, src);
+ src += 64;
+ dst += 64;
}
+
COPY_BLOCK_64_BACK31:
if (n > 32) {
- rte_mov32((uint8_t *)dst, (const uint8_t *)src);
- rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ rte_mov32(dst, src);
+ rte_mov32(dst - 32 + n, src - 32 + n);
return ret;
}
- if (n > 0) {
- rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
- }
+ if (n > 0)
+ rte_mov32(dst - 32 + n, src - 32 + n);
+
return ret;
}
@@ -275,21 +270,21 @@ COPY_BLOCK_64_BACK31:
*/
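+ /* 1..32 bytes until dst is 32-byte aligned (32 if already aligned). */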
dstofss = 32 - ((uintptr_t)dst & 0x1F);
n -= dstofss;
- rte_mov32((uint8_t *)dst, (const uint8_t *)src);
- src = (const uint8_t *)src + dstofss;
- dst = (uint8_t *)dst + dstofss;
+ rte_mov32(dst, src);
+ src += dstofss;
+ dst += dstofss;
/**
* Copy 256-byte blocks.
* Use copy block function for better instruction order control,
* which is important when load is unaligned.
*/
- rte_mov256blocks((uint8_t *)dst, (const uint8_t *)src, n);
+ rte_mov256blocks(dst, src, n);
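+ /* rte_mov256blocks copies whole 256-byte blocks only; bits works
+ * out to the number of bytes it consumed.
+ */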
bits = n;
n = n & 255;
bits -= n;
- src = (const uint8_t *)src + bits;
- dst = (uint8_t *)dst + bits;
+ src += bits;
+ dst += bits;
/**
* Copy 64-byte blocks.
@@ -297,12 +292,12 @@ COPY_BLOCK_64_BACK31:
* which is important when load is unaligned.
*/
if (n >= 64) {
- rte_mov64blocks((uint8_t *)dst, (const uint8_t *)src, n);
+ rte_mov64blocks(dst, src, n);
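+ /* Likewise, bits becomes the bytes consumed in 64-byte blocks. */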
bits = n;
n = n & 63;
bits -= n;
- src = (const uint8_t *)src + bits;
- dst = (uint8_t *)dst + bits;
+ src += bits;
+ dst += bits;
}
/**
@@ -337,8 +332,10 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
- rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
- rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+ size_t i;
+
+ for (i = 0; i < 2; i++)
+ rte_mov16(dst + i * 16, src + i * 16);
}
/**
@@ -348,10 +345,10 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
- rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
- rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
- rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
- rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
+ size_t i;
+
+ for (i = 0; i < 4; i++)
+ rte_mov16(dst + i * 16, src + i * 16);
}
/**
@@ -361,14 +358,10 @@ rte_mov64(uint8_t *dst, const uint8_t *src)
static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
- rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
- rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
- rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
- rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
- rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
- rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
- rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
- rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
+ size_t i;
+
+ for (i = 0; i < 8; i++)
+ rte_mov16(dst + i * 16, src + i * 16);
}
/**
@@ -378,22 +371,10 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
- rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
- rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
- rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
- rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
- rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
- rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
- rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
- rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
- rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
- rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
- rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
- rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
- rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
- rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
- rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
- rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
+ size_t i;
+
+ for (i = 0; i < 16; i++)
+ rte_mov16(dst + i * 16, src + i * 16);
}
/**
@@ -411,48 +392,50 @@ rte_mov256(uint8_t *dst, const uint8_t *src)
({ \
int tmp; \
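+ /* At the call site, src - offset is 16-byte aligned; adjacent */ \
+ /* loads are recombined with _mm_alignr_epi8 before each store. */ \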
while (len >= 128 + 16 - offset) { \
- xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
+ xmm0 = _mm_loadu_si128((const __m128i *)(src - offset + 0 * 16)); \
len -= 128; \
- xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
- xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
- xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16)); \
- xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16)); \
- xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16)); \
- xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16)); \
- xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16)); \
- xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16)); \
- src = (const uint8_t *)src + 128; \
- _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
- _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
- _mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
- _mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
- _mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
- _mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
- _mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
- _mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
- dst = (uint8_t *)dst + 128; \
+ xmm1 = _mm_loadu_si128((const __m128i *)(src - offset + 1 * 16)); \
+ xmm2 = _mm_loadu_si128((const __m128i *)(src - offset + 2 * 16)); \
+ xmm3 = _mm_loadu_si128((const __m128i *)(src - offset + 3 * 16)); \
+ xmm4 = _mm_loadu_si128((const __m128i *)(src - offset + 4 * 16)); \
+ xmm5 = _mm_loadu_si128((const __m128i *)(src - offset + 5 * 16)); \
+ xmm6 = _mm_loadu_si128((const __m128i *)(src - offset + 6 * 16)); \
+ xmm7 = _mm_loadu_si128((const __m128i *)(src - offset + 7 * 16)); \
+ xmm8 = _mm_loadu_si128((const __m128i *)(src - offset + 8 * 16)); \
+ src += 128; \
+ _mm_storeu_si128((__m128i *)(dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
+ _mm_storeu_si128((__m128i *)(dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
+ _mm_storeu_si128((__m128i *)(dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
+ _mm_storeu_si128((__m128i *)(dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
+ _mm_storeu_si128((__m128i *)(dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
+ _mm_storeu_si128((__m128i *)(dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
+ _mm_storeu_si128((__m128i *)(dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
+ _mm_storeu_si128((__m128i *)(dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
+ dst += 128; \
} \
tmp = len; \
len = ((len - 16 + offset) & 127) + 16 - offset; \
tmp -= len; \
- src = (const uint8_t *)src + tmp; \
- dst = (uint8_t *)dst + tmp; \
+ src += tmp; \
+ dst += tmp; \
if (len >= 32 + 16 - offset) { \
while (len >= 32 + 16 - offset) { \
- xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
+ xmm0 = _mm_loadu_si128((const __m128i *)(src - offset + 0 * 16)); \
len -= 32; \
- xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
- xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
- src = (const uint8_t *)src + 32; \
- _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
- _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
- dst = (uint8_t *)dst + 32; \
+ xmm1 = _mm_loadu_si128((const __m128i *)(src - offset + 1 * 16)); \
+ xmm2 = _mm_loadu_si128((const __m128i *)(src - offset + 2 * 16)); \
+ src += 32; \
+ _mm_storeu_si128((__m128i *)(dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
+ _mm_storeu_si128((__m128i *)(dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
+ dst += 32; \
} \
tmp = len; \
len = ((len - 16 + offset) & 31) + 16 - offset; \
tmp -= len; \
- src = (const uint8_t *)src + tmp; \
- dst = (uint8_t *)dst + tmp; \
+ src += tmp; \
+ dst += tmp; \
} \
})
@@ -491,12 +474,14 @@ rte_mov256(uint8_t *dst, const uint8_t *src)
})
static inline void *
-rte_memcpy(void *dst, const void *src, size_t n)
+rte_memcpy(void *_dst, const void *_src, size_t n)
{
__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
- uintptr_t dstu = (uintptr_t)dst;
- uintptr_t srcu = (uintptr_t)src;
- void *ret = dst;
+ const uint8_t *src = (const uint8_t *)_src;
+ uint8_t *dst = (uint8_t *)_dst;
+ uintptr_t dstu = (uintptr_t)_dst;
+ uintptr_t srcu = (uintptr_t)_src;
+ void *ret = _dst;
size_t dstofss;
size_t srcofs;
@@ -529,19 +514,19 @@ rte_memcpy(void *dst, const void *src, size_t n)
* Fast way when copy size doesn't exceed 512 bytes
*/
if (n <= 32) {
- rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
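+ /* 16-byte head plus possibly overlapping 16-byte tail; n >= 16 here. */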
+ rte_mov16(dst, src);
+ rte_mov16(dst - 16 + n, src - 16 + n);
return ret;
}
if (n <= 48) {
- rte_mov32((uint8_t *)dst, (const uint8_t *)src);
- rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ rte_mov32(dst, src);
+ rte_mov16(dst - 16 + n, src - 16 + n);
return ret;
}
if (n <= 64) {
- rte_mov32((uint8_t *)dst, (const uint8_t *)src);
- rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
- rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ rte_mov32(dst, src);
+ rte_mov16(dst + 32, src + 32);
+ rte_mov16(dst - 16 + n, src - 16 + n);
return ret;
}
if (n <= 128) {
@@ -550,40 +535,40 @@ rte_memcpy(void *dst, const void *src, size_t n)
if (n <= 512) {
if (n >= 256) {
n -= 256;
- rte_mov128((uint8_t *)dst, (const uint8_t *)src);
- rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
- src = (const uint8_t *)src + 256;
- dst = (uint8_t *)dst + 256;
+ rte_mov128(dst, src);
+ rte_mov128(dst + 128, src + 128);
+ src += 256;
+ dst += 256;
}
COPY_BLOCK_255_BACK15:
if (n >= 128) {
n -= 128;
- rte_mov128((uint8_t *)dst, (const uint8_t *)src);
- src = (const uint8_t *)src + 128;
- dst = (uint8_t *)dst + 128;
+ rte_mov128(dst, src);
+ src += 128;
+ dst += 128;
}
COPY_BLOCK_128_BACK15:
if (n >= 64) {
n -= 64;
- rte_mov64((uint8_t *)dst, (const uint8_t *)src);
- src = (const uint8_t *)src + 64;
- dst = (uint8_t *)dst + 64;
+ rte_mov64(dst, src);
+ src += 64;
+ dst += 64;
}
COPY_BLOCK_64_BACK15:
if (n >= 32) {
n -= 32;
- rte_mov32((uint8_t *)dst, (const uint8_t *)src);
- src = (const uint8_t *)src + 32;
- dst = (uint8_t *)dst + 32;
+ rte_mov32(dst, src);
+ src += 32;
+ dst += 32;
}
if (n > 16) {
- rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ rte_mov16(dst, src);
+ rte_mov16(dst - 16 + n, src - 16 + n);
return ret;
}
- if (n > 0) {
- rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
- }
+ if (n > 0)
+ rte_mov16(dst - 16 + n, src - 16 + n);
+
return ret;
}
@@ -595,9 +580,9 @@ COPY_BLOCK_64_BACK15:
*/
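+ /* dstofss is 17..32 bytes: the rte_mov32 below covers it and
+ * leaves dst 16-byte aligned.
+ */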
dstofss = 16 - ((uintptr_t)dst & 0x0F) + 16;
n -= dstofss;
- rte_mov32((uint8_t *)dst, (const uint8_t *)src);
- src = (const uint8_t *)src + dstofss;
- dst = (uint8_t *)dst + dstofss;
+ rte_mov32(dst, src);
+ src += dstofss;
+ dst += dstofss;
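+ /* Residual misalignment of src once dst is aligned; this selects
+ * the _mm_alignr_epi8 shift count used below.
+ */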
srcofs = ((uintptr_t)src & 0x0F);
/**
@@ -608,9 +593,9 @@ COPY_BLOCK_64_BACK15:
* Copy 256-byte blocks
*/
for (; n >= 256; n -= 256) {
- rte_mov256((uint8_t *)dst, (const uint8_t *)src);
- dst = (uint8_t *)dst + 256;
- src = (const uint8_t *)src + 256;
+ rte_mov256(dst, src);
+ dst += 256;
+ src += 256;
}
/**