[v4,09/39] hash: use C11 alignas
* Move __rte_aligned from the end of {struct,union} definitions to
be placed between the {struct,union} keyword and the tag.
Placing the attribute between {struct,union} and the tag imparts the
desired alignment on the type with GCC, LLVM and MSVC alike, whether
the code is built as C or C++.
* Replace use of __rte_aligned(a) on variables/fields with alignas(a).
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
lib/hash/rte_cuckoo_hash.h | 16 +++++++++-------
lib/hash/rte_thash.c | 4 +++-
lib/hash/rte_thash.h | 8 ++++----
3 files changed, 16 insertions(+), 12 deletions(-)
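
For illustration, a minimal sketch of the two transformations applied
throughout this series; the struct name demo_entry and its field are
hypothetical and not taken from the DPDK sources:

    /* Assumes <stdint.h>, <stdalign.h> and rte_common.h (for
     * __rte_cache_aligned and RTE_CACHE_LINE_SIZE). */

    /* Before: the attribute trails the definition, and the field is
     * aligned by placing __rte_cache_aligned after the declarator. */
    struct demo_entry {
            uint32_t key_len __rte_cache_aligned;
    } __rte_cache_aligned;

    /* After: the attribute sits between the keyword and the tag, and
     * the field uses C11 alignas(); per the commit message this
     * placement works for GCC, LLVM and MSVC in both C and C++. */
    struct __rte_cache_aligned demo_entry {
            alignas(RTE_CACHE_LINE_SIZE) uint32_t key_len;
    };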
--- a/lib/hash/rte_cuckoo_hash.h
+++ b/lib/hash/rte_cuckoo_hash.h
@@ -11,6 +11,8 @@
#ifndef _RTE_CUCKOO_HASH_H_
#define _RTE_CUCKOO_HASH_H_
+#include <stdalign.h>
+
#if defined(RTE_ARCH_X86)
#include "rte_cmp_x86.h"
#endif
@@ -117,10 +119,10 @@ enum cmp_jump_table_case {
#define RTE_HASH_TSX_MAX_RETRY 10
-struct lcore_cache {
+struct __rte_cache_aligned lcore_cache {
unsigned len; /**< Cache len */
uint32_t objs[LCORE_CACHE_SIZE]; /**< Cache objects */
-} __rte_cache_aligned;
+};
/* Structure that stores key-value pair */
struct rte_hash_key {
@@ -141,7 +143,7 @@ enum rte_hash_sig_compare_function {
};
/** Bucket structure */
-struct rte_hash_bucket {
+struct __rte_cache_aligned rte_hash_bucket {
uint16_t sig_current[RTE_HASH_BUCKET_ENTRIES];
RTE_ATOMIC(uint32_t) key_idx[RTE_HASH_BUCKET_ENTRIES];
@@ -149,10 +151,10 @@ struct rte_hash_bucket {
uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
void *next;
-} __rte_cache_aligned;
+};
/** A hash table structure. */
-struct rte_hash {
+struct __rte_cache_aligned rte_hash {
char name[RTE_HASH_NAMESIZE]; /**< Name of the hash. */
uint32_t entries; /**< Total table entries. */
uint32_t num_buckets; /**< Number of buckets in table. */
@@ -170,7 +172,7 @@ struct rte_hash {
/* Fields used in lookup */
- uint32_t key_len __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) uint32_t key_len;
/**< Length of hash key. */
uint8_t hw_trans_mem_support;
/**< If hardware transactional memory is used. */
@@ -220,7 +222,7 @@ struct rte_hash {
uint32_t *ext_bkt_to_free;
RTE_ATOMIC(uint32_t) *tbl_chng_cnt;
/**< Indicates if the hash table changed from last read. */
-} __rte_cache_aligned;
+};
struct queue_node {
struct rte_hash_bucket *bkt; /* Current bucket on the bfs search */
--- a/lib/hash/rte_thash.c
+++ b/lib/hash/rte_thash.c
@@ -2,6 +2,8 @@
* Copyright(c) 2021 Intel Corporation
*/
+#include <stdalign.h>
+
#include <sys/queue.h>
#include <rte_thash.h>
@@ -80,7 +82,7 @@ struct rte_thash_subtuple_helper {
uint32_t tuple_offset; /** < Offset in bits of the subtuple */
uint32_t tuple_len; /** < Length in bits of the subtuple */
uint32_t lsb_msk; /** < (1 << reta_sz_log) - 1 */
- __extension__ uint32_t compl_table[0] __rte_cache_aligned;
+ __extension__ alignas(RTE_CACHE_LINE_SIZE) uint32_t compl_table[0];
/** < Complementary table */
};
--- a/lib/hash/rte_thash.h
+++ b/lib/hash/rte_thash.h
@@ -99,14 +99,14 @@ struct rte_ipv6_tuple {
};
};
+#ifdef RTE_ARCH_X86
+union __rte_aligned(XMM_SIZE) rte_thash_tuple {
+#else
union rte_thash_tuple {
+#endif
struct rte_ipv4_tuple v4;
struct rte_ipv6_tuple v6;
-#ifdef RTE_ARCH_X86
-} __rte_aligned(XMM_SIZE);
-#else
};
-#endif
/**
* Prepare special converted key to use with rte_softrss_be()