Move the location of __rte_aligned(a) to the new conventional location.
The new placement, between the {struct,union} keyword and the tag,
allows the desired alignment to be imparted on the type regardless of
the toolchain in use, for both C and C++. Additionally, it avoids
confusing Doxygen when generating documentation.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
drivers/common/mlx5/linux/mlx5_glue.c | 2 +-
drivers/common/mlx5/linux/mlx5_nl.c | 2 +-
drivers/common/mlx5/mlx5_common_mr.h | 4 ++--
drivers/common/mlx5/mlx5_common_utils.h | 12 ++++++------
drivers/common/mlx5/mlx5_prm.h | 4 ++--
5 files changed, 12 insertions(+), 12 deletions(-)
@@ -1578,7 +1578,7 @@
#endif
}
-__rte_cache_aligned
+alignas(RTE_CACHE_LINE_SIZE)
const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue) {
.version = MLX5_GLUE_VERSION,
.fork_init = mlx5_glue_fork_init,
@@ -1507,7 +1507,7 @@ struct mlx5_nl_port_info {
struct ifinfomsg *ifm;
char name[sizeof(MLX5_VMWA_VLAN_DEVICE_PFX) + 32];
- __rte_cache_aligned
+ alignas(RTE_CACHE_LINE_SIZE)
uint8_t buf[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
NLMSG_ALIGN(sizeof(struct ifinfomsg)) +
NLMSG_ALIGN(sizeof(struct nlattr)) * 8 +
@@ -91,7 +91,7 @@ struct mlx5_mr_share_cache {
} __rte_packed;
/* Multi-Packet RQ buffer header. */
-struct mlx5_mprq_buf {
+struct __rte_cache_aligned mlx5_mprq_buf {
struct rte_mempool *mp;
uint16_t refcnt; /* Atomically accessed refcnt. */
struct rte_mbuf_ext_shared_info shinfos[];
@@ -100,7 +100,7 @@ struct mlx5_mprq_buf {
* More memory will be allocated for the first stride head-room and for
* the strides data.
*/
-} __rte_cache_aligned;
+};
__rte_internal
void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
@@ -29,7 +29,7 @@
*/
struct mlx5_list_entry {
LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
- uint32_t ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
+ alignas(8) uint32_t ref_cnt; /* 0 means, entry is invalid. */
uint32_t lcore_idx;
union {
struct mlx5_list_entry *gentry;
@@ -37,10 +37,10 @@ struct mlx5_list_entry {
};
} __rte_packed;
-struct mlx5_list_cache {
+struct __rte_cache_aligned mlx5_list_cache {
LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
uint32_t inv_cnt; /* Invalid entries counter. */
-} __rte_cache_aligned;
+};
/**
* Type of callback function for entry removal.
@@ -243,9 +243,9 @@ int mlx5_list_unregister(struct mlx5_list *list,
/********************* Hash List **********************/
/* Hash list bucket. */
-struct mlx5_hlist_bucket {
+struct __rte_cache_aligned mlx5_hlist_bucket {
struct mlx5_list_inconst l;
-} __rte_cache_aligned;
+};
/**
* Hash list table structure
@@ -257,7 +257,7 @@ struct mlx5_hlist {
uint8_t flags;
bool direct_key; /* Whether to use the key directly as hash index. */
struct mlx5_list_const l_const; /* List constant data. */
- struct mlx5_hlist_bucket buckets[] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hlist_bucket buckets[];
};
/**
@@ -319,12 +319,12 @@ enum mlx5_mpw_mode {
};
/* WQE Control segment. */
-struct mlx5_wqe_cseg {
+struct __rte_aligned(MLX5_WSEG_SIZE) mlx5_wqe_cseg {
uint32_t opcode;
uint32_t sq_ds;
uint32_t flags;
uint32_t misc;
-} __rte_packed __rte_aligned(MLX5_WSEG_SIZE);
+} __rte_packed;
/*
* WQE CSEG opcode field size is 32 bits, divided: