Move __rte_aligned(a) to the new conventional location. Placing it
between the {struct,union} keyword and the tag allows the desired
alignment to be imparted on the type, for both C and C++, regardless of
the toolchain being used. Additionally, it avoids confusing Doxygen
when generating documentation.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/thunderx/nicvf_rxtx.c | 8 ++++----
drivers/net/thunderx/nicvf_struct.h | 16 ++++++++--------
2 files changed, 12 insertions(+), 12 deletions(-)
@@ -76,7 +76,7 @@
uint32_t curr_head;
uint32_t head = sq->head;
struct rte_mbuf **txbuffs = sq->txbuffs;
- void *obj_p[NICVF_MAX_TX_FREE_THRESH] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) void *obj_p[NICVF_MAX_TX_FREE_THRESH];
curr_head = nicvf_addr_read(sq->sq_head) >> 4;
while (head != curr_head) {
@@ -239,7 +239,7 @@
return i;
}
-static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
+static const alignas(RTE_CACHE_LINE_SIZE) uint32_t ptype_table[16][16] = {
[L3_NONE][L4_NONE] = RTE_PTYPE_UNKNOWN,
[L3_NONE][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
[L3_NONE][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
@@ -342,7 +342,7 @@
static inline uint64_t __rte_hot
nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
{
- static const uint64_t flag_table[3] __rte_cache_aligned = {
+ static const alignas(RTE_CACHE_LINE_SIZE) uint64_t flag_table[3] = {
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
@@ -363,7 +363,7 @@
struct rbdr_entry_t *desc = rbdr->desc;
uint32_t qlen_mask = rbdr->qlen_mask;
uintptr_t door = rbdr->rbdr_door;
- void *obj_p[NICVF_MAX_RX_FREE_THRESH] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) void *obj_p[NICVF_MAX_RX_FREE_THRESH];
if (unlikely(rte_mempool_get_bulk(rxq->pool, obj_p, to_fill) < 0)) {
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
@@ -14,7 +14,7 @@
#include <ethdev_driver.h>
#include <rte_memory.h>
-struct nicvf_rbdr {
+struct __rte_cache_aligned nicvf_rbdr {
uintptr_t rbdr_status;
uintptr_t rbdr_door;
struct rbdr_entry_t *desc;
@@ -24,9 +24,9 @@ struct nicvf_rbdr {
uint32_t next_tail;
uint32_t head;
uint32_t qlen_mask;
-} __rte_cache_aligned;
+};
-struct nicvf_txq {
+struct __rte_cache_aligned nicvf_txq {
union sq_entry_t *desc;
nicvf_iova_addr_t phys;
struct rte_mbuf **txbuffs;
@@ -42,7 +42,7 @@ struct nicvf_txq {
uint64_t offloads;
uint16_t queue_id;
uint16_t tx_free_thresh;
-} __rte_cache_aligned;
+};
union mbuf_initializer {
struct {
@@ -54,7 +54,7 @@ struct nicvf_txq {
uint64_t value;
};
-struct nicvf_rxq {
+struct __rte_cache_aligned nicvf_rxq {
RTE_MARKER rxq_fastpath_data_start;
uint8_t rbptr_offset;
uint16_t rx_free_thresh;
@@ -76,9 +76,9 @@ struct nicvf_rxq {
uint16_t queue_id;
struct nicvf *nic;
nicvf_iova_addr_t phys;
-} __rte_cache_aligned;
+};
-struct nicvf {
+struct __rte_cache_aligned nicvf {
uint8_t vf_id;
uint8_t node;
uintptr_t reg_base;
@@ -111,7 +111,7 @@ struct nicvf {
uint8_t sqs_count;
#define MAX_SQS_PER_VF 11
struct nicvf *snicvf[MAX_SQS_PER_VF];
-} __rte_cache_aligned;
+};
struct change_link_mode {
bool enable;