Move the location of __rte_aligned(a) to the new conventional location.
The new placement, between the {struct,union} keyword and the tag,
allows the desired alignment to be imparted on the type regardless of
the toolchain being used, for both C and C++. Additionally, it avoids
confusing Doxygen when generating documentation.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
app/test-eventdev/test_order_common.h | 4 ++--
app/test-eventdev/test_perf_common.h | 24 ++++++++++++------------
app/test-eventdev/test_pipeline_common.h | 18 +++++++++---------
3 files changed, 23 insertions(+), 23 deletions(-)
@@ -39,7 +39,7 @@ struct prod_data {
struct test_order *t;
};
-struct test_order {
+struct __rte_cache_aligned test_order {
/* Don't change the offset of "err". Signal handler use this memory
* to terminate all lcores work.
*/
@@ -60,7 +60,7 @@ struct test_order {
uint32_t *producer_flow_seq;
uint32_t *expected_flow_seq;
struct evt_options *opt;
-} __rte_cache_aligned;
+};
static inline void
order_flow_id_copy_from_mbuf(struct test_order *t, struct rte_event *event)
@@ -31,13 +31,13 @@
struct test_perf;
-struct worker_data {
+struct __rte_cache_aligned worker_data {
uint64_t processed_pkts;
uint64_t latency;
uint8_t dev_id;
uint8_t port_id;
struct test_perf *t;
-} __rte_cache_aligned;
+};
struct crypto_adptr_data {
uint8_t cdev_id;
@@ -51,16 +51,16 @@ struct dma_adptr_data {
void **dma_op;
};
-struct prod_data {
+struct __rte_cache_aligned prod_data {
uint8_t dev_id;
uint8_t port_id;
uint8_t queue_id;
struct crypto_adptr_data ca;
struct dma_adptr_data da;
struct test_perf *t;
-} __rte_cache_aligned;
+};
-struct test_perf {
+struct __rte_cache_aligned test_perf {
/* Don't change the offset of "done". Signal handler use this memory
* to terminate all lcores work.
*/
@@ -74,17 +74,17 @@ struct test_perf {
struct prod_data prod[EVT_MAX_PORTS];
struct worker_data worker[EVT_MAX_PORTS];
struct evt_options *opt;
- uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
- struct rte_event_timer_adapter *timer_adptr[
- RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) uint8_t sched_type_list[EVT_MAX_STAGES];
+ alignas(RTE_CACHE_LINE_SIZE) struct rte_event_timer_adapter *timer_adptr[
+ RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
struct rte_mempool *ca_op_pool;
struct rte_mempool *ca_sess_pool;
struct rte_mempool *ca_asym_sess_pool;
struct rte_mempool *ca_vector_pool;
struct rte_mempool *da_op_pool;
-} __rte_cache_aligned;
+};
-struct perf_elt {
+struct __rte_cache_aligned perf_elt {
union {
struct rte_event_timer tim;
struct {
@@ -92,7 +92,7 @@ struct perf_elt {
uint64_t timestamp;
};
};
-} __rte_cache_aligned;
+};
#define BURST_SIZE 16
#define MAX_PROD_ENQ_BURST_SIZE 128
@@ -111,7 +111,7 @@ struct perf_elt {
const uint8_t nb_stages = t->opt->nb_stages;\
const uint8_t laststage = nb_stages - 1;\
uint8_t cnt = 0;\
- void *bufs[16] __rte_cache_aligned;\
+ alignas(RTE_CACHE_LINE_SIZE) void *bufs[16];\
int const sz = RTE_DIM(bufs);\
uint8_t stage;\
struct perf_elt *pe = NULL;\
@@ -31,14 +31,14 @@
struct test_pipeline;
-struct worker_data {
+struct __rte_cache_aligned worker_data {
uint64_t processed_pkts;
uint8_t dev_id;
uint8_t port_id;
struct test_pipeline *t;
-} __rte_cache_aligned;
+};
-struct test_pipeline {
+struct __rte_cache_aligned test_pipeline {
/* Don't change the offset of "done". Signal handler use this memory
* to terminate all lcores work.
*/
@@ -52,8 +52,8 @@ struct test_pipeline {
struct rte_mempool *pool[RTE_MAX_ETHPORTS];
struct worker_data worker[EVT_MAX_PORTS];
struct evt_options *opt;
- uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
-} __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) uint8_t sched_type_list[EVT_MAX_STAGES];
+};
#define BURST_SIZE 16
@@ -62,7 +62,7 @@ struct test_pipeline {
struct test_pipeline *t = w->t; \
const uint8_t dev = w->dev_id; \
const uint8_t port = w->port_id; \
- struct rte_event ev __rte_cache_aligned
+ alignas(RTE_CACHE_LINE_SIZE) struct rte_event ev
#define PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT \
int i; \
@@ -70,7 +70,7 @@ struct test_pipeline {
struct test_pipeline *t = w->t; \
const uint8_t dev = w->dev_id; \
const uint8_t port = w->port_id; \
- struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned
+ alignas(RTE_CACHE_LINE_SIZE) struct rte_event ev[BURST_SIZE + 1]
#define PIPELINE_WORKER_MULTI_STAGE_INIT \
struct worker_data *w = arg; \
@@ -81,7 +81,7 @@ struct test_pipeline {
const uint8_t last_queue = t->opt->nb_stages - 1; \
uint8_t *const sched_type_list = &t->sched_type_list[0]; \
const uint8_t nb_stages = t->opt->nb_stages + 1; \
- struct rte_event ev __rte_cache_aligned
+ alignas(RTE_CACHE_LINE_SIZE) struct rte_event ev
#define PIPELINE_WORKER_MULTI_STAGE_BURST_INIT \
int i; \
@@ -93,7 +93,7 @@ struct test_pipeline {
const uint8_t last_queue = t->opt->nb_stages - 1; \
uint8_t *const sched_type_list = &t->sched_type_list[0]; \
const uint8_t nb_stages = t->opt->nb_stages + 1; \
- struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned
+ alignas(RTE_CACHE_LINE_SIZE) struct rte_event ev[BURST_SIZE + 1]
static __rte_always_inline void
pipeline_fwd_event(struct rte_event *ev, uint8_t sched)