[v4,13/39] distributor: use C11 alignas
* Move __rte_aligned from the end of {struct,union} definitions to
  between the {struct,union} keyword and the tag.
  Placing the attribute between {struct,union} and the tag imparts the
  desired alignment on the type itself for all of GCC, LLVM and MSVC,
  building both C and C++ (a sketch follows the diffstat below).
* Replace use of __rte_aligned(a) on variables/fields with alignas(a)
  (sketched before the rte_distributor.c hunks below).
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
lib/distributor/distributor_private.h | 34 ++++++++++++++++++----------------
lib/distributor/rte_distributor.c     |  5 +++--
2 files changed, 21 insertions(+), 18 deletions(-)
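
Not part of the patch: a minimal sketch of the placement rule the first
bullet describes, using a hypothetical struct name and a hard-coded
64-byte alignment in place of __rte_cache_aligned/RTE_CACHE_LINE_SIZE.
GCC/LLVM syntax is shown; MSVC would use __declspec(align(64)) in the
same position between the keyword and the tag.

#include <assert.h>    /* static_assert (C11) */
#include <stdalign.h>  /* alignof (C11) */

/* Attribute between the keyword and the tag: accepted by GCC and LLVM
 * (and, as __declspec(align(64)), by MSVC) in both C and C++. */
struct __attribute__((aligned(64))) example_buffer {
	int counter;
};

/* The trailing form, "} __attribute__((aligned(64)));", is a GNU
 * extension that MSVC rejects, which is why the attribute moves. */

static_assert(alignof(struct example_buffer) == 64,
	"alignment is a property of the type, not of one variable");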
--- a/lib/distributor/distributor_private.h
+++ b/lib/distributor/distributor_private.h
@@ -5,6 +5,8 @@
#ifndef _DIST_PRIV_H_
#define _DIST_PRIV_H_
+#include <stdalign.h>
+
/**
* @file
* RTE distributor
@@ -51,10 +53,10 @@
* the next cache line to worker 0, we pad this out to three cache lines.
* Only 64-bits of the memory is actually used though.
*/
-union rte_distributor_buffer_single {
+union __rte_cache_aligned rte_distributor_buffer_single {
volatile RTE_ATOMIC(int64_t) bufptr64;
char pad[RTE_CACHE_LINE_SIZE*3];
-} __rte_cache_aligned;
+};
/*
* Transfer up to 8 mbufs at a time to/from workers, and
@@ -62,12 +64,12 @@
*/
#define RTE_DIST_BURST_SIZE 8
-struct rte_distributor_backlog {
+struct __rte_cache_aligned rte_distributor_backlog {
unsigned int start;
unsigned int count;
- int64_t pkts[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) int64_t pkts[RTE_DIST_BURST_SIZE];
uint16_t *tags; /* will point to second cacheline of inflights */
-} __rte_cache_aligned;
+};
struct rte_distributor_returned_pkts {
@@ -113,17 +115,17 @@ enum rte_distributor_match_function {
* There is a separate cacheline for returns in the burst API.
*/
struct rte_distributor_buffer {
- volatile RTE_ATOMIC(int64_t) bufptr64[RTE_DIST_BURST_SIZE]
- __rte_cache_aligned; /* <= outgoing to worker */
+ volatile alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int64_t) bufptr64[RTE_DIST_BURST_SIZE];
+ /* <= outgoing to worker */
- int64_t pad1 __rte_cache_aligned; /* <= one cache line */
+ alignas(RTE_CACHE_LINE_SIZE) int64_t pad1; /* <= one cache line */
- volatile RTE_ATOMIC(int64_t) retptr64[RTE_DIST_BURST_SIZE]
- __rte_cache_aligned; /* <= incoming from worker */
+ volatile alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int64_t) retptr64[RTE_DIST_BURST_SIZE];
+ /* <= incoming from worker */
- int64_t pad2 __rte_cache_aligned; /* <= one cache line */
+ alignas(RTE_CACHE_LINE_SIZE) int64_t pad2; /* <= one cache line */
- int count __rte_cache_aligned; /* <= number of current mbufs */
+ alignas(RTE_CACHE_LINE_SIZE) int count; /* <= number of current mbufs */
};
struct rte_distributor {
@@ -138,11 +140,11 @@ struct rte_distributor {
* on the worker core. Second cache line are the backlog
* that are going to go to the worker core.
*/
- uint16_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS][RTE_DIST_BURST_SIZE*2]
- __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) uint16_t
+ in_flight_tags[RTE_DISTRIB_MAX_WORKERS][RTE_DIST_BURST_SIZE*2];
- struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS]
- __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) struct rte_distributor_backlog
+ backlog[RTE_DISTRIB_MAX_WORKERS];
struct rte_distributor_buffer bufs[RTE_DISTRIB_MAX_WORKERS];
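
Also not part of the patch: a small sketch of the second bullet, C11
alignas() on automatic variables, mirroring the flows[]/matches[]
changes in rte_distributor.c below. BURST and the 64/128-byte values
are stand-ins for RTE_DIST_BURST_SIZE and the alignments used there.

#include <stdalign.h>  /* alignas (C11) */
#include <stdint.h>

#define BURST 8  /* stand-in for RTE_DIST_BURST_SIZE */

void
example_worker(void)
{
	/* alignas() leads the declaration, replacing a trailing
	 * __rte_aligned(a) on the variable. */
	alignas(64) uint16_t flows[BURST];
	alignas(128) uint16_t matches[BURST];

	(void)flows;   /* silence unused warnings in this sketch */
	(void)matches;
}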
--- a/lib/distributor/rte_distributor.c
+++ b/lib/distributor/rte_distributor.c
@@ -2,6 +2,7 @@
* Copyright(c) 2017 Intel Corporation
*/
+#include <stdalign.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>
@@ -447,7 +448,7 @@
struct rte_mbuf *next_mb = NULL;
int64_t next_value = 0;
uint16_t new_tag = 0;
- uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) uint16_t flows[RTE_DIST_BURST_SIZE];
unsigned int i, j, w, wid, matching_required;
if (d->alg_type == RTE_DIST_ALG_SINGLE) {
@@ -477,7 +478,7 @@
return 0;
while (next_idx < num_mbufs) {
- uint16_t matches[RTE_DIST_BURST_SIZE] __rte_aligned(128);
+ alignas(128) uint16_t matches[RTE_DIST_BURST_SIZE];
unsigned int pkts;
if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)