@@ -120,16 +120,16 @@ struct app_config_params {
uint16_t num_dec_cores;
};
-struct lcore_statistics {
+struct __rte_cache_aligned lcore_statistics {
unsigned int enqueued;
unsigned int dequeued;
unsigned int rx_lost_packets;
unsigned int enc_to_dec_lost_packets;
unsigned int tx_lost_packets;
-} __rte_cache_aligned;
+};
/** each lcore configuration */
-struct lcore_conf {
+struct __rte_cache_aligned lcore_conf {
uint64_t core_type;
unsigned int port_id;
@@ -148,7 +148,7 @@ struct lcore_conf {
struct rte_ring *enc_to_dec_ring;
struct lcore_statistics *lcore_stats;
-} __rte_cache_aligned;
+};
struct stats_lcore_params {
struct lcore_conf *lconf;
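Across these hunks the cache-alignment qualifier moves from after the closing brace to between the struct keyword and the tag name, the position that works for both the GNU-attribute and MSVC-declspec expansions of __rte_cache_aligned. A minimal sketch of what the prefix form guarantees, using a hypothetical struct and assuming __rte_cache_aligned and RTE_CACHE_LINE_SIZE come from rte_common.h:

#include <stdalign.h>	/* alignof */
#include <stdint.h>
#include <assert.h>
#include <rte_common.h>	/* __rte_cache_aligned, RTE_CACHE_LINE_SIZE */

/* Hypothetical struct, not part of the patch: the prefix form keeps the
 * type cache-line aligned, so per-lcore array elements never share a
 * line and no element straddles two lines. */
struct __rte_cache_aligned example_stats {
	uint64_t rx;
	uint64_t tx;
	uint64_t dropped;
};

static_assert(alignof(struct example_stats) == RTE_CACHE_LINE_SIZE,
	"type must start on a cache-line boundary");
static_assert(sizeof(struct example_stats) % RTE_CACHE_LINE_SIZE == 0,
	"size is padded to a whole number of cache lines");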
@@ -346,7 +346,7 @@ struct global_flag_stru_t {
*/
static int lcore_main(__rte_unused void *arg1)
{
- struct rte_mbuf *pkts[MAX_PKT_BURST] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) struct rte_mbuf *pkts[MAX_PKT_BURST];
struct rte_ether_addr dst_addr;
struct rte_ether_addr bond_mac_addr;
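For objects rather than types, the series switches to C11 alignas(), as with pkts[] above: the on-stack array itself starts on a cache-line boundary while the mbuf pointers it holds are unaffected. A short sketch under the assumption of an already-configured port/queue pair; demo_rx_burst and DEMO_BURST are hypothetical names:

#include <stdalign.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define DEMO_BURST 32

/* Hypothetical helper: receive one burst into a cache-line aligned
 * on-stack array and immediately drop the packets (sketch only). */
static inline uint16_t
demo_rx_burst(uint16_t port_id, uint16_t queue_id)
{
	alignas(RTE_CACHE_LINE_SIZE) struct rte_mbuf *pkts[DEMO_BURST];
	uint16_t nb_rx;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, DEMO_BURST);
	rte_pktmbuf_free_bulk(pkts, nb_rx);	/* sketch: do not keep them */
	return nb_rx;
}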
@@ -44,39 +44,39 @@
unsigned int num_workers;
static volatile struct app_stats {
- struct {
+ alignas(RTE_CACHE_LINE_SIZE) struct {
uint64_t rx_pkts;
uint64_t returned_pkts;
uint64_t enqueued_pkts;
uint64_t enqdrop_pkts;
- } rx __rte_cache_aligned;
- int pad1 __rte_cache_aligned;
+ } rx;
+ alignas(RTE_CACHE_LINE_SIZE) int pad1;
- struct {
+ alignas(RTE_CACHE_LINE_SIZE) struct {
uint64_t in_pkts;
uint64_t ret_pkts;
uint64_t sent_pkts;
uint64_t enqdrop_pkts;
- } dist __rte_cache_aligned;
- int pad2 __rte_cache_aligned;
+ } dist;
+ alignas(RTE_CACHE_LINE_SIZE) int pad2;
- struct {
+ alignas(RTE_CACHE_LINE_SIZE) struct {
uint64_t dequeue_pkts;
uint64_t tx_pkts;
uint64_t enqdrop_pkts;
- } tx __rte_cache_aligned;
- int pad3 __rte_cache_aligned;
+ } tx;
+ alignas(RTE_CACHE_LINE_SIZE) int pad3;
- uint64_t worker_pkts[64] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) uint64_t worker_pkts[64];
- int pad4 __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) int pad4;
- uint64_t worker_bursts[64][8] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) uint64_t worker_bursts[64][8];
- int pad5 __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) int pad5;
- uint64_t port_rx_pkts[64] __rte_cache_aligned;
- uint64_t port_tx_pkts[64] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) uint64_t port_rx_pkts[64];
+ alignas(RTE_CACHE_LINE_SIZE) uint64_t port_tx_pkts[64];
} app_stats;
struct app_stats prev_app_stats;
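In app_stats each stage's counter block (and the pad field after it) carries its own alignas(), so the rx core, the distributor and the tx core write to distinct cache lines of the volatile struct. A reduced sketch with a hypothetical two-stage layout, checking the resulting offsets:

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <assert.h>
#include <rte_common.h>

/* Hypothetical layout: alignas() on each member pins the block to the
 * start of a cache line, so writers to rx and tx never touch the same
 * line (no false sharing). */
struct demo_stage_stats {
	alignas(RTE_CACHE_LINE_SIZE) struct {
		uint64_t pkts;
		uint64_t drops;
	} rx;
	alignas(RTE_CACHE_LINE_SIZE) struct {
		uint64_t pkts;
		uint64_t drops;
	} tx;
};

static_assert(offsetof(struct demo_stage_stats, rx) % RTE_CACHE_LINE_SIZE == 0,
	"rx block starts a cache line");
static_assert(offsetof(struct demo_stage_stats, tx) % RTE_CACHE_LINE_SIZE == 0,
	"tx block starts a cache line");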
@@ -642,7 +642,7 @@ struct lcore_params {
* port, otherwise we send traffic from 0 to 1, 2 to 3, and vice versa
*/
const unsigned xor_val = (rte_eth_dev_count_avail() > 1);
- struct rte_mbuf *buf[8] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) struct rte_mbuf *buf[8];
for (i = 0; i < 8; i++)
buf[i] = NULL;
@@ -24,10 +24,10 @@
#define BATCH_SIZE 16
#define MAX_NUM_CORE 64
-struct worker_data {
+struct __rte_cache_aligned worker_data {
uint8_t dev_id;
uint8_t port_id;
-} __rte_cache_aligned;
+};
typedef int (*worker_loop)(void *);
typedef void (*schedule_loop)(unsigned int);
@@ -43,7 +43,7 @@ struct setup_data {
opt_check check_opt;
};
-struct fastpath_data {
+struct __rte_cache_aligned fastpath_data {
volatile int done;
uint32_t evdev_service_id;
uint32_t rxadptr_service_id;
@@ -56,7 +56,7 @@ struct fastpath_data {
uint64_t sched_core[MAX_NUM_CORE];
uint64_t worker_core[MAX_NUM_CORE];
struct setup_data cap;
-} __rte_cache_aligned;
+};
struct config_data {
unsigned int active_cores;
@@ -135,12 +135,12 @@ struct rx_queue {
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
-struct lcore_queue_conf {
+struct __rte_cache_aligned lcore_queue_conf {
uint16_t n_rx_queue;
uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
struct rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
-} __rte_cache_aligned;
+};
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
@@ -63,7 +63,7 @@ struct pipeline_data {
uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
};
-struct thread_data {
+struct __rte_cache_aligned thread_data {
struct rte_pipeline *p[THREAD_PIPELINES_MAX];
uint32_t n_pipelines;
@@ -73,7 +73,7 @@ struct thread_data {
uint64_t timer_period; /* Measured in CPU cycles. */
uint64_t time_next;
uint64_t time_next_min;
-} __rte_cache_aligned;
+};
static struct thread_data thread_data[RTE_MAX_LCORE];
@@ -148,14 +148,14 @@ struct tx_lcore_stat {
#define MAX_TX_QUEUE_PER_PORT 16
#define MAX_RX_QUEUE_PER_PORT 128
-struct lcore_queue_conf {
+struct __rte_cache_aligned lcore_queue_conf {
uint16_t n_rx_queue;
struct rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
struct rte_ip_frag_death_row death_row;
struct mbuf_table *tx_mbufs[RTE_MAX_ETHPORTS];
struct tx_lcore_stat tx_stat;
-} __rte_cache_aligned;
+};
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
@@ -218,11 +218,11 @@ struct app_sa_prm app_sa_prm = {
};
static const char *cfgfile;
-struct lcore_params {
+struct __rte_cache_aligned lcore_params {
uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
-} __rte_cache_aligned;
+};
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -54,13 +54,13 @@
#define IPSEC_NAT_T_PORT 4500
#define MBUF_PTYPE_TUNNEL_ESP_IN_UDP (RTE_PTYPE_TUNNEL_ESP | RTE_PTYPE_L4_UDP)
-struct traffic_type {
+struct __rte_cache_aligned traffic_type {
uint32_t num;
struct rte_mbuf *pkts[MAX_PKTS];
const uint8_t *data[MAX_PKTS];
void *saptr[MAX_PKTS];
uint32_t res[MAX_PKTS];
-} __rte_cache_aligned;
+};
struct ipsec_traffic {
struct traffic_type ipsec;
@@ -98,7 +98,7 @@ struct ipsec_sa_stats {
uint64_t miss;
};
-struct ipsec_core_statistics {
+struct __rte_cache_aligned ipsec_core_statistics {
uint64_t tx;
uint64_t rx;
uint64_t rx_call;
@@ -126,7 +126,7 @@ struct ipsec_core_statistics {
struct {
uint64_t miss;
} lpm6;
-} __rte_cache_aligned;
+};
extern struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
@@ -112,7 +112,7 @@ enum {
return (struct ipsec_sa *)i;
}
-struct ipsec_sa {
+struct __rte_cache_aligned ipsec_sa {
struct rte_ipsec_session sessions[IPSEC_SESSION_MAX];
uint32_t spi;
struct cdev_qp *cqp[RTE_MAX_LCORE];
@@ -170,7 +170,7 @@ struct ipsec_sa {
struct rte_flow_item_esp esp_spec;
struct rte_flow *flow;
struct rte_security_session_conf sess_conf;
-} __rte_cache_aligned;
+};
struct ipsec_xf {
struct rte_crypto_sym_xform a;
@@ -190,12 +190,12 @@ struct sa_ctx {
struct ipsec_sa sa[];
};
-struct ipsec_mbuf_metadata {
+struct __rte_cache_aligned ipsec_mbuf_metadata {
struct ipsec_sa *sa;
struct rte_crypto_op cop;
struct rte_crypto_sym_op sym_cop;
uint8_t buf[32];
-} __rte_cache_aligned;
+};
#define IS_TRANSPORT(flags) ((flags) & TRANSPORT)
@@ -224,7 +224,7 @@ struct cdev_qp {
uint16_t qp;
uint16_t in_flight;
uint16_t len;
- struct rte_crypto_op *buf[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+ alignas(sizeof(void *)) struct rte_crypto_op *buf[MAX_PKT_BURST];
};
struct ipsec_ctx {
@@ -235,7 +235,7 @@ struct ipsec_ctx {
uint16_t nb_qps;
uint16_t last_qp;
struct cdev_qp tbl[MAX_QP_PER_LCORE];
- struct rte_mbuf *ol_pkts[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+ alignas(sizeof(void *)) struct rte_mbuf *ol_pkts[MAX_PKT_BURST];
uint16_t ol_pkts_cnt;
uint64_t ipv4_offloads;
uint64_t ipv6_offloads;
@@ -283,18 +283,18 @@ struct cnt_blk {
uint32_t cnt;
} __rte_packed;
-struct lcore_rx_queue {
+struct __rte_cache_aligned lcore_rx_queue {
uint16_t port_id;
uint8_t queue_id;
void *sec_ctx;
-} __rte_cache_aligned;
+};
struct buffer {
uint16_t len;
- struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+ alignas(sizeof(void *)) struct rte_mbuf *m_table[MAX_PKT_BURST];
};
-struct lcore_conf {
+struct __rte_cache_aligned lcore_conf {
uint16_t nb_rx_queue;
struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
@@ -308,7 +308,7 @@ struct lcore_conf {
struct rte_mempool *pool_indir;
struct rte_ip_frag_death_row dr;
} frag;
-} __rte_cache_aligned;
+};
extern struct lcore_conf lcore_conf[RTE_MAX_LCORE];
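The burst tables above (buf, m_table, ol_pkts) only request pointer-size alignment: the enclosing per-lcore structures are already cache aligned, so alignas(sizeof(void *)) simply keeps the table naturally aligned for pointer loads. A sketch with a hypothetical struct:

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <assert.h>

#define DEMO_BURST 32

struct demo_tx_buffer {
	uint16_t len;
	/* natural pointer alignment only; the enclosing per-lcore struct
	 * is expected to carry the cache-line alignment */
	alignas(sizeof(void *)) void *table[DEMO_BURST];
};

static_assert(offsetof(struct demo_tx_buffer, table) % sizeof(void *) == 0,
	"burst table naturally aligned for pointer access");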
@@ -37,11 +37,11 @@ struct route_table {
/*
* Conf required by event mode worker with tx internal port
*/
-struct lcore_conf_ev_tx_int_port_wrkr {
+struct __rte_cache_aligned lcore_conf_ev_tx_int_port_wrkr {
struct ipsec_ctx inbound;
struct ipsec_ctx outbound;
struct route_table rt;
-} __rte_cache_aligned;
+};
void ipsec_poll_mode_worker(void);
void ipsec_poll_mode_wrkr_inl_pr(void);
@@ -98,13 +98,13 @@ struct mbuf_table {
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
-struct lcore_queue_conf {
+struct __rte_cache_aligned lcore_queue_conf {
uint64_t tx_tsc;
uint16_t n_rx_queue;
uint8_t rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
uint16_t tx_queue_id[MAX_PORTS];
struct mbuf_table tx_mbufs[MAX_PORTS];
-} __rte_cache_aligned;
+};
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
@@ -201,7 +201,7 @@ struct l2fwd_crypto_params {
};
/** lcore configuration */
-struct lcore_queue_conf {
+struct __rte_cache_aligned lcore_queue_conf {
unsigned nb_rx_ports;
uint16_t rx_port_list[MAX_RX_QUEUE_PER_LCORE];
@@ -210,7 +210,7 @@ struct lcore_queue_conf {
struct op_buffer op_buf[RTE_CRYPTO_MAX_DEVS];
struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
-} __rte_cache_aligned;
+};
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
@@ -230,7 +230,7 @@ struct lcore_queue_conf {
} session_pool_socket[RTE_MAX_NUMA_NODES];
/* Per-port statistics struct */
-struct l2fwd_port_statistics {
+struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t tx;
uint64_t rx;
@@ -238,14 +238,14 @@ struct l2fwd_port_statistics {
uint64_t crypto_dequeued;
uint64_t dropped;
-} __rte_cache_aligned;
+};
-struct l2fwd_crypto_statistics {
+struct __rte_cache_aligned l2fwd_crypto_statistics {
uint64_t enqueued;
uint64_t dequeued;
uint64_t errors;
-} __rte_cache_aligned;
+};
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
struct l2fwd_crypto_statistics crypto_statistics[RTE_CRYPTO_MAX_DEVS];
@@ -59,11 +59,11 @@
#define VECTOR_TMO_NS_DEFAULT 1E6 /* 1ms */
/* Per-port statistics struct */
-struct l2fwd_port_statistics {
+struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t dropped;
uint64_t tx;
uint64_t rx;
-} __rte_cache_aligned;
+};
/* Event vector attributes */
struct l2fwd_event_vector_params {
@@ -72,7 +72,7 @@ struct l2fwd_event_vector_params {
uint64_t timeout_ns;
};
-struct l2fwd_resources {
+struct __rte_cache_aligned l2fwd_resources {
volatile uint8_t force_quit;
uint8_t event_mode;
uint8_t sched_type;
@@ -91,7 +91,7 @@ struct l2fwd_resources {
struct l2fwd_event_vector_params evt_vec;
void *evt_rsrc;
void *poll_rsrc;
-} __rte_cache_aligned;
+};
static __rte_always_inline void
l2fwd_mac_updating(struct rte_mbuf *m, uint32_t dest_port_id,
@@ -9,10 +9,10 @@
typedef void (*poll_main_loop_cb)(struct l2fwd_resources *rsrc);
-struct lcore_queue_conf {
+struct __rte_cache_aligned lcore_queue_conf {
uint32_t rx_port_list[MAX_RX_QUEUE_PER_LCORE];
uint32_t n_rx_port;
-} __rte_cache_aligned;
+};
struct l2fwd_poll_resources {
poll_main_loop_cb poll_main_loop;
@@ -67,7 +67,7 @@
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
/* List of queues to be polled for a given lcore. 8< */
-struct lcore_queue_conf {
+struct __rte_cache_aligned lcore_queue_conf {
unsigned n_rx_port;
unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
uint64_t next_flush_time[RTE_MAX_ETHPORTS];
@@ -82,7 +82,7 @@ struct lcore_queue_conf {
uint16_t stats_read_pending;
rte_spinlock_t lock;
-} __rte_cache_aligned;
+};
/* >8 End of list of queues to be polled for given lcore. */
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
@@ -97,11 +97,11 @@ struct lcore_queue_conf {
struct rte_mempool *l2fwd_pktmbuf_pool = NULL;
/* Per-port statistics struct */
-struct l2fwd_port_statistics {
+struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t tx;
uint64_t rx;
uint64_t dropped;
-} __rte_cache_aligned;
+};
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
/* 1 day max */
@@ -69,10 +69,10 @@
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
-struct lcore_queue_conf {
+struct __rte_cache_aligned lcore_queue_conf {
unsigned n_rx_port;
unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
-} __rte_cache_aligned;
+};
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
@@ -86,11 +86,11 @@ struct lcore_queue_conf {
struct rte_mempool *l2fwd_pktmbuf_pool = NULL;
/* Per-port statistics struct */
-struct l2fwd_port_statistics {
+struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t tx;
uint64_t rx;
uint64_t dropped;
-} __rte_cache_aligned;
+};
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
/* A tsc-based timer responsible for triggering statistics printout */
@@ -72,10 +72,10 @@
/* list of enabled ports */
static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
-struct port_pair_params {
+struct __rte_cache_aligned port_pair_params {
#define NUM_PORTS 2
uint16_t port[NUM_PORTS];
-} __rte_cache_aligned;
+};
static struct port_pair_params port_pair_params_array[RTE_MAX_ETHPORTS / 2];
static struct port_pair_params *port_pair_params;
@@ -86,10 +86,10 @@ struct port_pair_params {
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
/* List of queues to be polled for a given lcore. 8< */
-struct lcore_queue_conf {
+struct __rte_cache_aligned lcore_queue_conf {
unsigned int n_rx_port;
unsigned int rx_port_list[MAX_RX_QUEUE_PER_LCORE];
-} __rte_cache_aligned;
+};
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
/* >8 End of list of queues to be polled for a given lcore. */
@@ -105,11 +105,11 @@ struct lcore_queue_conf {
struct rte_mempool *l2fwd_pktmbuf_pool;
/* Per-port statistics struct */
-struct l2fwd_port_statistics {
+struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t tx;
uint64_t rx;
uint64_t dropped;
-} __rte_cache_aligned;
+};
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
@@ -69,10 +69,10 @@
/* list of enabled ports */
static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
-struct port_pair_params {
+struct __rte_cache_aligned port_pair_params {
#define NUM_PORTS 2
uint16_t port[NUM_PORTS];
-} __rte_cache_aligned;
+};
static struct port_pair_params port_pair_params_array[RTE_MAX_ETHPORTS / 2];
static struct port_pair_params *port_pair_params;
@@ -83,10 +83,10 @@ struct port_pair_params {
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
/* List of queues to be polled for a given lcore. 8< */
-struct lcore_queue_conf {
+struct __rte_cache_aligned lcore_queue_conf {
unsigned n_rx_port;
unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
-} __rte_cache_aligned;
+};
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
/* >8 End of list of queues to be polled for a given lcore. */
@@ -101,11 +101,11 @@ struct lcore_queue_conf {
struct rte_mempool * l2fwd_pktmbuf_pool = NULL;
/* Per-port statistics struct */
-struct l2fwd_port_statistics {
+struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t tx;
uint64_t rx;
uint64_t dropped;
-} __rte_cache_aligned;
+};
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
@@ -97,22 +97,22 @@ struct lcore_rx_queue {
static uint8_t model_conf = RTE_GRAPH_MODEL_DEFAULT;
/* Lcore conf */
-struct lcore_conf {
+struct __rte_cache_aligned lcore_conf {
uint16_t n_rx_queue;
struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
struct rte_graph *graph;
char name[RTE_GRAPH_NAMESIZE];
rte_graph_t graph_id;
-} __rte_cache_aligned;
+};
static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
-struct lcore_params {
+struct __rte_cache_aligned lcore_params {
uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
-} __rte_cache_aligned;
+};
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
@@ -212,13 +212,13 @@ enum freq_scale_hint_t
FREQ_HIGHEST = 2
};
-struct lcore_rx_queue {
+struct __rte_cache_aligned lcore_rx_queue {
uint16_t port_id;
uint8_t queue_id;
enum freq_scale_hint_t freq_up_hint;
uint32_t zero_rx_packet_count;
uint32_t idle_hint;
-} __rte_cache_aligned;
+};
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
@@ -329,8 +329,8 @@ struct ipv6_l3fwd_route {
#define L3FWD_HASH_ENTRIES 1024
-static uint16_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
-static uint16_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+static alignas(RTE_CACHE_LINE_SIZE) uint16_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES];
+static alignas(RTE_CACHE_LINE_SIZE) uint16_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES];
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
@@ -357,7 +357,7 @@ struct ipv4_l3fwd_route {
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
#endif
-struct lcore_conf {
+struct __rte_cache_aligned lcore_conf {
uint16_t n_rx_queue;
struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
uint16_t n_tx_port;
@@ -366,9 +366,9 @@ struct lcore_conf {
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
lookup_struct_t * ipv4_lookup_struct;
lookup_struct_t * ipv6_lookup_struct;
-} __rte_cache_aligned;
+};
-struct lcore_stats {
+struct __rte_cache_aligned lcore_stats {
/* total sleep time in ms since last frequency scaling down */
uint32_t sleep_time;
/* number of long sleep recently */
@@ -399,10 +399,10 @@ struct lcore_stats {
uint64_t fp_nfp[2];
enum busy_rate br;
rte_spinlock_t telemetry_lock;
-} __rte_cache_aligned;
+};
-static struct lcore_conf lcore_conf[RTE_MAX_LCORE] __rte_cache_aligned;
-static struct lcore_stats stats[RTE_MAX_LCORE] __rte_cache_aligned;
+static alignas(RTE_CACHE_LINE_SIZE) struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+static alignas(RTE_CACHE_LINE_SIZE) struct lcore_stats stats[RTE_MAX_LCORE];
static struct rte_timer power_timers[RTE_MAX_LCORE];
static inline uint32_t power_idle_heuristic(uint32_t zero_rx_packet_count);
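For the two file-scope arrays the qualifier moves to the front of the declaration; because struct lcore_conf and struct lcore_stats are themselves __rte_cache_aligned, their size is a cache-line multiple, so every element of the arrays, not just element zero, stays on its own line. A hedged sketch of the same idea with hypothetical names (RTE_MAX_LCORE comes from the DPDK build config):

#include <stdalign.h>
#include <stdint.h>
#include <assert.h>
#include <rte_common.h>

struct __rte_cache_aligned demo_lcore_state {
	uint64_t rx_pkts;
	uint64_t tx_pkts;
};

/* alignas() pins the array start; the aligned element type does the
 * rest, keeping every per-lcore slot on its own cache line. */
static alignas(RTE_CACHE_LINE_SIZE) struct demo_lcore_state demo_state[RTE_MAX_LCORE];

static_assert(sizeof(demo_state[0]) % RTE_CACHE_LINE_SIZE == 0,
	"element size must be a cache-line multiple");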
@@ -832,9 +832,9 @@ static int is_done(void)
* back to sleep again without log spamming. Avoid cache line sharing
* to prevent threads stepping on each other's toes.
*/
- static struct {
+ static struct __rte_cache_aligned {
bool wakeup;
- } __rte_cache_aligned status[RTE_MAX_LCORE];
+ } status[RTE_MAX_LCORE];
struct rte_epoll_event event[num];
int n, i;
uint16_t port_id;
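Whether the qualifier sits on the struct type or on the array declaration changes the layout: alignas() on the declaration aligns only the start of the array, while an aligned element type pads every entry to its own cache line, which is what the comment about avoiding cache-line sharing asks for. A minimal sketch with hypothetical names contrasting the two forms:

#include <stdalign.h>
#include <stdbool.h>
#include <assert.h>
#include <rte_common.h>

/* Aligned element type: each flag gets a full cache line, so per-lcore
 * writers never touch the same line. */
static struct __rte_cache_aligned {
	bool wakeup;
} status_a[4];

/* alignas on the array object only: the flags stay sizeof(bool) apart,
 * so adjacent lcores share one cache line. */
static alignas(RTE_CACHE_LINE_SIZE) struct {
	bool wakeup;
} status_b[4];

static_assert(sizeof(status_a[0]) == RTE_CACHE_LINE_SIZE,
	"one cache line per lcore");
static_assert(sizeof(status_b[0]) < RTE_CACHE_LINE_SIZE,
	"adjacent flags share a cache line");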
@@ -7,11 +7,11 @@
#define MAX_LCORE_PARAMS 1024
-struct lcore_params {
+struct __rte_cache_aligned lcore_params {
uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
-} __rte_cache_aligned;
+};
extern struct lcore_params *lcore_params;
extern uint16_t nb_lcore_params;
@@ -20,12 +20,12 @@
static uint16_t hp_lcores[RTE_MAX_LCORE];
static uint16_t nb_hp_lcores;
-struct perf_lcore_params {
+struct __rte_cache_aligned perf_lcore_params {
uint16_t port_id;
uint8_t queue_id;
uint8_t high_perf;
uint8_t lcore_idx;
-} __rte_cache_aligned;
+};
static struct perf_lcore_params prf_lc_prms[MAX_LCORE_PARAMS];
static uint16_t nb_prf_lc_prms;
@@ -72,12 +72,12 @@ struct mbuf_table {
struct rte_mbuf *m_table[MAX_PKT_BURST];
};
-struct lcore_rx_queue {
+struct __rte_cache_aligned lcore_rx_queue {
uint16_t port_id;
uint8_t queue_id;
-} __rte_cache_aligned;
+};
-struct lcore_conf {
+struct __rte_cache_aligned lcore_conf {
uint16_t n_rx_queue;
struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
uint16_t n_tx_port;
@@ -86,7 +86,7 @@ struct lcore_conf {
struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
void *ipv4_lookup_struct;
void *ipv6_lookup_struct;
-} __rte_cache_aligned;
+};
extern volatile bool force_quit;
@@ -208,8 +208,8 @@
return init_val;
}
-static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
-static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+static alignas(RTE_CACHE_LINE_SIZE) uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES];
+static alignas(RTE_CACHE_LINE_SIZE) uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES];
static rte_xmm_t mask0;
static rte_xmm_t mask1;
@@ -96,11 +96,11 @@ enum L3FWD_LOOKUP_MODE {
struct parm_cfg parm_config;
-struct lcore_params {
+struct __rte_cache_aligned lcore_params {
uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
-} __rte_cache_aligned;
+};
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
@@ -66,11 +66,11 @@
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
/* List of queues to be polled for a given lcore. 8< */
-struct lcore_queue_conf {
+struct __rte_cache_aligned lcore_queue_conf {
unsigned n_rx_port;
unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
unsigned tx_queue_id;
-} __rte_cache_aligned;
+};
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
/* >8 End of list of queues to be polled. */
@@ -90,11 +90,11 @@ struct lcore_queue_conf {
struct rte_mempool * lsi_pktmbuf_pool = NULL;
/* Per-port statistics struct */
-struct lsi_port_statistics {
+struct __rte_cache_aligned lsi_port_statistics {
uint64_t tx;
uint64_t rx;
uint64_t dropped;
-} __rte_cache_aligned;
+};
struct lsi_port_statistics port_statistics[RTE_MAX_ETHPORTS];
/* A tsc-based timer responsible for triggering statistics printout */
@@ -18,14 +18,14 @@
* themselves are written by the clients, so we have a distinct set, on different
* cache lines for each client to use.
*/
-struct rx_stats{
+struct __rte_cache_aligned rx_stats {
uint64_t rx[RTE_MAX_ETHPORTS];
-} __rte_cache_aligned;
+};
-struct tx_stats{
+struct __rte_cache_aligned tx_stats {
uint64_t tx[RTE_MAX_ETHPORTS];
uint64_t tx_drop[RTE_MAX_ETHPORTS];
-} __rte_cache_aligned;
+};
struct port_info {
uint16_t num_ports;
@@ -61,11 +61,11 @@ struct lcore_ports{
/* structure to record the rx and tx packets. Put two per cache line as ports are
* used in pairs */
-struct port_stats{
+struct __rte_aligned(RTE_CACHE_LINE_SIZE / 2) port_stats {
unsigned rx;
unsigned tx;
unsigned drop;
-} __rte_aligned(RTE_CACHE_LINE_SIZE / 2);
+};
static int proc_id = -1;
static unsigned num_procs = 0;
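port_stats keeps its explicit __rte_aligned(RTE_CACHE_LINE_SIZE / 2), now in prefix position, so exactly two per-port blocks fit in one cache line, matching the "ports used in pairs" note above. A small sketch (hypothetical name) of the resulting size:

#include <assert.h>
#include <rte_common.h>

/* Half-line alignment: each block is padded to RTE_CACHE_LINE_SIZE / 2,
 * so a pair of ports shares one cache line and different pairs do not. */
struct __rte_aligned(RTE_CACHE_LINE_SIZE / 2) demo_port_stats {
	unsigned int rx;
	unsigned int tx;
	unsigned int drop;
};

static_assert(sizeof(struct demo_port_stats) == RTE_CACHE_LINE_SIZE / 2,
	"two port-stat blocks per cache line");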
@@ -24,10 +24,10 @@
#include "commands.h"
/* Per-port statistics struct */
-struct ntb_port_statistics {
+struct __rte_cache_aligned ntb_port_statistics {
uint64_t tx;
uint64_t rx;
-} __rte_cache_aligned;
+};
/* Port 0: NTB dev, Port 1: ethdev when iofwd. */
struct ntb_port_statistics ntb_port_stats[2];
@@ -58,19 +58,19 @@ struct send_thread_args {
};
volatile struct app_stats {
- struct {
+ alignas(RTE_CACHE_LINE_SIZE) struct {
uint64_t rx_pkts;
uint64_t enqueue_pkts;
uint64_t enqueue_failed_pkts;
- } rx __rte_cache_aligned;
+ } rx;
- struct {
+ alignas(RTE_CACHE_LINE_SIZE) struct {
uint64_t dequeue_pkts;
uint64_t enqueue_pkts;
uint64_t enqueue_failed_pkts;
- } wkr __rte_cache_aligned;
+ } wkr;
- struct {
+ alignas(RTE_CACHE_LINE_SIZE) struct {
uint64_t dequeue_pkts;
/* Too early pkts transmitted directly w/o reordering */
uint64_t early_pkts_txtd_woro;
@@ -78,15 +78,15 @@ struct send_thread_args {
uint64_t early_pkts_tx_failed_woro;
uint64_t ro_tx_pkts;
uint64_t ro_tx_failed_pkts;
- } tx __rte_cache_aligned;
+ } tx;
} app_stats;
/* per worker lcore stats */
-struct wkr_stats_per {
+struct __rte_cache_aligned wkr_stats_per {
uint64_t deq_pkts;
uint64_t enq_pkts;
uint64_t enq_failed_pkts;
-} __rte_cache_aligned;
+};
static struct wkr_stats_per wkr_stats[RTE_MAX_LCORE] = { {0} };
/**
@@ -47,13 +47,13 @@ struct block {
void *block;
};
-struct thread {
+struct __rte_cache_aligned thread {
struct rte_swx_pipeline *pipelines[THREAD_PIPELINES_MAX];
struct block *blocks[THREAD_BLOCKS_MAX];
volatile uint64_t n_pipelines;
volatile uint64_t n_blocks;
int enabled;
-} __rte_cache_aligned;
+};
static struct thread threads[RTE_MAX_LCORE];
@@ -62,7 +62,7 @@
app_rx_thread(struct thread_conf **confs)
{
uint32_t i, nb_rx;
- struct rte_mbuf *rx_mbufs[burst_conf.rx_burst] __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) struct rte_mbuf *rx_mbufs[burst_conf.rx_burst];
struct thread_conf *conf;
int conf_idx = 0;
@@ -63,7 +63,7 @@ struct thread_stat
};
-struct thread_conf
+struct __rte_cache_aligned thread_conf
{
uint16_t rx_port;
uint16_t tx_port;
@@ -76,7 +76,7 @@ struct thread_conf
#if APP_COLLECT_STAT
struct thread_stat stat;
#endif
-} __rte_cache_aligned;
+};
struct flow_conf
@@ -20,19 +20,19 @@
* themselves are written by the nodes, so we have a distinct set, on different
* cache lines for each node to use.
*/
-struct rx_stats {
+struct __rte_cache_aligned rx_stats {
uint64_t rx[RTE_MAX_ETHPORTS];
-} __rte_cache_aligned;
+};
-struct tx_stats {
+struct __rte_cache_aligned tx_stats {
uint64_t tx[RTE_MAX_ETHPORTS];
uint64_t tx_drop[RTE_MAX_ETHPORTS];
-} __rte_cache_aligned;
+};
-struct filter_stats {
+struct __rte_cache_aligned filter_stats {
uint64_t drop;
uint64_t passed;
-} __rte_cache_aligned;
+};
struct shared_info {
uint8_t num_nodes;
@@ -32,7 +32,7 @@ struct vhost_queue {
uint16_t last_used_idx;
};
-struct vhost_dev {
+struct __rte_cache_aligned vhost_dev {
/**< Number of memory regions for gpa to hpa translation. */
uint32_t nregions_hpa;
/**< Device MAC address (Obtained on first TX packet). */
@@ -59,7 +59,7 @@ struct vhost_dev {
#define MAX_QUEUE_PAIRS 4
struct vhost_queue queues[MAX_QUEUE_PAIRS * 2];
-} __rte_cache_aligned;
+};
typedef uint16_t (*vhost_enqueue_burst_t)(struct vhost_dev *dev,
uint16_t queue_id, struct rte_mbuf **pkts,
@@ -66,7 +66,7 @@ struct vhost_block_dev {
uint8_t *data;
};
-struct vhost_blk_ctrlr {
+struct __rte_cache_aligned vhost_blk_ctrlr {
uint8_t started;
/** ID for vhost library. */
int vid;
@@ -76,7 +76,7 @@ struct vhost_blk_ctrlr {
struct vhost_block_dev *bdev;
/** VM memory region */
struct rte_vhost_memory *mem;
-} __rte_cache_aligned;
+};
#define VHOST_BLK_MAX_IOVS 128
@@ -42,7 +42,7 @@ struct lcore_option {
uint16_t qid;
};
-struct vhost_crypto_info {
+struct __rte_cache_aligned vhost_crypto_info {
int vids[MAX_NB_SOCKETS];
uint32_t nb_vids;
struct rte_mempool *sess_pool;
@@ -51,7 +51,7 @@ struct vhost_crypto_info {
uint32_t qid;
uint32_t nb_inflight_ops;
volatile uint32_t initialized[MAX_NB_SOCKETS];
-} __rte_cache_aligned;
+};
struct vhost_crypto_options {
struct lcore_option los[MAX_NB_WORKER_CORES];
@@ -32,11 +32,11 @@
rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl); \
} while (0)
-struct freq_info {
+struct __rte_cache_aligned freq_info {
rte_spinlock_t power_sl;
uint32_t freqs[RTE_MAX_LCORE_FREQS];
unsigned num_freqs;
-} __rte_cache_aligned;
+};
static struct freq_info global_core_freq_info[RTE_MAX_LCORE];