From patchwork Wed Feb 14 16:35:38 2024
X-Patchwork-Submitter: Tyler Retzlaff
X-Patchwork-Id: 136766
X-Patchwork-Delegate: david.marchand@redhat.com
From: Tyler Retzlaff
To: dev@dpdk.org
Cc: Andrew Rybchenko, Bruce Richardson, Chengwen Feng, Cristian Dumitrescu,
 David Christensen, David Hunt, Ferruh Yigit, Honnappa Nagarahalli,
 Jasvinder Singh, Jerin Jacob, Kevin Laatz, Konstantin Ananyev, Min Zhou,
 Ruifeng Wang, Sameh Gobriel, Stanislaw Kardach, Thomas Monjalon,
 Vladimir Medvedkin, Yipeng Wang, Tyler Retzlaff
Subject: [PATCH v4 13/39] distributor: use C11 alignas
Date: Wed, 14 Feb 2024 08:35:38 -0800
Message-Id: <1707928564-28796-14-git-send-email-roretzla@linux.microsoft.com>
In-Reply-To: <1707928564-28796-1-git-send-email-roretzla@linux.microsoft.com>
References: <1707873986-29352-1-git-send-email-roretzla@linux.microsoft.com>
 <1707928564-28796-1-git-send-email-roretzla@linux.microsoft.com>

* Move __rte_aligned from the end of {struct,union} definitions to be
  between {struct,union} and the tag. This placement imparts the desired
  alignment on the type regardless of the toolchain in use, for the GCC,
  LLVM and MSVC compilers, building both C and C++ (see the sketch after
  the "---" marker below).

* Replace use of __rte_aligned(a) on variables/fields with alignas(a).

Signed-off-by: Tyler Retzlaff
Acked-by: Morten Brørup
---
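A minimal standalone sketch of the placement change described above (not
part of the diff), assuming a GCC/Clang toolchain: the names
DEMO_CACHE_LINE and cache_line_demo are invented for illustration, and a
bare __attribute__((aligned())) stands in for DPDK's __rte_cache_aligned
macro.

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_CACHE_LINE 64 /* stand-in for RTE_CACHE_LINE_SIZE */

/*
 * Old style: the attribute trails the closing brace. That position is a
 * GCC/LLVM extension with no equivalent placement when the macro expands
 * to MSVC's __declspec(align()):
 *
 *   struct cache_line_demo { ... } __attribute__((aligned(DEMO_CACHE_LINE)));
 *
 * New style: the alignment specifier sits between the keyword and the
 * tag, a position accepted by GCC, LLVM and MSVC in both C and C++.
 */
struct __attribute__((aligned(DEMO_CACHE_LINE))) cache_line_demo {
        /* field-level alignment now uses standard C11 alignas()
         * instead of __rte_aligned() */
        alignas(DEMO_CACHE_LINE) int64_t pkts[8];
        unsigned int count;
};

int main(void)
{
        printf("alignof=%zu sizeof=%zu\n",
               alignof(struct cache_line_demo),
               sizeof(struct cache_line_demo));
        return 0;
}
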
 lib/distributor/distributor_private.h | 34 ++++++++++++++++++----------------
 lib/distributor/rte_distributor.c     |  5 +++--
 2 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/lib/distributor/distributor_private.h b/lib/distributor/distributor_private.h
index dfeb9b5..07c2c05 100644
--- a/lib/distributor/distributor_private.h
+++ b/lib/distributor/distributor_private.h
@@ -5,6 +5,8 @@
 #ifndef _DIST_PRIV_H_
 #define _DIST_PRIV_H_
 
+#include <stdalign.h>
+
 /**
  * @file
  * RTE distributor
@@ -51,10 +53,10 @@
  * the next cache line to worker 0, we pad this out to three cache lines.
  * Only 64-bits of the memory is actually used though.
  */
-union rte_distributor_buffer_single {
+union __rte_cache_aligned rte_distributor_buffer_single {
         volatile RTE_ATOMIC(int64_t) bufptr64;
         char pad[RTE_CACHE_LINE_SIZE*3];
-} __rte_cache_aligned;
+};
 
 /*
  * Transfer up to 8 mbufs at a time to/from workers, and
@@ -62,12 +64,12 @@
  */
 #define RTE_DIST_BURST_SIZE 8
 
-struct rte_distributor_backlog {
+struct __rte_cache_aligned rte_distributor_backlog {
         unsigned int start;
         unsigned int count;
-        int64_t pkts[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
+        alignas(RTE_CACHE_LINE_SIZE) int64_t pkts[RTE_DIST_BURST_SIZE];
         uint16_t *tags; /* will point to second cacheline of inflights */
-} __rte_cache_aligned;
+};
 
 
 struct rte_distributor_returned_pkts {
@@ -113,17 +115,17 @@ enum rte_distributor_match_function {
  * There is a separate cacheline for returns in the burst API.
  */
 struct rte_distributor_buffer {
-        volatile RTE_ATOMIC(int64_t) bufptr64[RTE_DIST_BURST_SIZE]
-                __rte_cache_aligned; /* <= outgoing to worker */
+        volatile alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int64_t) bufptr64[RTE_DIST_BURST_SIZE];
+        /* <= outgoing to worker */
 
-        int64_t pad1 __rte_cache_aligned; /* <= one cache line */
+        alignas(RTE_CACHE_LINE_SIZE) int64_t pad1; /* <= one cache line */
 
-        volatile RTE_ATOMIC(int64_t) retptr64[RTE_DIST_BURST_SIZE]
-                __rte_cache_aligned; /* <= incoming from worker */
+        volatile alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int64_t) retptr64[RTE_DIST_BURST_SIZE];
+        /* <= incoming from worker */
 
-        int64_t pad2 __rte_cache_aligned; /* <= one cache line */
+        alignas(RTE_CACHE_LINE_SIZE) int64_t pad2; /* <= one cache line */
 
-        int count __rte_cache_aligned; /* <= number of current mbufs */
+        alignas(RTE_CACHE_LINE_SIZE) int count; /* <= number of current mbufs */
 };
 
 struct rte_distributor {
@@ -138,11 +140,11 @@ struct rte_distributor {
         * on the worker core. Second cache line are the backlog
         * that are going to go to the worker core.
         */
-        uint16_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS][RTE_DIST_BURST_SIZE*2]
-                        __rte_cache_aligned;
+        alignas(RTE_CACHE_LINE_SIZE) uint16_t
+                in_flight_tags[RTE_DISTRIB_MAX_WORKERS][RTE_DIST_BURST_SIZE*2];
 
-        struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS]
-                        __rte_cache_aligned;
+        alignas(RTE_CACHE_LINE_SIZE) struct rte_distributor_backlog
+                backlog[RTE_DISTRIB_MAX_WORKERS];
 
         struct rte_distributor_buffer bufs[RTE_DISTRIB_MAX_WORKERS];
 
diff --git a/lib/distributor/rte_distributor.c b/lib/distributor/rte_distributor.c
index 2ecb95c..b901a4e 100644
--- a/lib/distributor/rte_distributor.c
+++ b/lib/distributor/rte_distributor.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2017 Intel Corporation
  */
 
+#include <stdalign.h>
 #include
 #include
 #include
@@ -447,7 +448,7 @@
         struct rte_mbuf *next_mb = NULL;
         int64_t next_value = 0;
         uint16_t new_tag = 0;
-        uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
+        alignas(RTE_CACHE_LINE_SIZE) uint16_t flows[RTE_DIST_BURST_SIZE];
         unsigned int i, j, w, wid, matching_required;
 
         if (d->alg_type == RTE_DIST_ALG_SINGLE) {
@@ -477,7 +478,7 @@
                 return 0;
 
         while (next_idx < num_mbufs) {
-                uint16_t matches[RTE_DIST_BURST_SIZE] __rte_aligned(128);
+                alignas(128) uint16_t matches[RTE_DIST_BURST_SIZE];
                 unsigned int pkts;
 
                 if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE