From patchwork Sun Jun 23 03:15:44 2019
X-Patchwork-Id: 55220
From: Phil Yang <phil.yang@arm.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, jerinj@marvell.com, hemant.agrawal@nxp.com,
 Honnappa.Nagarahalli@arm.com, gavin.hu@arm.com, nd@arm.com,
 gage.eads@intel.com
Date: Sun, 23 Jun 2019 11:15:44 +0800
Message-Id: <1561259746-12611-1-git-send-email-phil.yang@arm.com>
In-Reply-To: <1561257671-10316-1-git-send-email-phil.yang@arm.com>
References: <1561257671-10316-1-git-send-email-phil.yang@arm.com>
Subject: [dpdk-dev] [PATCH v2 1/3] eal/arm64: add 128-bit atomic compare
 exchange

Add 128-bit atomic compare exchange on aarch64.

Signed-off-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
Tested-by: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
---
This patch depends on 'eal/stack: fix "pointer-sign" warning'
http://patchwork.dpdk.org/patch/54840/

v2: Fixed coding style warning.

 .../common/include/arch/arm/rte_atomic_64.h        | 184 +++++++++++++++++++++
 .../common/include/arch/x86/rte_atomic_64.h        |  12 --
 lib/librte_eal/common/include/generic/rte_atomic.h |  15 +-
 3 files changed, 198 insertions(+), 13 deletions(-)

diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h b/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
index 97060e4..ae29ce6 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2015 Cavium, Inc
+ * Copyright(c) 2019 Arm Limited
  */
 
 #ifndef _RTE_ATOMIC_ARM64_H_
@@ -14,6 +15,9 @@ extern "C" {
 #endif
 
 #include "generic/rte_atomic.h"
+#include <rte_branch_prediction.h>
+#include <rte_compat.h>
+#include <rte_debug.h>
 
 #define dsb(opt) asm volatile("dsb " #opt : : : "memory")
 #define dmb(opt) asm volatile("dmb " #opt : : : "memory")
@@ -40,6 +44,186 @@ extern "C" {
 
 #define rte_cio_rmb() dmb(oshld)
 
+/*----------------------- 128 bit atomic operations -------------------------*/
+
+#define RTE_HAS_ACQ(mo) ((mo) != __ATOMIC_RELAXED && (mo) != __ATOMIC_RELEASE)
+#define RTE_HAS_RLS(mo) ((mo) == __ATOMIC_RELEASE || \
+			 (mo) == __ATOMIC_ACQ_REL || \
+			 (mo) == __ATOMIC_SEQ_CST)
+
+#define RTE_MO_LOAD(mo)  (RTE_HAS_ACQ((mo)) \
+		? __ATOMIC_ACQUIRE : __ATOMIC_RELAXED)
+#define RTE_MO_STORE(mo) (RTE_HAS_RLS((mo)) \
+		? __ATOMIC_RELEASE : __ATOMIC_RELAXED)
+
+#ifdef __ARM_FEATURE_ATOMICS
+static inline rte_int128_t
+__rte_casp(rte_int128_t *dst, rte_int128_t old, rte_int128_t updated, int mo)
+{
+	/* The caspX instructions require the register pair at operand 1 to
+	 * start from an even-numbered register.
+	 * So, specify the registers for the local variables here.
+	 */
+	register uint64_t x0 __asm("x0") = (uint64_t)old.val[0];
+	register uint64_t x1 __asm("x1") = (uint64_t)old.val[1];
+	register uint64_t x2 __asm("x2") = (uint64_t)updated.val[0];
+	register uint64_t x3 __asm("x3") = (uint64_t)updated.val[1];
+
+	if (mo == __ATOMIC_RELAXED) {
+		asm volatile(
+			"casp %[old0], %[old1], %[upd0], %[upd1], [%[dst]]"
+			: [old0] "+r" (x0),
+			  [old1] "+r" (x1)
+			: [upd0] "r" (x2),
+			  [upd1] "r" (x3),
+			  [dst] "r" (dst)
+			: "memory");
+	} else if (mo == __ATOMIC_ACQUIRE) {
+		asm volatile(
+			"caspa %[old0], %[old1], %[upd0], %[upd1], [%[dst]]"
+			: [old0] "+r" (x0),
+			  [old1] "+r" (x1)
+			: [upd0] "r" (x2),
+			  [upd1] "r" (x3),
+			  [dst] "r" (dst)
+			: "memory");
+	} else if (mo == __ATOMIC_ACQ_REL) {
+		asm volatile(
+			"caspal %[old0], %[old1], %[upd0], %[upd1], [%[dst]]"
+			: [old0] "+r" (x0),
+			  [old1] "+r" (x1)
+			: [upd0] "r" (x2),
+			  [upd1] "r" (x3),
+			  [dst] "r" (dst)
+			: "memory");
+	} else if (mo == __ATOMIC_RELEASE) {
+		asm volatile(
+			"caspl %[old0], %[old1], %[upd0], %[upd1], [%[dst]]"
+			: [old0] "+r" (x0),
+			  [old1] "+r" (x1)
+			: [upd0] "r" (x2),
+			  [upd1] "r" (x3),
+			  [dst] "r" (dst)
+			: "memory");
+	} else {
+		rte_panic("Invalid memory order\n");
+	}
+
+	old.val[0] = x0;
+	old.val[1] = x1;
+
+	return old;
+}
+#else
+static inline rte_int128_t
+__rte_ldx128(const rte_int128_t *src, int mo)
+{
+	rte_int128_t ret;
+
+	if (mo == __ATOMIC_ACQUIRE)
+		asm volatile(
+			"ldaxp %0, %1, %2"
+			: "=&r" (ret.val[0]),
+			  "=&r" (ret.val[1])
+			: "Q" (src->val[0])
+			: "memory");
+	else if (mo == __ATOMIC_RELAXED)
+		asm volatile(
+			"ldxp %0, %1, %2"
+			: "=&r" (ret.val[0]),
+			  "=&r" (ret.val[1])
+			: "Q" (src->val[0])
+			: "memory");
+	else
+		rte_panic("Invalid memory order\n");
+
+	return ret;
+}
+
+static inline uint32_t
+__rte_stx128(rte_int128_t *dst, const rte_int128_t src, int mo)
+{
+	uint32_t ret;
+
+	if (mo == __ATOMIC_RELEASE)
+		asm volatile(
+			"stlxp %w0, %1, %2, %3"
+			: "=&r" (ret)
+			: "r" (src.val[0]),
+			  "r" (src.val[1]),
+			  "Q" (dst->val[0])
+			: "memory");
+	else if (mo == __ATOMIC_RELAXED)
+		asm volatile(
+			"stxp %w0, %1, %2, %3"
+			: "=&r" (ret)
+			: "r" (src.val[0]),
+			  "r" (src.val[1]),
+			  "Q" (dst->val[0])
+			: "memory");
+	else
+		rte_panic("Invalid memory order\n");
+
+	/* Return 0 on success, 1 on failure */
+	return ret;
+}
+#endif
+
+static inline int __rte_experimental
+rte_atomic128_cmp_exchange(rte_int128_t *dst,
+			   rte_int128_t *exp,
+			   const rte_int128_t *src,
+			   unsigned int weak,
+			   int success,
+			   int failure)
+{
+	/* Always do a strong CAS */
+	RTE_SET_USED(weak);
+	/* The memory order for failure is ignored; the memory order for
+	 * success must be stronger or equal.
+	 */
+	RTE_SET_USED(failure);
+
+#ifdef __ARM_FEATURE_ATOMICS
+	rte_int128_t expected = *exp;
+	rte_int128_t desired = *src;
+	rte_int128_t old;
+
+	old = __rte_casp(dst, expected, desired, success);
+#else
+	int ldx_mo = RTE_MO_LOAD(success);
+	int stx_mo = RTE_MO_STORE(success);
+	uint32_t ret = 1;
+	register rte_int128_t expected = *exp;
+	register rte_int128_t desired = *src;
+	register rte_int128_t old;
+
+	/* ldx128 alone cannot guarantee an atomic read; src or old must be
+	 * written back to verify the atomicity of the ldx128 read.
+	 */
+	do {
+		old = __rte_ldx128(dst, ldx_mo);
+		if (likely(old.int128 == expected.int128))
+			ret = __rte_stx128(dst, desired, stx_mo);
+		else
+			/* In the failure case (since 'weak' is ignored and
+			 * only weak == 0 is implemented), expected should
+			 * contain the atomically read value of dst. This
+			 * means 'old' needs to be stored back to ensure it
+			 * was read atomically.
+			 */
+			ret = __rte_stx128(dst, old, stx_mo);
+	} while (unlikely(ret));
+#endif
+
+	/* Unconditionally updating expected removes an 'if' statement;
+	 * expected should already be in a register if it is not in the cache.
+	 */
+	*exp = old;
+
+	return (old.int128 == expected.int128);
+}
+
 #ifdef __cplusplus
 }
 #endif

diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h b/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
index 6232c57..23cf48f 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
@@ -212,18 +212,6 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)
 
 /*------------------------ 128 bit atomic operations -------------------------*/
 
-/**
- * 128-bit integer structure.
- */
-RTE_STD_C11
-typedef struct {
-	RTE_STD_C11
-	union {
-		uint64_t val[2];
-		__extension__ __int128 int128;
-	};
-} __rte_aligned(16) rte_int128_t;
-
 static inline int __rte_experimental
 rte_atomic128_cmp_exchange(rte_int128_t *dst,
 			   rte_int128_t *exp,

diff --git a/lib/librte_eal/common/include/generic/rte_atomic.h b/lib/librte_eal/common/include/generic/rte_atomic.h
index 9958543..7dd1aa4 100644
--- a/lib/librte_eal/common/include/generic/rte_atomic.h
+++ b/lib/librte_eal/common/include/generic/rte_atomic.h
@@ -1081,6 +1081,18 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)
 
 /*------------------------ 128 bit atomic operations -------------------------*/
 
+/**
+ * 128-bit integer structure.
+ */
+RTE_STD_C11
+typedef struct {
+	RTE_STD_C11
+	union {
+		uint64_t val[2];
+		__extension__ __int128 int128;
+	};
+} __rte_aligned(16) rte_int128_t;
+
 #ifdef __DOXYGEN__
 
 /**
@@ -1093,7 +1105,8 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)
  *   *exp = *dst
  * @endcode
  *
- * @note This function is currently only available for the x86-64 platform.
+ * @note This function is currently available for the x86-64 and aarch64
+ * platforms.
 
  * @note The success and failure arguments must be one of the __ATOMIC_* values
  * defined in the C++11 standard. For details on their behavior, refer to the
  * standard.
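
As context for the test patch that follows, here is a minimal caller-side
sketch of the new API. It is not part of the patch: the names head,
push_tagged and new_ptr are illustrative only, it assumes a {pointer, tag}
layout where the tag guards against ABA, and since
rte_atomic128_cmp_exchange() is experimental the build would need
ALLOW_EXPERIMENTAL_API defined.

    #include <stdint.h>
    #include <rte_atomic.h>

    /* Hypothetical 128-bit {pointer, tag} pair; the tag avoids ABA issues. */
    static rte_int128_t head;

    static void
    push_tagged(uint64_t new_ptr)
    {
        rte_int128_t expected, desired;

        expected = head;        /* plain snapshot; the CAS validates it */
        do {
            desired.val[0] = new_ptr;
            desired.val[1] = expected.val[1] + 1;   /* bump the ABA tag */
            /* On failure, 'expected' is refreshed with the value read
             * from 'head', so the next attempt uses fresh data.
             */
        } while (!rte_atomic128_cmp_exchange(&head, &expected, &desired,
                1, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED));
    }
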
From patchwork Sun Jun 23 03:15:45 2019
X-Patchwork-Id: 55221
From: Phil Yang <phil.yang@arm.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, jerinj@marvell.com, hemant.agrawal@nxp.com,
 Honnappa.Nagarahalli@arm.com, gavin.hu@arm.com, nd@arm.com,
 gage.eads@intel.com
Date: Sun, 23 Jun 2019 11:15:45 +0800
Message-Id: <1561259746-12611-2-git-send-email-phil.yang@arm.com>
In-Reply-To: <1561259746-12611-1-git-send-email-phil.yang@arm.com>
References: <1561257671-10316-1-git-send-email-phil.yang@arm.com>
 <1561259746-12611-1-git-send-email-phil.yang@arm.com>
Subject: [dpdk-dev] [PATCH v2 2/3] test/atomic: add 128b compare and swap test

Add a 128b atomic compare and swap test for aarch64 and x86_64.

Signed-off-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
---
 app/test/test_atomic.c | 120 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 119 insertions(+), 1 deletion(-)

diff --git a/app/test/test_atomic.c b/app/test/test_atomic.c
index 43be30e..da09bc4 100644
--- a/app/test/test_atomic.c
+++ b/app/test/test_atomic.c
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2019 Arm Limited
  */
 
 #include <stdio.h>
@@ -20,7 +21,7 @@
  * Atomic Variables
  * ================
  *
- * - The main test function performs three subtests. The first test
+ * - The main test function performs four subtests. The first test
  *   checks that the usual inc/dec/add/sub functions are working
  *   correctly:
  *
@@ -61,6 +62,22 @@
  *       atomic_sub(&count, tmp+1);
  *
  * - At the end of the test, the *count* value must be 0.
+ *
+ * - Test "128b compare and swap" (aarch64 and x86_64 only)
+ *
+ *   - Initialize 128-bit atomic variables to zero.
+ *
+ *   - Invoke ``test_atomic128_cmp_exchange()`` on each lcore. Before doing
+ *     anything else, the cores wait on a synchro. Each lcore then does
+ *     these compare and swap (CAS) operations several times::
+ *
+ *       Relaxed CAS update counter.val[0] + 2; counter.val[1] + 1;
+ *       Acquired CAS update counter.val[0] + 2; counter.val[1] + 1;
+ *       Released CAS update counter.val[0] + 2; counter.val[1] + 1;
+ *       Acquired_Released CAS update counter.val[0] + 2; counter.val[1] + 1;
+ *
+ *   - At the end of the test, the first 64-bit value and the second 64-bit
+ *     value of *count128* must differ by the total number of iterations.
 */
 
 #define NUM_ATOMIC_TYPES 3
@@ -73,6 +90,10 @@
 static rte_atomic64_t a64;
 static rte_atomic64_t count;
 static rte_atomic32_t synchro;
 
+#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_ARM64)
+static rte_int128_t count128;
+#endif
+
 static int
 test_atomic_usual(__attribute__((unused)) void *arg)
 {
@@ -216,6 +237,72 @@ test_atomic_dec_and_test(__attribute__((unused)) void *arg)
 	return 0;
 }
 
+#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_ARM64)
+/*
+ * rte_atomic128_cmp_exchange() should update a 128-bit counter's first 64
+ * bits by 2 and its second 64 bits by 1 in this test. It should return true
+ * if the compare exchange operation is successful.
+ * This test repeats the 128-bit compare and swap operation for 10K rounds.
+ * In each iteration it runs the compare and swap operation with different
+ * memory models.
+ */
+static int
+test_atomic128_cmp_exchange(__attribute__((unused)) void *arg)
+{
+	rte_int128_t expected;
+	int success;
+	unsigned int i;
+
+	while (rte_atomic32_read(&synchro) == 0)
+		;
+
+	expected = count128;
+
+	for (i = 0; i < N; i++) {
+		do {
+			rte_int128_t desired;
+
+			desired.val[0] = expected.val[0] + 2;
+			desired.val[1] = expected.val[1] + 1;
+
+			success = rte_atomic128_cmp_exchange(&count128,
+				&expected, &desired, 1,
+				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+		} while (success == 0);
+
+		do {
+			rte_int128_t desired;
+
+			desired.val[0] = expected.val[0] + 2;
+			desired.val[1] = expected.val[1] + 1;
+
+			success = rte_atomic128_cmp_exchange(&count128,
+				&expected, &desired, 1,
+				__ATOMIC_RELEASE, __ATOMIC_RELAXED);
+		} while (success == 0);
+
+		do {
+			rte_int128_t desired;
+
+			desired.val[0] = expected.val[0] + 2;
+			desired.val[1] = expected.val[1] + 1;
+
+			success = rte_atomic128_cmp_exchange(&count128,
+				&expected, &desired, 1,
+				__ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
+		} while (success == 0);
+
+		do {
+			rte_int128_t desired;
+
+			desired.val[0] = expected.val[0] + 2;
+			desired.val[1] = expected.val[1] + 1;
+
+			success = rte_atomic128_cmp_exchange(&count128,
+				&expected, &desired, 1,
+				__ATOMIC_RELAXED, __ATOMIC_RELAXED);
+		} while (success == 0);
+	}
+
+	return 0;
+}
+#endif
+
 static int
 test_atomic(void)
 {
@@ -340,6 +427,37 @@ test_atomic(void)
 		return -1;
 	}
 
+#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_ARM64)
+	/*
+	 * This case tests the functionality of the rte_atomic128_cmp_exchange
+	 * API. It calls rte_atomic128_cmp_exchange with four kinds of memory
+	 * models successively on each slave core. Each successful 128-bit
+	 * atomic compare and swap operation updates the global 128-bit
+	 * counter by 2 for the first 64 bits and by 1 for the second 64 bits.
+	 * Each slave core iterates this test 10K times.
+	 * At the end of the test, verify that the first and the second 64
+	 * bits of the 128-bit counter differ by the total number of
+	 * iterations. If they do, the test passes.
+	 */
+	printf("128b compare and swap test\n");
+	uint64_t iterations = 0;
+
+	rte_atomic32_clear(&synchro);
+	count128.val[0] = 0;
+	count128.val[1] = 0;
+
+	rte_eal_mp_remote_launch(test_atomic128_cmp_exchange, NULL,
+				 SKIP_MASTER);
+	rte_atomic32_set(&synchro, 1);
+	rte_eal_mp_wait_lcore();
+	rte_atomic32_clear(&synchro);
+
+	iterations = count128.val[0] - count128.val[1];
+	if (iterations != 4*N*(rte_lcore_count()-1)) {
+		printf("128b compare and swap failed\n");
+		return -1;
+	}
+#endif
+
 	return 0;
 }
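
A note on the final check above: iterations = count128.val[0] -
count128.val[1] counts exactly one per successful CAS, because each success
publishes both halves together, adding 2 to the first half and 1 to the
second. The following single-threaded model is an illustration only, not part
of the patch; it assumes N is 10000 (as the "10K rounds" comments suggest)
and three slave cores.

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint64_t val0 = 0, val1 = 0;
        /* 4 CAS flavors per iteration, N iterations, per slave core */
        const uint64_t successes = 4 * 10000 * 3;
        uint64_t i;

        for (i = 0; i < successes; i++) {
            val0 += 2;  /* what each successful CAS adds to val[0] */
            val1 += 1;  /* ...and to val[1] */
        }
        /* The difference advances by exactly 1 per success. */
        assert(val0 - val1 == successes);
        return 0;
    }
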
From patchwork Sun Jun 23 03:15:46 2019
X-Patchwork-Id: 55222
From: Phil Yang <phil.yang@arm.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, jerinj@marvell.com, hemant.agrawal@nxp.com,
 Honnappa.Nagarahalli@arm.com, gavin.hu@arm.com, nd@arm.com,
 gage.eads@intel.com
Date: Sun, 23 Jun 2019 11:15:46 +0800
Message-Id: <1561259746-12611-3-git-send-email-phil.yang@arm.com>
In-Reply-To: <1561259746-12611-1-git-send-email-phil.yang@arm.com>
References: <1561257671-10316-1-git-send-email-phil.yang@arm.com>
 <1561259746-12611-1-git-send-email-phil.yang@arm.com>
Subject: [dpdk-dev] [PATCH v2 3/3] eal/stack: enable lock-free stack for
 aarch64

Enable both the C11 atomic and the non-C11 atomic lock-free stack for
aarch64.

Signed-off-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
Tested-by: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
---
 doc/guides/rel_notes/release_19_08.rst  | 3 +++
 lib/librte_stack/rte_stack_lf_c11.h     | 4 ++--
 lib/librte_stack/rte_stack_lf_generic.h | 4 ++--
 3 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/doc/guides/rel_notes/release_19_08.rst b/doc/guides/rel_notes/release_19_08.rst
index 8c3932d..b79ae28 100644
--- a/doc/guides/rel_notes/release_19_08.rst
+++ b/doc/guides/rel_notes/release_19_08.rst
@@ -88,6 +88,9 @@ New Features
   * Added multi-queue support to allow one af_xdp vdev with multiple netdev
     queues
 
+* **Added Lock-free Stack for aarch64.**
+
+  The lock-free stack implementation is enabled for aarch64 platforms.
 
 Removed Items
 -------------
 
diff --git a/lib/librte_stack/rte_stack_lf_c11.h b/lib/librte_stack/rte_stack_lf_c11.h
index 3d677ae..67c21fd 100644
--- a/lib/librte_stack/rte_stack_lf_c11.h
+++ b/lib/librte_stack/rte_stack_lf_c11.h
@@ -36,7 +36,7 @@ __rte_stack_lf_push_elems(struct rte_stack_lf_list *list,
 			  struct rte_stack_lf_elem *last,
 			  unsigned int num)
 {
-#ifndef RTE_ARCH_X86_64
+#if !defined(RTE_ARCH_X86_64) && !defined(RTE_ARCH_ARM64)
 	RTE_SET_USED(first);
 	RTE_SET_USED(last);
 	RTE_SET_USED(list);
@@ -88,7 +88,7 @@ __rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
 			 void **obj_table,
 			 struct rte_stack_lf_elem **last)
 {
-#ifndef RTE_ARCH_X86_64
+#if !defined(RTE_ARCH_X86_64) && !defined(RTE_ARCH_ARM64)
 	RTE_SET_USED(obj_table);
 	RTE_SET_USED(last);
 	RTE_SET_USED(list);
diff --git a/lib/librte_stack/rte_stack_lf_generic.h b/lib/librte_stack/rte_stack_lf_generic.h
index 3182151..488fd9f 100644
--- a/lib/librte_stack/rte_stack_lf_generic.h
+++ b/lib/librte_stack/rte_stack_lf_generic.h
@@ -36,7 +36,7 @@ __rte_stack_lf_push_elems(struct rte_stack_lf_list *list,
 			  struct rte_stack_lf_elem *last,
 			  unsigned int num)
 {
-#ifndef RTE_ARCH_X86_64
+#if !defined(RTE_ARCH_X86_64) && !defined(RTE_ARCH_ARM64)
 	RTE_SET_USED(first);
 	RTE_SET_USED(last);
 	RTE_SET_USED(list);
@@ -84,7 +84,7 @@ __rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
 			 void **obj_table,
 			 struct rte_stack_lf_elem **last)
 {
-#ifndef RTE_ARCH_X86_64
+#if !defined(RTE_ARCH_X86_64) && !defined(RTE_ARCH_ARM64)
 	RTE_SET_USED(obj_table);
 	RTE_SET_USED(last);
 	RTE_SET_USED(list);
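
With this series applied, an aarch64 application can request the lock-free
variant at stack creation time. The sketch below is illustrative only, not
part of the series: the function and stack names are made up, and it assumes
the experimental rte_stack API of this release (rte_stack_create(),
rte_stack_push(), rte_stack_pop(), rte_stack_free() and the RTE_STACK_F_LF
flag), so ALLOW_EXPERIMENTAL_API would be needed to build it.

    #include <stdint.h>
    #include <rte_lcore.h>
    #include <rte_stack.h>

    static int
    lf_stack_smoke_test(void)
    {
        struct rte_stack *s;
        void *obj = (void *)(uintptr_t)1, *out = NULL;

        /* Request the lock-free variant; before this series, this flag
         * was only usable on x86_64.
         */
        s = rte_stack_create("lf_smoke", 1024, rte_socket_id(),
                             RTE_STACK_F_LF);
        if (s == NULL)
            return -1;

        if (rte_stack_push(s, &obj, 1) != 1 ||
            rte_stack_pop(s, &out, 1) != 1 || out != obj) {
            rte_stack_free(s);
            return -1;
        }

        rte_stack_free(s);
        return 0;
    }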