get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.

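Read access needs no authentication: the patch shown below can be fetched with a plain HTTP GET. Below is a minimal sketch using libcurl (an assumption; any HTTP client works, and the ?format=json query parameter is the usual Django REST Framework way to request raw JSON instead of this browsable rendering):

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
        CURL *curl;
        CURLcode res;

        curl_global_init(CURL_GLOBAL_DEFAULT);
        curl = curl_easy_init();
        if (curl == NULL)
                return 1;

        /* Patch ID 130435 is the one shown in the response below. */
        curl_easy_setopt(curl, CURLOPT_URL,
                "http://patchwork.dpdk.org/api/patches/130435/?format=json");
        /* libcurl's default write callback prints the JSON body to stdout. */
        res = curl_easy_perform(curl);
        if (res != CURLE_OK)
                fprintf(stderr, "request failed: %s\n", curl_easy_strerror(res));

        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return res == CURLE_OK ? 0 : 1;
}

Updates via put/patch require an authenticated account with suitable project permissions, so they are not shown here.
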
GET /api/patches/130435/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 130435,
    "url": "http://patchwork.dpdk.org/api/patches/130435/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/1692213549-11786-3-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1692213549-11786-3-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1692213549-11786-3-git-send-email-roretzla@linux.microsoft.com",
    "date": "2023-08-16T19:19:05",
    "name": "[v3,2/6] eal: adapt EAL to present rte optional atomics API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "dbd4a65cb59df768f3883f19587d998f5e6cba7c",
    "submitter": {
        "id": 2077,
        "url": "http://patchwork.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/1692213549-11786-3-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 29254,
            "url": "http://patchwork.dpdk.org/api/series/29254/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=29254",
            "date": "2023-08-16T19:19:03",
            "name": "RFC optional rte optional stdatomics API",
            "version": 3,
            "mbox": "http://patchwork.dpdk.org/series/29254/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/130435/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/130435/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 8B3DF43085;\n\tWed, 16 Aug 2023 21:19:37 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 6B32743269;\n\tWed, 16 Aug 2023 21:19:18 +0200 (CEST)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id AC97140ED9;\n Wed, 16 Aug 2023 21:19:11 +0200 (CEST)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id E6E88211F61A; Wed, 16 Aug 2023 12:19:10 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com E6E88211F61A",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1692213550;\n bh=o377BHX3cCoR6dUy4pu1mFiaC62As6MgUPZGXIWY7gM=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=mwA515VoqrqaDIaXsVVL4yHGIYIGSNfOJZrR0oISVqJP6jZWJxovmETfMEJrh0nxe\n 2x6ZBZwgL6hppacSY2hdUyy76ZZgTM63cfczFJlpPV45rVlt7lxYQl9O/WDQz9lxsC\n lKzINMo78adeBrWNoelJOp2WNY3itTBsUXnretds=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "techboard@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Ruifeng Wang <ruifeng.wang@arm.com>, Jerin Jacob <jerinj@marvell.com>,\n Sunil Kumar Kori <skori@marvell.com>,\n =?utf-8?q?Mattias_R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>,\n Joyce Kong <joyce.kong@arm.com>, David Christensen <drc@linux.vnet.ibm.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n David Hunt <david.hunt@intel.com>, Thomas Monjalon <thomas@monjalon.net>,\n David Marchand <david.marchand@redhat.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH v3 2/6] eal: adapt EAL to present rte optional atomics API",
        "Date": "Wed, 16 Aug 2023 12:19:05 -0700",
        "Message-Id": "<1692213549-11786-3-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1692213549-11786-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1691717521-1025-1-git-send-email-roretzla@linux.microsoft.com>\n <1692213549-11786-1-git-send-email-roretzla@linux.microsoft.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Adapt the EAL public headers to use rte optional atomics API instead of\ndirectly using and exposing toolchain specific atomic builtin intrinsics.\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\nReviewed-by: Morten Brørup <mb@smartsharesystems.com>\n---\n app/test/test_mcslock.c                |  6 ++--\n lib/eal/arm/include/rte_atomic_32.h    |  4 +--\n lib/eal/arm/include/rte_atomic_64.h    | 36 +++++++++++------------\n lib/eal/arm/include/rte_pause_64.h     | 26 ++++++++--------\n lib/eal/arm/rte_power_intrinsics.c     |  8 ++---\n lib/eal/common/eal_common_trace.c      | 16 +++++-----\n lib/eal/include/generic/rte_atomic.h   | 50 +++++++++++++++----------------\n lib/eal/include/generic/rte_pause.h    | 46 ++++++++++++-----------------\n lib/eal/include/generic/rte_rwlock.h   | 47 +++++++++++++++--------------\n lib/eal/include/generic/rte_spinlock.h | 19 ++++++------\n lib/eal/include/rte_mcslock.h          | 50 +++++++++++++++----------------\n lib/eal/include/rte_pflock.h           | 24 ++++++++-------\n lib/eal/include/rte_seqcount.h         | 18 ++++++------\n lib/eal/include/rte_stdatomic.h        |  2 ++\n lib/eal/include/rte_ticketlock.h       | 42 +++++++++++++-------------\n lib/eal/include/rte_trace_point.h      |  4 +--\n lib/eal/loongarch/include/rte_atomic.h |  4 +--\n lib/eal/ppc/include/rte_atomic.h       | 54 +++++++++++++++++-----------------\n lib/eal/riscv/include/rte_atomic.h     |  4 +--\n lib/eal/x86/include/rte_atomic.h       |  8 ++---\n lib/eal/x86/include/rte_spinlock.h     |  2 +-\n lib/eal/x86/rte_power_intrinsics.c     |  6 ++--\n 22 files changed, 239 insertions(+), 237 deletions(-)",
    "diff": "diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c\nindex 52e45e7..242c242 100644\n--- a/app/test/test_mcslock.c\n+++ b/app/test/test_mcslock.c\n@@ -36,9 +36,9 @@\n  *   lock multiple times.\n  */\n \n-rte_mcslock_t *p_ml;\n-rte_mcslock_t *p_ml_try;\n-rte_mcslock_t *p_ml_perf;\n+RTE_ATOMIC(rte_mcslock_t *) p_ml;\n+RTE_ATOMIC(rte_mcslock_t *) p_ml_try;\n+RTE_ATOMIC(rte_mcslock_t *) p_ml_perf;\n \n static unsigned int count;\n \ndiff --git a/lib/eal/arm/include/rte_atomic_32.h b/lib/eal/arm/include/rte_atomic_32.h\nindex c00ab78..62fc337 100644\n--- a/lib/eal/arm/include/rte_atomic_32.h\n+++ b/lib/eal/arm/include/rte_atomic_32.h\n@@ -34,9 +34,9 @@\n #define rte_io_rmb() rte_rmb()\n \n static __rte_always_inline void\n-rte_atomic_thread_fence(int memorder)\n+rte_atomic_thread_fence(rte_memory_order memorder)\n {\n-\t__atomic_thread_fence(memorder);\n+\t__rte_atomic_thread_fence(memorder);\n }\n \n #ifdef __cplusplus\ndiff --git a/lib/eal/arm/include/rte_atomic_64.h b/lib/eal/arm/include/rte_atomic_64.h\nindex 6047911..75d8ba6 100644\n--- a/lib/eal/arm/include/rte_atomic_64.h\n+++ b/lib/eal/arm/include/rte_atomic_64.h\n@@ -38,9 +38,9 @@\n #define rte_io_rmb() rte_rmb()\n \n static __rte_always_inline void\n-rte_atomic_thread_fence(int memorder)\n+rte_atomic_thread_fence(rte_memory_order memorder)\n {\n-\t__atomic_thread_fence(memorder);\n+\t__rte_atomic_thread_fence(memorder);\n }\n \n /*------------------------ 128 bit atomic operations -------------------------*/\n@@ -107,33 +107,33 @@\n \t */\n \tRTE_SET_USED(failure);\n \t/* Find invalid memory order */\n-\tRTE_ASSERT(success == __ATOMIC_RELAXED ||\n-\t\tsuccess == __ATOMIC_ACQUIRE ||\n-\t\tsuccess == __ATOMIC_RELEASE ||\n-\t\tsuccess == __ATOMIC_ACQ_REL ||\n-\t\tsuccess == __ATOMIC_SEQ_CST);\n+\tRTE_ASSERT(success == rte_memory_order_relaxed ||\n+\t\tsuccess == rte_memory_order_acquire ||\n+\t\tsuccess == rte_memory_order_release ||\n+\t\tsuccess == rte_memory_order_acq_rel ||\n+\t\tsuccess == rte_memory_order_seq_cst);\n \n \trte_int128_t expected = *exp;\n \trte_int128_t desired = *src;\n \trte_int128_t old;\n \n #if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS)\n-\tif (success == __ATOMIC_RELAXED)\n+\tif (success == rte_memory_order_relaxed)\n \t\t__cas_128_relaxed(dst, exp, desired);\n-\telse if (success == __ATOMIC_ACQUIRE)\n+\telse if (success == rte_memory_order_acquire)\n \t\t__cas_128_acquire(dst, exp, desired);\n-\telse if (success == __ATOMIC_RELEASE)\n+\telse if (success == rte_memory_order_release)\n \t\t__cas_128_release(dst, exp, desired);\n \telse\n \t\t__cas_128_acq_rel(dst, exp, desired);\n \told = *exp;\n #else\n-#define __HAS_ACQ(mo) ((mo) != __ATOMIC_RELAXED && (mo) != __ATOMIC_RELEASE)\n-#define __HAS_RLS(mo) ((mo) == __ATOMIC_RELEASE || (mo) == __ATOMIC_ACQ_REL || \\\n-\t\t(mo) == __ATOMIC_SEQ_CST)\n+#define __HAS_ACQ(mo) ((mo) != rte_memory_order_relaxed && (mo) != rte_memory_order_release)\n+#define __HAS_RLS(mo) ((mo) == rte_memory_order_release || (mo) == rte_memory_order_acq_rel || \\\n+\t\t(mo) == rte_memory_order_seq_cst)\n \n-\tint ldx_mo = __HAS_ACQ(success) ? __ATOMIC_ACQUIRE : __ATOMIC_RELAXED;\n-\tint stx_mo = __HAS_RLS(success) ? __ATOMIC_RELEASE : __ATOMIC_RELAXED;\n+\tint ldx_mo = __HAS_ACQ(success) ? rte_memory_order_acquire : rte_memory_order_relaxed;\n+\tint stx_mo = __HAS_RLS(success) ? 
rte_memory_order_release : rte_memory_order_relaxed;\n \n #undef __HAS_ACQ\n #undef __HAS_RLS\n@@ -153,7 +153,7 @@\n \t\t: \"Q\" (src->val[0])       \\\n \t\t: \"memory\"); }\n \n-\t\tif (ldx_mo == __ATOMIC_RELAXED)\n+\t\tif (ldx_mo == rte_memory_order_relaxed)\n \t\t\t__LOAD_128(\"ldxp\", dst, old)\n \t\telse\n \t\t\t__LOAD_128(\"ldaxp\", dst, old)\n@@ -170,7 +170,7 @@\n \t\t: \"memory\"); }\n \n \t\tif (likely(old.int128 == expected.int128)) {\n-\t\t\tif (stx_mo == __ATOMIC_RELAXED)\n+\t\t\tif (stx_mo == rte_memory_order_relaxed)\n \t\t\t\t__STORE_128(\"stxp\", dst, desired, ret)\n \t\t\telse\n \t\t\t\t__STORE_128(\"stlxp\", dst, desired, ret)\n@@ -181,7 +181,7 @@\n \t\t\t * needs to be stored back to ensure it was read\n \t\t\t * atomically.\n \t\t\t */\n-\t\t\tif (stx_mo == __ATOMIC_RELAXED)\n+\t\t\tif (stx_mo == rte_memory_order_relaxed)\n \t\t\t\t__STORE_128(\"stxp\", dst, old, ret)\n \t\t\telse\n \t\t\t\t__STORE_128(\"stlxp\", dst, old, ret)\ndiff --git a/lib/eal/arm/include/rte_pause_64.h b/lib/eal/arm/include/rte_pause_64.h\nindex 5f70e97..d4daafc 100644\n--- a/lib/eal/arm/include/rte_pause_64.h\n+++ b/lib/eal/arm/include/rte_pause_64.h\n@@ -41,7 +41,7 @@ static inline void rte_pause(void)\n  * implicitly to exit WFE.\n  */\n #define __RTE_ARM_LOAD_EXC_8(src, dst, memorder) {       \\\n-\tif (memorder == __ATOMIC_RELAXED) {               \\\n+\tif (memorder == rte_memory_order_relaxed) {               \\\n \t\tasm volatile(\"ldxrb %w[tmp], [%x[addr]]\"  \\\n \t\t\t: [tmp] \"=&r\" (dst)               \\\n \t\t\t: [addr] \"r\" (src)                \\\n@@ -60,7 +60,7 @@ static inline void rte_pause(void)\n  * implicitly to exit WFE.\n  */\n #define __RTE_ARM_LOAD_EXC_16(src, dst, memorder) {       \\\n-\tif (memorder == __ATOMIC_RELAXED) {               \\\n+\tif (memorder == rte_memory_order_relaxed) {               \\\n \t\tasm volatile(\"ldxrh %w[tmp], [%x[addr]]\"  \\\n \t\t\t: [tmp] \"=&r\" (dst)               \\\n \t\t\t: [addr] \"r\" (src)                \\\n@@ -79,7 +79,7 @@ static inline void rte_pause(void)\n  * implicitly to exit WFE.\n  */\n #define __RTE_ARM_LOAD_EXC_32(src, dst, memorder) {      \\\n-\tif (memorder == __ATOMIC_RELAXED) {              \\\n+\tif (memorder == rte_memory_order_relaxed) {              \\\n \t\tasm volatile(\"ldxr %w[tmp], [%x[addr]]\"  \\\n \t\t\t: [tmp] \"=&r\" (dst)              \\\n \t\t\t: [addr] \"r\" (src)               \\\n@@ -98,7 +98,7 @@ static inline void rte_pause(void)\n  * implicitly to exit WFE.\n  */\n #define __RTE_ARM_LOAD_EXC_64(src, dst, memorder) {      \\\n-\tif (memorder == __ATOMIC_RELAXED) {              \\\n+\tif (memorder == rte_memory_order_relaxed) {              \\\n \t\tasm volatile(\"ldxr %x[tmp], [%x[addr]]\"  \\\n \t\t\t: [tmp] \"=&r\" (dst)              \\\n \t\t\t: [addr] \"r\" (src)               \\\n@@ -118,7 +118,7 @@ static inline void rte_pause(void)\n  */\n #define __RTE_ARM_LOAD_EXC_128(src, dst, memorder) {                    \\\n \tvolatile rte_int128_t *dst_128 = (volatile rte_int128_t *)&dst; \\\n-\tif (memorder == __ATOMIC_RELAXED) {                             \\\n+\tif (memorder == rte_memory_order_relaxed) {                             \\\n \t\tasm volatile(\"ldxp %x[tmp0], %x[tmp1], [%x[addr]]\"      \\\n \t\t\t: [tmp0] \"=&r\" (dst_128->val[0]),               \\\n \t\t\t  [tmp1] \"=&r\" (dst_128->val[1])                \\\n@@ -153,8 +153,8 @@ static inline void rte_pause(void)\n {\n \tuint16_t value;\n \n-\tRTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&\n-\t\tmemorder != 
__ATOMIC_RELAXED);\n+\tRTE_BUILD_BUG_ON(memorder != rte_memory_order_acquire &&\n+\t\tmemorder != rte_memory_order_relaxed);\n \n \t__RTE_ARM_LOAD_EXC_16(addr, value, memorder)\n \tif (value != expected) {\n@@ -172,8 +172,8 @@ static inline void rte_pause(void)\n {\n \tuint32_t value;\n \n-\tRTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&\n-\t\tmemorder != __ATOMIC_RELAXED);\n+\tRTE_BUILD_BUG_ON(memorder != rte_memory_order_acquire &&\n+\t\tmemorder != rte_memory_order_relaxed);\n \n \t__RTE_ARM_LOAD_EXC_32(addr, value, memorder)\n \tif (value != expected) {\n@@ -191,8 +191,8 @@ static inline void rte_pause(void)\n {\n \tuint64_t value;\n \n-\tRTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&\n-\t\tmemorder != __ATOMIC_RELAXED);\n+\tRTE_BUILD_BUG_ON(memorder != rte_memory_order_acquire &&\n+\t\tmemorder != rte_memory_order_relaxed);\n \n \t__RTE_ARM_LOAD_EXC_64(addr, value, memorder)\n \tif (value != expected) {\n@@ -206,8 +206,8 @@ static inline void rte_pause(void)\n \n #define RTE_WAIT_UNTIL_MASKED(addr, mask, cond, expected, memorder) do {  \\\n \tRTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));                \\\n-\tRTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&                  \\\n-\t\tmemorder != __ATOMIC_RELAXED);                            \\\n+\tRTE_BUILD_BUG_ON(memorder != rte_memory_order_acquire &&                  \\\n+\t\tmemorder != rte_memory_order_relaxed);                            \\\n \tconst uint32_t size = sizeof(*(addr)) << 3;                       \\\n \ttypeof(*(addr)) expected_value = (expected);                      \\\n \ttypeof(*(addr)) value;                                            \\\ndiff --git a/lib/eal/arm/rte_power_intrinsics.c b/lib/eal/arm/rte_power_intrinsics.c\nindex 77b96e4..f54cf59 100644\n--- a/lib/eal/arm/rte_power_intrinsics.c\n+++ b/lib/eal/arm/rte_power_intrinsics.c\n@@ -33,19 +33,19 @@\n \n \tswitch (pmc->size) {\n \tcase sizeof(uint8_t):\n-\t\t__RTE_ARM_LOAD_EXC_8(pmc->addr, cur_value, __ATOMIC_RELAXED)\n+\t\t__RTE_ARM_LOAD_EXC_8(pmc->addr, cur_value, rte_memory_order_relaxed)\n \t\t__RTE_ARM_WFE()\n \t\tbreak;\n \tcase sizeof(uint16_t):\n-\t\t__RTE_ARM_LOAD_EXC_16(pmc->addr, cur_value, __ATOMIC_RELAXED)\n+\t\t__RTE_ARM_LOAD_EXC_16(pmc->addr, cur_value, rte_memory_order_relaxed)\n \t\t__RTE_ARM_WFE()\n \t\tbreak;\n \tcase sizeof(uint32_t):\n-\t\t__RTE_ARM_LOAD_EXC_32(pmc->addr, cur_value, __ATOMIC_RELAXED)\n+\t\t__RTE_ARM_LOAD_EXC_32(pmc->addr, cur_value, rte_memory_order_relaxed)\n \t\t__RTE_ARM_WFE()\n \t\tbreak;\n \tcase sizeof(uint64_t):\n-\t\t__RTE_ARM_LOAD_EXC_64(pmc->addr, cur_value, __ATOMIC_RELAXED)\n+\t\t__RTE_ARM_LOAD_EXC_64(pmc->addr, cur_value, rte_memory_order_relaxed)\n \t\t__RTE_ARM_WFE()\n \t\tbreak;\n \tdefault:\ndiff --git a/lib/eal/common/eal_common_trace.c b/lib/eal/common/eal_common_trace.c\nindex cb980af..c6628dd 100644\n--- a/lib/eal/common/eal_common_trace.c\n+++ b/lib/eal/common/eal_common_trace.c\n@@ -103,11 +103,11 @@ struct trace_point_head *\n trace_mode_set(rte_trace_point_t *t, enum rte_trace_mode mode)\n {\n \tif (mode == RTE_TRACE_MODE_OVERWRITE)\n-\t\t__atomic_fetch_and(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,\n-\t\t\t__ATOMIC_RELEASE);\n+\t\trte_atomic_fetch_and_explicit(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,\n+\t\t\trte_memory_order_release);\n \telse\n-\t\t__atomic_fetch_or(t, __RTE_TRACE_FIELD_ENABLE_DISCARD,\n-\t\t\t__ATOMIC_RELEASE);\n+\t\trte_atomic_fetch_or_explicit(t, __RTE_TRACE_FIELD_ENABLE_DISCARD,\n+\t\t\trte_memory_order_release);\n }\n \n void\n@@ -141,7 +141,7 @@ rte_trace_mode 
rte_trace_mode_get(void)\n \tif (trace_point_is_invalid(t))\n \t\treturn false;\n \n-\tval = __atomic_load_n(t, __ATOMIC_ACQUIRE);\n+\tval = rte_atomic_load_explicit(t, rte_memory_order_acquire);\n \treturn (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;\n }\n \n@@ -153,7 +153,8 @@ rte_trace_mode rte_trace_mode_get(void)\n \tif (trace_point_is_invalid(t))\n \t\treturn -ERANGE;\n \n-\tprev = __atomic_fetch_or(t, __RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE);\n+\tprev = rte_atomic_fetch_or_explicit(t, __RTE_TRACE_FIELD_ENABLE_MASK,\n+\t    rte_memory_order_release);\n \tif ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) == 0)\n \t\t__atomic_fetch_add(&trace.status, 1, __ATOMIC_RELEASE);\n \treturn 0;\n@@ -167,7 +168,8 @@ rte_trace_mode rte_trace_mode_get(void)\n \tif (trace_point_is_invalid(t))\n \t\treturn -ERANGE;\n \n-\tprev = __atomic_fetch_and(t, ~__RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE);\n+\tprev = rte_atomic_fetch_and_explicit(t, ~__RTE_TRACE_FIELD_ENABLE_MASK,\n+\t    rte_memory_order_release);\n \tif ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) != 0)\n \t\t__atomic_fetch_sub(&trace.status, 1, __ATOMIC_RELEASE);\n \treturn 0;\ndiff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h\nindex 4a235ba..5940e7e 100644\n--- a/lib/eal/include/generic/rte_atomic.h\n+++ b/lib/eal/include/generic/rte_atomic.h\n@@ -63,7 +63,7 @@\n  *  but has different syntax and memory ordering semantic. Hence\n  *  deprecated for the simplicity of memory ordering semantics in use.\n  *\n- *  rte_atomic_thread_fence(__ATOMIC_ACQ_REL) should be used instead.\n+ *  rte_atomic_thread_fence(rte_memory_order_acq_rel) should be used instead.\n  */\n static inline void rte_smp_mb(void);\n \n@@ -80,7 +80,7 @@\n  *  but has different syntax and memory ordering semantic. Hence\n  *  deprecated for the simplicity of memory ordering semantics in use.\n  *\n- *  rte_atomic_thread_fence(__ATOMIC_RELEASE) should be used instead.\n+ *  rte_atomic_thread_fence(rte_memory_order_release) should be used instead.\n  *  The fence also guarantees LOAD operations that precede the call\n  *  are globally visible across the lcores before the STORE operations\n  *  that follows it.\n@@ -100,7 +100,7 @@\n  *  but has different syntax and memory ordering semantic. 
Hence\n  *  deprecated for the simplicity of memory ordering semantics in use.\n  *\n- *  rte_atomic_thread_fence(__ATOMIC_ACQUIRE) should be used instead.\n+ *  rte_atomic_thread_fence(rte_memory_order_acquire) should be used instead.\n  *  The fence also guarantees LOAD operations that precede the call\n  *  are globally visible across the lcores before the STORE operations\n  *  that follows it.\n@@ -154,7 +154,7 @@\n /**\n  * Synchronization fence between threads based on the specified memory order.\n  */\n-static inline void rte_atomic_thread_fence(int memorder);\n+static inline void rte_atomic_thread_fence(rte_memory_order memorder);\n \n /*------------------------- 16 bit atomic operations -------------------------*/\n \n@@ -207,7 +207,7 @@\n static inline uint16_t\n rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)\n {\n-\treturn __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);\n+\treturn rte_atomic_exchange_explicit(dst, val, rte_memory_order_seq_cst);\n }\n #endif\n \n@@ -274,7 +274,7 @@\n static inline void\n rte_atomic16_add(rte_atomic16_t *v, int16_t inc)\n {\n-\t__atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST);\n+\trte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst);\n }\n \n /**\n@@ -288,7 +288,7 @@\n static inline void\n rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)\n {\n-\t__atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST);\n+\trte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst);\n }\n \n /**\n@@ -341,7 +341,7 @@\n static inline int16_t\n rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)\n {\n-\treturn __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST) + inc;\n+\treturn rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst) + inc;\n }\n \n /**\n@@ -361,7 +361,7 @@\n static inline int16_t\n rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)\n {\n-\treturn __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST) - dec;\n+\treturn rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst) - dec;\n }\n \n /**\n@@ -380,7 +380,7 @@\n #ifdef RTE_FORCE_INTRINSICS\n static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)\n {\n-\treturn __atomic_fetch_add(&v->cnt, 1, __ATOMIC_SEQ_CST) + 1 == 0;\n+\treturn rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_seq_cst) + 1 == 0;\n }\n #endif\n \n@@ -400,7 +400,7 @@ static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)\n #ifdef RTE_FORCE_INTRINSICS\n static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)\n {\n-\treturn __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_SEQ_CST) - 1 == 0;\n+\treturn rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_seq_cst) - 1 == 0;\n }\n #endif\n \n@@ -486,7 +486,7 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)\n static inline uint32_t\n rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)\n {\n-\treturn __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);\n+\treturn rte_atomic_exchange_explicit(dst, val, rte_memory_order_seq_cst);\n }\n #endif\n \n@@ -553,7 +553,7 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)\n static inline void\n rte_atomic32_add(rte_atomic32_t *v, int32_t inc)\n {\n-\t__atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST);\n+\trte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst);\n }\n \n /**\n@@ -567,7 +567,7 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)\n static inline void\n rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)\n {\n-\t__atomic_fetch_sub(&v->cnt, dec, 
__ATOMIC_SEQ_CST);\n+\trte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst);\n }\n \n /**\n@@ -620,7 +620,7 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)\n static inline int32_t\n rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)\n {\n-\treturn __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST) + inc;\n+\treturn rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst) + inc;\n }\n \n /**\n@@ -640,7 +640,7 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)\n static inline int32_t\n rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)\n {\n-\treturn __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST) - dec;\n+\treturn rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst) - dec;\n }\n \n /**\n@@ -659,7 +659,7 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)\n #ifdef RTE_FORCE_INTRINSICS\n static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)\n {\n-\treturn __atomic_fetch_add(&v->cnt, 1, __ATOMIC_SEQ_CST) + 1 == 0;\n+\treturn rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_seq_cst) + 1 == 0;\n }\n #endif\n \n@@ -679,7 +679,7 @@ static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)\n #ifdef RTE_FORCE_INTRINSICS\n static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)\n {\n-\treturn __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_SEQ_CST) - 1 == 0;\n+\treturn rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_seq_cst) - 1 == 0;\n }\n #endif\n \n@@ -764,7 +764,7 @@ static inline void rte_atomic32_clear(rte_atomic32_t *v)\n static inline uint64_t\n rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)\n {\n-\treturn __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);\n+\treturn rte_atomic_exchange_explicit(dst, val, rte_memory_order_seq_cst);\n }\n #endif\n \n@@ -885,7 +885,7 @@ static inline void rte_atomic32_clear(rte_atomic32_t *v)\n static inline void\n rte_atomic64_add(rte_atomic64_t *v, int64_t inc)\n {\n-\t__atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST);\n+\trte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst);\n }\n #endif\n \n@@ -904,7 +904,7 @@ static inline void rte_atomic32_clear(rte_atomic32_t *v)\n static inline void\n rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)\n {\n-\t__atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST);\n+\trte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst);\n }\n #endif\n \n@@ -962,7 +962,7 @@ static inline void rte_atomic32_clear(rte_atomic32_t *v)\n static inline int64_t\n rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)\n {\n-\treturn __atomic_fetch_add(&v->cnt, inc, __ATOMIC_SEQ_CST) + inc;\n+\treturn rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_seq_cst) + inc;\n }\n #endif\n \n@@ -986,7 +986,7 @@ static inline void rte_atomic32_clear(rte_atomic32_t *v)\n static inline int64_t\n rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)\n {\n-\treturn __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_SEQ_CST) - dec;\n+\treturn rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_seq_cst) - dec;\n }\n #endif\n \n@@ -1115,8 +1115,8 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)\n  *   stronger) model.\n  * @param failure\n  *   If unsuccessful, the operation's memory behavior conforms to this (or a\n- *   stronger) model. This argument cannot be __ATOMIC_RELEASE,\n- *   __ATOMIC_ACQ_REL, or a stronger model than success.\n+ *   stronger) model. 
This argument cannot be rte_memory_order_release,\n+ *   rte_memory_order_acq_rel, or a stronger model than success.\n  * @return\n  *   Non-zero on success; 0 on failure.\n  */\ndiff --git a/lib/eal/include/generic/rte_pause.h b/lib/eal/include/generic/rte_pause.h\nindex bebfa95..256309e 100644\n--- a/lib/eal/include/generic/rte_pause.h\n+++ b/lib/eal/include/generic/rte_pause.h\n@@ -36,13 +36,11 @@\n  *  A 16-bit expected value to be in the memory location.\n  * @param memorder\n  *  Two different memory orders that can be specified:\n- *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to\n- *  C++11 memory orders with the same names, see the C++11 standard or\n- *  the GCC wiki on atomic synchronization for detailed definition.\n+ *  rte_memory_order_acquire and rte_memory_order_relaxed.\n  */\n static __rte_always_inline void\n rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,\n-\t\tint memorder);\n+\t\trte_memory_order memorder);\n \n /**\n  * Wait for *addr to be updated with a 32-bit expected value, with a relaxed\n@@ -54,13 +52,11 @@\n  *  A 32-bit expected value to be in the memory location.\n  * @param memorder\n  *  Two different memory orders that can be specified:\n- *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to\n- *  C++11 memory orders with the same names, see the C++11 standard or\n- *  the GCC wiki on atomic synchronization for detailed definition.\n+ *  rte_memory_order_acquire and rte_memory_order_relaxed.\n  */\n static __rte_always_inline void\n rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,\n-\t\tint memorder);\n+\t\trte_memory_order memorder);\n \n /**\n  * Wait for *addr to be updated with a 64-bit expected value, with a relaxed\n@@ -72,42 +68,40 @@\n  *  A 64-bit expected value to be in the memory location.\n  * @param memorder\n  *  Two different memory orders that can be specified:\n- *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. 
These map to\n- *  C++11 memory orders with the same names, see the C++11 standard or\n- *  the GCC wiki on atomic synchronization for detailed definition.\n+ *  rte_memory_order_acquire and rte_memory_order_relaxed.\n  */\n static __rte_always_inline void\n rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,\n-\t\tint memorder);\n+\t\trte_memory_order memorder);\n \n #ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED\n static __rte_always_inline void\n rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,\n-\t\tint memorder)\n+\t\trte_memory_order memorder)\n {\n-\tassert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);\n+\tassert(memorder == rte_memory_order_acquire || memorder == rte_memory_order_relaxed);\n \n-\twhile (__atomic_load_n(addr, memorder) != expected)\n+\twhile (rte_atomic_load_explicit(addr, memorder) != expected)\n \t\trte_pause();\n }\n \n static __rte_always_inline void\n rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,\n-\t\tint memorder)\n+\t\trte_memory_order memorder)\n {\n-\tassert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);\n+\tassert(memorder == rte_memory_order_acquire || memorder == rte_memory_order_relaxed);\n \n-\twhile (__atomic_load_n(addr, memorder) != expected)\n+\twhile (rte_atomic_load_explicit(addr, memorder) != expected)\n \t\trte_pause();\n }\n \n static __rte_always_inline void\n rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,\n-\t\tint memorder)\n+\t\trte_memory_order memorder)\n {\n-\tassert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);\n+\tassert(memorder == rte_memory_order_acquire || memorder == rte_memory_order_relaxed);\n \n-\twhile (__atomic_load_n(addr, memorder) != expected)\n+\twhile (rte_atomic_load_explicit(addr, memorder) != expected)\n \t\trte_pause();\n }\n \n@@ -125,16 +119,14 @@\n  *  An expected value to be in the memory location.\n  * @param memorder\n  *  Two different memory orders that can be specified:\n- *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. 
These map to\n- *  C++11 memory orders with the same names, see the C++11 standard or\n- *  the GCC wiki on atomic synchronization for detailed definition.\n+ *  rte_memory_order_acquire and rte_memory_order_relaxed.\n  */\n #define RTE_WAIT_UNTIL_MASKED(addr, mask, cond, expected, memorder) do { \\\n \tRTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));               \\\n-\tRTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&                 \\\n-\t\tmemorder != __ATOMIC_RELAXED);                           \\\n+\tRTE_BUILD_BUG_ON((memorder) != rte_memory_order_acquire &&       \\\n+\t\t(memorder) != rte_memory_order_relaxed);                 \\\n \ttypeof(*(addr)) expected_value = (expected);                     \\\n-\twhile (!((__atomic_load_n((addr), (memorder)) & (mask)) cond     \\\n+\twhile (!((rte_atomic_load_explicit((addr), (memorder)) & (mask)) cond     \\\n \t\t\texpected_value))                                 \\\n \t\trte_pause();                                             \\\n } while (0)\ndiff --git a/lib/eal/include/generic/rte_rwlock.h b/lib/eal/include/generic/rte_rwlock.h\nindex 24ebec6..c788705 100644\n--- a/lib/eal/include/generic/rte_rwlock.h\n+++ b/lib/eal/include/generic/rte_rwlock.h\n@@ -58,7 +58,7 @@\n #define RTE_RWLOCK_READ\t 0x4\t/* Reader increment */\n \n typedef struct __rte_lockable {\n-\tint32_t cnt;\n+\tRTE_ATOMIC(int32_t) cnt;\n } rte_rwlock_t;\n \n /**\n@@ -93,21 +93,21 @@\n \n \twhile (1) {\n \t\t/* Wait while writer is present or pending */\n-\t\twhile (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED)\n+\t\twhile (rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed)\n \t\t       & RTE_RWLOCK_MASK)\n \t\t\trte_pause();\n \n \t\t/* Try to get read lock */\n-\t\tx = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ,\n-\t\t\t\t       __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ;\n+\t\tx = rte_atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ,\n+\t\t\t\t       rte_memory_order_acquire) + RTE_RWLOCK_READ;\n \n \t\t/* If no writer, then acquire was successful */\n \t\tif (likely(!(x & RTE_RWLOCK_MASK)))\n \t\t\treturn;\n \n \t\t/* Lost race with writer, backout the change. 
*/\n-\t\t__atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ,\n-\t\t\t\t   __ATOMIC_RELAXED);\n+\t\trte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ,\n+\t\t\t\t   rte_memory_order_relaxed);\n \t}\n }\n \n@@ -128,20 +128,20 @@\n {\n \tint32_t x;\n \n-\tx = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);\n+\tx = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed);\n \n \t/* fail if write lock is held or writer is pending */\n \tif (x & RTE_RWLOCK_MASK)\n \t\treturn -EBUSY;\n \n \t/* Try to get read lock */\n-\tx = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ,\n-\t\t\t       __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ;\n+\tx = rte_atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ,\n+\t\t\t       rte_memory_order_acquire) + RTE_RWLOCK_READ;\n \n \t/* Back out if writer raced in */\n \tif (unlikely(x & RTE_RWLOCK_MASK)) {\n-\t\t__atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ,\n-\t\t\t\t   __ATOMIC_RELEASE);\n+\t\trte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ,\n+\t\t\t\t   rte_memory_order_release);\n \n \t\treturn -EBUSY;\n \t}\n@@ -159,7 +159,7 @@\n \t__rte_unlock_function(rwl)\n \t__rte_no_thread_safety_analysis\n {\n-\t__atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ, __ATOMIC_RELEASE);\n+\trte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ, rte_memory_order_release);\n }\n \n /**\n@@ -179,10 +179,10 @@\n {\n \tint32_t x;\n \n-\tx = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);\n+\tx = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed);\n \tif (x < RTE_RWLOCK_WRITE &&\n-\t    __atomic_compare_exchange_n(&rwl->cnt, &x, x + RTE_RWLOCK_WRITE,\n-\t\t\t\t\t1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n+\t    rte_atomic_compare_exchange_weak_explicit(&rwl->cnt, &x, x + RTE_RWLOCK_WRITE,\n+\t\t\t\t\trte_memory_order_acquire, rte_memory_order_relaxed))\n \t\treturn 0;\n \telse\n \t\treturn -EBUSY;\n@@ -202,22 +202,25 @@\n \tint32_t x;\n \n \twhile (1) {\n-\t\tx = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);\n+\t\tx = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed);\n \n \t\t/* No readers or writers? 
*/\n \t\tif (likely(x < RTE_RWLOCK_WRITE)) {\n \t\t\t/* Turn off RTE_RWLOCK_WAIT, turn on RTE_RWLOCK_WRITE */\n-\t\t\tif (__atomic_compare_exchange_n(&rwl->cnt, &x, RTE_RWLOCK_WRITE, 1,\n-\t\t\t\t\t\t\t__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n+\t\t\tif (rte_atomic_compare_exchange_weak_explicit(\n+\t\t\t\t&rwl->cnt, &x, RTE_RWLOCK_WRITE,\n+\t\t\t\trte_memory_order_acquire, rte_memory_order_relaxed))\n \t\t\t\treturn;\n \t\t}\n \n \t\t/* Turn on writer wait bit */\n \t\tif (!(x & RTE_RWLOCK_WAIT))\n-\t\t\t__atomic_fetch_or(&rwl->cnt, RTE_RWLOCK_WAIT, __ATOMIC_RELAXED);\n+\t\t\trte_atomic_fetch_or_explicit(&rwl->cnt, RTE_RWLOCK_WAIT,\n+\t\t\t    rte_memory_order_relaxed);\n \n \t\t/* Wait until no readers before trying again */\n-\t\twhile (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) > RTE_RWLOCK_WAIT)\n+\t\twhile (rte_atomic_load_explicit(&rwl->cnt,\n+\t\t    rte_memory_order_relaxed) > RTE_RWLOCK_WAIT)\n \t\t\trte_pause();\n \n \t}\n@@ -234,7 +237,7 @@\n \t__rte_unlock_function(rwl)\n \t__rte_no_thread_safety_analysis\n {\n-\t__atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_WRITE, __ATOMIC_RELEASE);\n+\trte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_WRITE, rte_memory_order_release);\n }\n \n /**\n@@ -248,7 +251,7 @@\n static inline int\n rte_rwlock_write_is_locked(rte_rwlock_t *rwl)\n {\n-\tif (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) & RTE_RWLOCK_WRITE)\n+\tif (rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed) & RTE_RWLOCK_WRITE)\n \t\treturn 1;\n \n \treturn 0;\ndiff --git a/lib/eal/include/generic/rte_spinlock.h b/lib/eal/include/generic/rte_spinlock.h\nindex e18f0cd..23fb048 100644\n--- a/lib/eal/include/generic/rte_spinlock.h\n+++ b/lib/eal/include/generic/rte_spinlock.h\n@@ -29,7 +29,7 @@\n  * The rte_spinlock_t type.\n  */\n typedef struct __rte_lockable {\n-\tvolatile int locked; /**< lock status 0 = unlocked, 1 = locked */\n+\tvolatile RTE_ATOMIC(int) locked; /**< lock status 0 = unlocked, 1 = locked */\n } rte_spinlock_t;\n \n /**\n@@ -66,10 +66,10 @@\n {\n \tint exp = 0;\n \n-\twhile (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,\n-\t\t\t\t__ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {\n-\t\trte_wait_until_equal_32((volatile uint32_t *)&sl->locked,\n-\t\t\t       0, __ATOMIC_RELAXED);\n+\twhile (!rte_atomic_compare_exchange_strong_explicit(&sl->locked, &exp, 1,\n+\t\t\t\trte_memory_order_acquire, rte_memory_order_relaxed)) {\n+\t\trte_wait_until_equal_32((volatile uint32_t *)(uintptr_t)&sl->locked,\n+\t\t\t       0, rte_memory_order_relaxed);\n \t\texp = 0;\n \t}\n }\n@@ -90,7 +90,7 @@\n rte_spinlock_unlock(rte_spinlock_t *sl)\n \t__rte_no_thread_safety_analysis\n {\n-\t__atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&sl->locked, 0, rte_memory_order_release);\n }\n #endif\n \n@@ -113,9 +113,8 @@\n \t__rte_no_thread_safety_analysis\n {\n \tint exp = 0;\n-\treturn __atomic_compare_exchange_n(&sl->locked, &exp, 1,\n-\t\t\t\t0, /* disallow spurious failure */\n-\t\t\t\t__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);\n+\treturn rte_atomic_compare_exchange_strong_explicit(&sl->locked, &exp, 1,\n+\t\t\t\trte_memory_order_acquire, rte_memory_order_relaxed);\n }\n #endif\n \n@@ -129,7 +128,7 @@\n  */\n static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)\n {\n-\treturn __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);\n+\treturn rte_atomic_load_explicit(&sl->locked, rte_memory_order_acquire);\n }\n \n /**\ndiff --git a/lib/eal/include/rte_mcslock.h b/lib/eal/include/rte_mcslock.h\nindex 18e63eb..8c75377 100644\n--- 
a/lib/eal/include/rte_mcslock.h\n+++ b/lib/eal/include/rte_mcslock.h\n@@ -33,8 +33,8 @@\n  * The rte_mcslock_t type.\n  */\n typedef struct rte_mcslock {\n-\tstruct rte_mcslock *next;\n-\tint locked; /* 1 if the queue locked, 0 otherwise */\n+\tRTE_ATOMIC(struct rte_mcslock *) next;\n+\tRTE_ATOMIC(int) locked; /* 1 if the queue locked, 0 otherwise */\n } rte_mcslock_t;\n \n /**\n@@ -49,13 +49,13 @@\n  *   lock should use its 'own node'.\n  */\n static inline void\n-rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me)\n+rte_mcslock_lock(RTE_ATOMIC(rte_mcslock_t *) *msl, rte_mcslock_t *me)\n {\n \trte_mcslock_t *prev;\n \n \t/* Init me node */\n-\t__atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&me->locked, 1, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&me->next, NULL, rte_memory_order_relaxed);\n \n \t/* If the queue is empty, the exchange operation is enough to acquire\n \t * the lock. Hence, the exchange operation requires acquire semantics.\n@@ -63,7 +63,7 @@\n \t * visible to other CPUs/threads. Hence, the exchange operation requires\n \t * release semantics as well.\n \t */\n-\tprev = __atomic_exchange_n(msl, me, __ATOMIC_ACQ_REL);\n+\tprev = rte_atomic_exchange_explicit(msl, me, rte_memory_order_acq_rel);\n \tif (likely(prev == NULL)) {\n \t\t/* Queue was empty, no further action required,\n \t\t * proceed with lock taken.\n@@ -77,19 +77,19 @@\n \t * strong as a release fence and is not sufficient to enforce the\n \t * desired order here.\n \t */\n-\t__atomic_store_n(&prev->next, me, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&prev->next, me, rte_memory_order_release);\n \n \t/* The while-load of me->locked should not move above the previous\n \t * store to prev->next. Otherwise it will cause a deadlock. Need a\n \t * store-load barrier.\n \t */\n-\t__atomic_thread_fence(__ATOMIC_ACQ_REL);\n+\t__rte_atomic_thread_fence(rte_memory_order_acq_rel);\n \t/* If the lock has already been acquired, it first atomically\n \t * places the node at the end of the queue and then proceeds\n \t * to spin on me->locked until the previous lock holder resets\n \t * the me->locked using mcslock_unlock().\n \t */\n-\trte_wait_until_equal_32((uint32_t *)&me->locked, 0, __ATOMIC_ACQUIRE);\n+\trte_wait_until_equal_32((uint32_t *)(uintptr_t)&me->locked, 0, rte_memory_order_acquire);\n }\n \n /**\n@@ -101,34 +101,34 @@\n  *   A pointer to the node of MCS lock passed in rte_mcslock_lock.\n  */\n static inline void\n-rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me)\n+rte_mcslock_unlock(RTE_ATOMIC(rte_mcslock_t *) *msl, RTE_ATOMIC(rte_mcslock_t *) me)\n {\n \t/* Check if there are more nodes in the queue. */\n-\tif (likely(__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)) {\n+\tif (likely(rte_atomic_load_explicit(&me->next, rte_memory_order_relaxed) == NULL)) {\n \t\t/* No, last member in the queue. */\n-\t\trte_mcslock_t *save_me = __atomic_load_n(&me, __ATOMIC_RELAXED);\n+\t\trte_mcslock_t *save_me = rte_atomic_load_explicit(&me, rte_memory_order_relaxed);\n \n \t\t/* Release the lock by setting it to NULL */\n-\t\tif (likely(__atomic_compare_exchange_n(msl, &save_me, NULL, 0,\n-\t\t\t\t__ATOMIC_RELEASE, __ATOMIC_RELAXED)))\n+\t\tif (likely(rte_atomic_compare_exchange_strong_explicit(msl, &save_me, NULL,\n+\t\t\t\trte_memory_order_release, rte_memory_order_relaxed)))\n \t\t\treturn;\n \n \t\t/* Speculative execution would be allowed to read in the\n \t\t * while-loop first. 
This has the potential to cause a\n \t\t * deadlock. Need a load barrier.\n \t\t */\n-\t\t__atomic_thread_fence(__ATOMIC_ACQUIRE);\n+\t\t__rte_atomic_thread_fence(rte_memory_order_acquire);\n \t\t/* More nodes added to the queue by other CPUs.\n \t\t * Wait until the next pointer is set.\n \t\t */\n-\t\tuintptr_t *next;\n-\t\tnext = (uintptr_t *)&me->next;\n+\t\tRTE_ATOMIC(uintptr_t) *next;\n+\t\tnext = (__rte_atomic uintptr_t *)&me->next;\n \t\tRTE_WAIT_UNTIL_MASKED(next, UINTPTR_MAX, !=, 0,\n-\t\t\t__ATOMIC_RELAXED);\n+\t\t\trte_memory_order_relaxed);\n \t}\n \n \t/* Pass lock to next waiter. */\n-\t__atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&me->next->locked, 0, rte_memory_order_release);\n }\n \n /**\n@@ -142,10 +142,10 @@\n  *   1 if the lock is successfully taken; 0 otherwise.\n  */\n static inline int\n-rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me)\n+rte_mcslock_trylock(RTE_ATOMIC(rte_mcslock_t *) *msl, rte_mcslock_t *me)\n {\n \t/* Init me node */\n-\t__atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&me->next, NULL, rte_memory_order_relaxed);\n \n \t/* Try to lock */\n \trte_mcslock_t *expected = NULL;\n@@ -156,8 +156,8 @@\n \t * is visible to other CPUs/threads. Hence, the compare-exchange\n \t * operation requires release semantics as well.\n \t */\n-\treturn __atomic_compare_exchange_n(msl, &expected, me, 0,\n-\t\t\t__ATOMIC_ACQ_REL, __ATOMIC_RELAXED);\n+\treturn rte_atomic_compare_exchange_strong_explicit(msl, &expected, me,\n+\t\t\trte_memory_order_acq_rel, rte_memory_order_relaxed);\n }\n \n /**\n@@ -169,9 +169,9 @@\n  *   1 if the lock is currently taken; 0 otherwise.\n  */\n static inline int\n-rte_mcslock_is_locked(rte_mcslock_t *msl)\n+rte_mcslock_is_locked(RTE_ATOMIC(rte_mcslock_t *) msl)\n {\n-\treturn (__atomic_load_n(&msl, __ATOMIC_RELAXED) != NULL);\n+\treturn (rte_atomic_load_explicit(&msl, rte_memory_order_relaxed) != NULL);\n }\n \n #ifdef __cplusplus\ndiff --git a/lib/eal/include/rte_pflock.h b/lib/eal/include/rte_pflock.h\nindex 790be71..79feeea 100644\n--- a/lib/eal/include/rte_pflock.h\n+++ b/lib/eal/include/rte_pflock.h\n@@ -41,8 +41,8 @@\n  */\n struct rte_pflock {\n \tstruct {\n-\t\tuint16_t in;\n-\t\tuint16_t out;\n+\t\tRTE_ATOMIC(uint16_t) in;\n+\t\tRTE_ATOMIC(uint16_t) out;\n \t} rd, wr;\n };\n typedef struct rte_pflock rte_pflock_t;\n@@ -117,14 +117,14 @@ struct rte_pflock {\n \t * If no writer is present, then the operation has completed\n \t * successfully.\n \t */\n-\tw = __atomic_fetch_add(&pf->rd.in, RTE_PFLOCK_RINC, __ATOMIC_ACQUIRE)\n+\tw = rte_atomic_fetch_add_explicit(&pf->rd.in, RTE_PFLOCK_RINC, rte_memory_order_acquire)\n \t\t& RTE_PFLOCK_WBITS;\n \tif (w == 0)\n \t\treturn;\n \n \t/* Wait for current write phase to complete. 
*/\n \tRTE_WAIT_UNTIL_MASKED(&pf->rd.in, RTE_PFLOCK_WBITS, !=, w,\n-\t\t__ATOMIC_ACQUIRE);\n+\t\trte_memory_order_acquire);\n }\n \n /**\n@@ -140,7 +140,7 @@ struct rte_pflock {\n static inline void\n rte_pflock_read_unlock(rte_pflock_t *pf)\n {\n-\t__atomic_fetch_add(&pf->rd.out, RTE_PFLOCK_RINC, __ATOMIC_RELEASE);\n+\trte_atomic_fetch_add_explicit(&pf->rd.out, RTE_PFLOCK_RINC, rte_memory_order_release);\n }\n \n /**\n@@ -161,8 +161,9 @@ struct rte_pflock {\n \t/* Acquire ownership of write-phase.\n \t * This is same as rte_ticketlock_lock().\n \t */\n-\tticket = __atomic_fetch_add(&pf->wr.in, 1, __ATOMIC_RELAXED);\n-\trte_wait_until_equal_16(&pf->wr.out, ticket, __ATOMIC_ACQUIRE);\n+\tticket = rte_atomic_fetch_add_explicit(&pf->wr.in, 1, rte_memory_order_relaxed);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&pf->wr.out, ticket,\n+\t    rte_memory_order_acquire);\n \n \t/*\n \t * Acquire ticket on read-side in order to allow them\n@@ -173,10 +174,11 @@ struct rte_pflock {\n \t * speculatively.\n \t */\n \tw = RTE_PFLOCK_PRES | (ticket & RTE_PFLOCK_PHID);\n-\tticket = __atomic_fetch_add(&pf->rd.in, w, __ATOMIC_RELAXED);\n+\tticket = rte_atomic_fetch_add_explicit(&pf->rd.in, w, rte_memory_order_relaxed);\n \n \t/* Wait for any pending readers to flush. */\n-\trte_wait_until_equal_16(&pf->rd.out, ticket, __ATOMIC_ACQUIRE);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&pf->rd.out, ticket,\n+\t    rte_memory_order_acquire);\n }\n \n /**\n@@ -193,10 +195,10 @@ struct rte_pflock {\n rte_pflock_write_unlock(rte_pflock_t *pf)\n {\n \t/* Migrate from write phase to read phase. */\n-\t__atomic_fetch_and(&pf->rd.in, RTE_PFLOCK_LSB, __ATOMIC_RELEASE);\n+\trte_atomic_fetch_and_explicit(&pf->rd.in, RTE_PFLOCK_LSB, rte_memory_order_release);\n \n \t/* Allow other writers to continue. */\n-\t__atomic_fetch_add(&pf->wr.out, 1, __ATOMIC_RELEASE);\n+\trte_atomic_fetch_add_explicit(&pf->wr.out, 1, rte_memory_order_release);\n }\n \n #ifdef __cplusplus\ndiff --git a/lib/eal/include/rte_seqcount.h b/lib/eal/include/rte_seqcount.h\nindex 098af26..4f9cefb 100644\n--- a/lib/eal/include/rte_seqcount.h\n+++ b/lib/eal/include/rte_seqcount.h\n@@ -32,7 +32,7 @@\n  * The RTE seqcount type.\n  */\n typedef struct {\n-\tuint32_t sn; /**< A sequence number for the protected data. */\n+\tRTE_ATOMIC(uint32_t) sn; /**< A sequence number for the protected data. */\n } rte_seqcount_t;\n \n /**\n@@ -106,11 +106,11 @@\n static inline uint32_t\n rte_seqcount_read_begin(const rte_seqcount_t *seqcount)\n {\n-\t/* __ATOMIC_ACQUIRE to prevent loads after (in program order)\n+\t/* rte_memory_order_acquire to prevent loads after (in program order)\n \t * from happening before the sn load. 
Synchronizes-with the\n \t * store release in rte_seqcount_write_end().\n \t */\n-\treturn __atomic_load_n(&seqcount->sn, __ATOMIC_ACQUIRE);\n+\treturn rte_atomic_load_explicit(&seqcount->sn, rte_memory_order_acquire);\n }\n \n /**\n@@ -161,9 +161,9 @@\n \t\treturn true;\n \n \t/* make sure the data loads happens before the sn load */\n-\trte_atomic_thread_fence(__ATOMIC_ACQUIRE);\n+\trte_atomic_thread_fence(rte_memory_order_acquire);\n \n-\tend_sn = __atomic_load_n(&seqcount->sn, __ATOMIC_RELAXED);\n+\tend_sn = rte_atomic_load_explicit(&seqcount->sn, rte_memory_order_relaxed);\n \n \t/* A writer incremented the sequence number during this read\n \t * critical section.\n@@ -205,12 +205,12 @@\n \n \tsn = seqcount->sn + 1;\n \n-\t__atomic_store_n(&seqcount->sn, sn, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&seqcount->sn, sn, rte_memory_order_relaxed);\n \n-\t/* __ATOMIC_RELEASE to prevent stores after (in program order)\n+\t/* rte_memory_order_release to prevent stores after (in program order)\n \t * from happening before the sn store.\n \t */\n-\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\trte_atomic_thread_fence(rte_memory_order_release);\n }\n \n /**\n@@ -237,7 +237,7 @@\n \tsn = seqcount->sn + 1;\n \n \t/* Synchronizes-with the load acquire in rte_seqcount_read_begin(). */\n-\t__atomic_store_n(&seqcount->sn, sn, __ATOMIC_RELEASE);\n+\trte_atomic_store_explicit(&seqcount->sn, sn, rte_memory_order_release);\n }\n \n #ifdef __cplusplus\ndiff --git a/lib/eal/include/rte_stdatomic.h b/lib/eal/include/rte_stdatomic.h\nindex f03be9b..3934190 100644\n--- a/lib/eal/include/rte_stdatomic.h\n+++ b/lib/eal/include/rte_stdatomic.h\n@@ -18,6 +18,7 @@\n \n #include <stdatomic.h>\n \n+#define RTE_ATOMIC(type) _Atomic(type)\n #define __rte_atomic _Atomic\n \n /* The memory order is an enumerated type in C11. 
*/\n@@ -110,6 +111,7 @@\n \n #else\n \n+#define RTE_ATOMIC(type) type\n #define __rte_atomic\n \n /* The memory order is an integer type in GCC built-ins,\ndiff --git a/lib/eal/include/rte_ticketlock.h b/lib/eal/include/rte_ticketlock.h\nindex e22d119..7d39bca 100644\n--- a/lib/eal/include/rte_ticketlock.h\n+++ b/lib/eal/include/rte_ticketlock.h\n@@ -30,10 +30,10 @@\n  * The rte_ticketlock_t type.\n  */\n typedef union {\n-\tuint32_t tickets;\n+\tRTE_ATOMIC(uint32_t) tickets;\n \tstruct {\n-\t\tuint16_t current;\n-\t\tuint16_t next;\n+\t\tRTE_ATOMIC(uint16_t) current;\n+\t\tRTE_ATOMIC(uint16_t) next;\n \t} s;\n } rte_ticketlock_t;\n \n@@ -51,7 +51,7 @@\n static inline void\n rte_ticketlock_init(rte_ticketlock_t *tl)\n {\n-\t__atomic_store_n(&tl->tickets, 0, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&tl->tickets, 0, rte_memory_order_relaxed);\n }\n \n /**\n@@ -63,8 +63,9 @@\n static inline void\n rte_ticketlock_lock(rte_ticketlock_t *tl)\n {\n-\tuint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED);\n-\trte_wait_until_equal_16(&tl->s.current, me, __ATOMIC_ACQUIRE);\n+\tuint16_t me = rte_atomic_fetch_add_explicit(&tl->s.next, 1, rte_memory_order_relaxed);\n+\trte_wait_until_equal_16((uint16_t *)(uintptr_t)&tl->s.current, me,\n+\t    rte_memory_order_acquire);\n }\n \n /**\n@@ -76,8 +77,8 @@\n static inline void\n rte_ticketlock_unlock(rte_ticketlock_t *tl)\n {\n-\tuint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE);\n+\tuint16_t i = rte_atomic_load_explicit(&tl->s.current, rte_memory_order_relaxed);\n+\trte_atomic_store_explicit(&tl->s.current, i + 1, rte_memory_order_release);\n }\n \n /**\n@@ -92,12 +93,13 @@\n rte_ticketlock_trylock(rte_ticketlock_t *tl)\n {\n \trte_ticketlock_t oldl, newl;\n-\toldl.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED);\n+\toldl.tickets = rte_atomic_load_explicit(&tl->tickets, rte_memory_order_relaxed);\n \tnewl.tickets = oldl.tickets;\n \tnewl.s.next++;\n \tif (oldl.s.next == oldl.s.current) {\n-\t\tif (__atomic_compare_exchange_n(&tl->tickets, &oldl.tickets,\n-\t\t    newl.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))\n+\t\tif (rte_atomic_compare_exchange_strong_explicit(&tl->tickets,\n+\t\t    (uint32_t *)(uintptr_t)&oldl.tickets,\n+\t\t    newl.tickets, rte_memory_order_acquire, rte_memory_order_relaxed))\n \t\t\treturn 1;\n \t}\n \n@@ -116,7 +118,7 @@\n rte_ticketlock_is_locked(rte_ticketlock_t *tl)\n {\n \trte_ticketlock_t tic;\n-\ttic.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_ACQUIRE);\n+\ttic.tickets = rte_atomic_load_explicit(&tl->tickets, rte_memory_order_acquire);\n \treturn (tic.s.current != tic.s.next);\n }\n \n@@ -127,7 +129,7 @@\n \n typedef struct {\n \trte_ticketlock_t tl; /**< the actual ticketlock */\n-\tint user; /**< core id using lock, TICKET_LOCK_INVALID_ID for unused */\n+\tRTE_ATOMIC(int) user; /**< core id using lock, TICKET_LOCK_INVALID_ID for unused */\n \tunsigned int count; /**< count of time this lock has been called */\n } rte_ticketlock_recursive_t;\n \n@@ -147,7 +149,7 @@\n rte_ticketlock_recursive_init(rte_ticketlock_recursive_t *tlr)\n {\n \trte_ticketlock_init(&tlr->tl);\n-\t__atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, __ATOMIC_RELAXED);\n+\trte_atomic_store_explicit(&tlr->user, TICKET_LOCK_INVALID_ID, rte_memory_order_relaxed);\n \ttlr->count = 0;\n }\n \n@@ -162,9 +164,9 @@\n {\n \tint id = rte_gettid();\n \n-\tif (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {\n+\tif 
(rte_atomic_load_explicit(&tlr->user, rte_memory_order_relaxed) != id) {\n \t\trte_ticketlock_lock(&tlr->tl);\n-\t\t__atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&tlr->user, id, rte_memory_order_relaxed);\n \t}\n \ttlr->count++;\n }\n@@ -179,8 +181,8 @@\n rte_ticketlock_recursive_unlock(rte_ticketlock_recursive_t *tlr)\n {\n \tif (--(tlr->count) == 0) {\n-\t\t__atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID,\n-\t\t\t\t __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&tlr->user, TICKET_LOCK_INVALID_ID,\n+\t\t\t\t rte_memory_order_relaxed);\n \t\trte_ticketlock_unlock(&tlr->tl);\n \t}\n }\n@@ -198,10 +200,10 @@\n {\n \tint id = rte_gettid();\n \n-\tif (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {\n+\tif (rte_atomic_load_explicit(&tlr->user, rte_memory_order_relaxed) != id) {\n \t\tif (rte_ticketlock_trylock(&tlr->tl) == 0)\n \t\t\treturn 0;\n-\t\t__atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);\n+\t\trte_atomic_store_explicit(&tlr->user, id, rte_memory_order_relaxed);\n \t}\n \ttlr->count++;\n \treturn 1;\ndiff --git a/lib/eal/include/rte_trace_point.h b/lib/eal/include/rte_trace_point.h\nindex d587591..b403edd 100644\n--- a/lib/eal/include/rte_trace_point.h\n+++ b/lib/eal/include/rte_trace_point.h\n@@ -33,7 +33,7 @@\n #include <rte_stdatomic.h>\n \n /** The tracepoint object. */\n-typedef uint64_t rte_trace_point_t;\n+typedef RTE_ATOMIC(uint64_t) rte_trace_point_t;\n \n /**\n  * Macro to define the tracepoint arguments in RTE_TRACE_POINT macro.\n@@ -359,7 +359,7 @@ struct __rte_trace_header {\n #define __rte_trace_point_emit_header_generic(t) \\\n void *mem; \\\n do { \\\n-\tconst uint64_t val = __atomic_load_n(t, __ATOMIC_ACQUIRE); \\\n+\tconst uint64_t val = rte_atomic_load_explicit(t, rte_memory_order_acquire); \\\n \tif (likely(!(val & __RTE_TRACE_FIELD_ENABLE_MASK))) \\\n \t\treturn; \\\n \tmem = __rte_trace_mem_get(val); \\\ndiff --git a/lib/eal/loongarch/include/rte_atomic.h b/lib/eal/loongarch/include/rte_atomic.h\nindex 3c82845..0510b8f 100644\n--- a/lib/eal/loongarch/include/rte_atomic.h\n+++ b/lib/eal/loongarch/include/rte_atomic.h\n@@ -35,9 +35,9 @@\n #define rte_io_rmb()\trte_mb()\n \n static __rte_always_inline void\n-rte_atomic_thread_fence(int memorder)\n+rte_atomic_thread_fence(rte_memory_order memorder)\n {\n-\t__atomic_thread_fence(memorder);\n+\t__rte_atomic_thread_fence(memorder);\n }\n \n #ifdef __cplusplus\ndiff --git a/lib/eal/ppc/include/rte_atomic.h b/lib/eal/ppc/include/rte_atomic.h\nindex ec8d8a2..7382412 100644\n--- a/lib/eal/ppc/include/rte_atomic.h\n+++ b/lib/eal/ppc/include/rte_atomic.h\n@@ -38,9 +38,9 @@\n #define rte_io_rmb() rte_rmb()\n \n static __rte_always_inline void\n-rte_atomic_thread_fence(int memorder)\n+rte_atomic_thread_fence(rte_memory_order memorder)\n {\n-\t__atomic_thread_fence(memorder);\n+\t__rte_atomic_thread_fence(memorder);\n }\n \n /*------------------------- 16 bit atomic operations -------------------------*/\n@@ -48,8 +48,8 @@\n static inline int\n rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)\n {\n-\treturn __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,\n-\t\t__ATOMIC_ACQUIRE) ? 1 : 0;\n+\treturn __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire,\n+\t\trte_memory_order_acquire) ? 
1 : 0;\n }\n \n static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)\n@@ -60,29 +60,29 @@ static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)\n static inline void\n rte_atomic16_inc(rte_atomic16_t *v)\n {\n-\t__atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE);\n+\trte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire);\n }\n \n static inline void\n rte_atomic16_dec(rte_atomic16_t *v)\n {\n-\t__atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE);\n+\trte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire);\n }\n \n static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)\n {\n-\treturn __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE) + 1 == 0;\n+\treturn rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire) + 1 == 0;\n }\n \n static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)\n {\n-\treturn __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE) - 1 == 0;\n+\treturn rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire) - 1 == 0;\n }\n \n static inline uint16_t\n rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)\n {\n-\treturn __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);\n+\treturn __atomic_exchange_2(dst, val, rte_memory_order_seq_cst);\n }\n \n /*------------------------- 32 bit atomic operations -------------------------*/\n@@ -90,8 +90,8 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)\n static inline int\n rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)\n {\n-\treturn __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,\n-\t\t__ATOMIC_ACQUIRE) ? 1 : 0;\n+\treturn __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire,\n+\t\trte_memory_order_acquire) ? 1 : 0;\n }\n \n static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)\n@@ -102,29 +102,29 @@ static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)\n static inline void\n rte_atomic32_inc(rte_atomic32_t *v)\n {\n-\t__atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE);\n+\trte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire);\n }\n \n static inline void\n rte_atomic32_dec(rte_atomic32_t *v)\n {\n-\t__atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE);\n+\trte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire);\n }\n \n static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)\n {\n-\treturn __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE) + 1 == 0;\n+\treturn rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire) + 1 == 0;\n }\n \n static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)\n {\n-\treturn __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE) - 1 == 0;\n+\treturn rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire) - 1 == 0;\n }\n \n static inline uint32_t\n rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)\n {\n-\treturn __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);\n+\treturn __atomic_exchange_4(dst, val, rte_memory_order_seq_cst);\n }\n \n /*------------------------- 64 bit atomic operations -------------------------*/\n@@ -132,8 +132,8 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)\n static inline int\n rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)\n {\n-\treturn __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,\n-\t\t__ATOMIC_ACQUIRE) ? 1 : 0;\n+\treturn __atomic_compare_exchange(dst, &exp, &src, 0, rte_memory_order_acquire,\n+\t\trte_memory_order_acquire) ? 
1 : 0;\n }\n \n static inline void\n@@ -157,47 +157,47 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)\n static inline void\n rte_atomic64_add(rte_atomic64_t *v, int64_t inc)\n {\n-\t__atomic_fetch_add(&v->cnt, inc, __ATOMIC_ACQUIRE);\n+\trte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_acquire);\n }\n \n static inline void\n rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)\n {\n-\t__atomic_fetch_sub(&v->cnt, dec, __ATOMIC_ACQUIRE);\n+\trte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_acquire);\n }\n \n static inline void\n rte_atomic64_inc(rte_atomic64_t *v)\n {\n-\t__atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE);\n+\trte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire);\n }\n \n static inline void\n rte_atomic64_dec(rte_atomic64_t *v)\n {\n-\t__atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE);\n+\trte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire);\n }\n \n static inline int64_t\n rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)\n {\n-\treturn __atomic_fetch_add(&v->cnt, inc, __ATOMIC_ACQUIRE) + inc;\n+\treturn rte_atomic_fetch_add_explicit(&v->cnt, inc, rte_memory_order_acquire) + inc;\n }\n \n static inline int64_t\n rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)\n {\n-\treturn __atomic_fetch_sub(&v->cnt, dec, __ATOMIC_ACQUIRE) - dec;\n+\treturn rte_atomic_fetch_sub_explicit(&v->cnt, dec, rte_memory_order_acquire) - dec;\n }\n \n static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)\n {\n-\treturn __atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE) + 1 == 0;\n+\treturn rte_atomic_fetch_add_explicit(&v->cnt, 1, rte_memory_order_acquire) + 1 == 0;\n }\n \n static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)\n {\n-\treturn __atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE) - 1 == 0;\n+\treturn rte_atomic_fetch_sub_explicit(&v->cnt, 1, rte_memory_order_acquire) - 1 == 0;\n }\n \n static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)\n@@ -213,7 +213,7 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)\n static inline uint64_t\n rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)\n {\n-\treturn __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);\n+\treturn __atomic_exchange_8(dst, val, rte_memory_order_seq_cst);\n }\n \n #endif\ndiff --git a/lib/eal/riscv/include/rte_atomic.h b/lib/eal/riscv/include/rte_atomic.h\nindex 4b4633c..2603bc9 100644\n--- a/lib/eal/riscv/include/rte_atomic.h\n+++ b/lib/eal/riscv/include/rte_atomic.h\n@@ -40,9 +40,9 @@\n #define rte_io_rmb()\tasm volatile(\"fence ir, ir\" : : : \"memory\")\n \n static __rte_always_inline void\n-rte_atomic_thread_fence(int memorder)\n+rte_atomic_thread_fence(rte_memory_order memorder)\n {\n-\t__atomic_thread_fence(memorder);\n+\t__rte_atomic_thread_fence(memorder);\n }\n \n #ifdef __cplusplus\ndiff --git a/lib/eal/x86/include/rte_atomic.h b/lib/eal/x86/include/rte_atomic.h\nindex f2ee1a9..3b3a9a4 100644\n--- a/lib/eal/x86/include/rte_atomic.h\n+++ b/lib/eal/x86/include/rte_atomic.h\n@@ -82,17 +82,17 @@\n /**\n  * Synchronization fence between threads based on the specified memory order.\n  *\n- * On x86 the __atomic_thread_fence(__ATOMIC_SEQ_CST) generates full 'mfence'\n+ * On x86 the __rte_atomic_thread_fence(rte_memory_order_seq_cst) generates full 'mfence'\n  * which is quite expensive. 
The optimized implementation of rte_smp_mb is\n  * used instead.\n  */\n static __rte_always_inline void\n-rte_atomic_thread_fence(int memorder)\n+rte_atomic_thread_fence(rte_memory_order memorder)\n {\n-\tif (memorder == __ATOMIC_SEQ_CST)\n+\tif (memorder == rte_memory_order_seq_cst)\n \t\trte_smp_mb();\n \telse\n-\t\t__atomic_thread_fence(memorder);\n+\t\t__rte_atomic_thread_fence(memorder);\n }\n \n /*------------------------- 16 bit atomic operations -------------------------*/\ndiff --git a/lib/eal/x86/include/rte_spinlock.h b/lib/eal/x86/include/rte_spinlock.h\nindex 0b20ddf..a6c23ea 100644\n--- a/lib/eal/x86/include/rte_spinlock.h\n+++ b/lib/eal/x86/include/rte_spinlock.h\n@@ -78,7 +78,7 @@ static inline int rte_tm_supported(void)\n }\n \n static inline int\n-rte_try_tm(volatile int *lock)\n+rte_try_tm(volatile RTE_ATOMIC(int) *lock)\n {\n \tint i, retries;\n \ndiff --git a/lib/eal/x86/rte_power_intrinsics.c b/lib/eal/x86/rte_power_intrinsics.c\nindex f749da9..cf70e33 100644\n--- a/lib/eal/x86/rte_power_intrinsics.c\n+++ b/lib/eal/x86/rte_power_intrinsics.c\n@@ -23,9 +23,9 @@\n \tuint64_t val;\n \n \t/* trigger a write but don't change the value */\n-\tval = __atomic_load_n((volatile uint64_t *)addr, __ATOMIC_RELAXED);\n-\t__atomic_compare_exchange_n((volatile uint64_t *)addr, &val, val, 0,\n-\t\t\t__ATOMIC_RELAXED, __ATOMIC_RELAXED);\n+\tval = rte_atomic_load_explicit((volatile uint64_t *)addr, rte_memory_order_relaxed);\n+\trte_atomic_compare_exchange_strong_explicit((volatile uint64_t *)addr, &val, val,\n+\t\t\trte_memory_order_relaxed, rte_memory_order_relaxed);\n }\n \n static bool wait_supported;\n",
    "prefixes": [
        "v3",
        "2/6"
    ]
}
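
Editor's note: the patch above mechanically rewrites GCC builtin atomics into the rte_stdatomic.h wrappers introduced by this series. Below is a minimal sketch, not part of the patch, showing the same conversion pattern applied to a hypothetical structure; the names demo_state, flag, refcnt, demo_try_enable, demo_get and demo_put are illustrative only, and the sketch assumes a DPDK tree that already provides rte_stdatomic.h (RTE_ATOMIC(), rte_memory_order_*, rte_atomic_*_explicit) from this series.

/*
 * Conversion pattern used throughout the patch:
 *   - fields accessed atomically gain the RTE_ATOMIC() specifier
 *   - __atomic_*() builtins become rte_atomic_*_explicit()
 *   - __ATOMIC_* constants become rte_memory_order_* values
 */
#include <stdint.h>
#include <rte_stdatomic.h>

struct demo_state {
	RTE_ATOMIC(uint64_t) flag;    /* was plain uint64_t touched via __atomic_*() */
	RTE_ATOMIC(uint32_t) refcnt;
};

static inline int
demo_try_enable(struct demo_state *s)
{
	uint64_t expected = 0;

	/* was: __atomic_compare_exchange_n(&s->flag, &expected, 1, 0,
	 *                                  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
	 */
	return rte_atomic_compare_exchange_strong_explicit(&s->flag,
			&expected, 1,
			rte_memory_order_acquire, rte_memory_order_relaxed);
}

static inline void
demo_get(struct demo_state *s)
{
	/* was: __atomic_fetch_add(&s->refcnt, 1, __ATOMIC_RELAXED); */
	rte_atomic_fetch_add_explicit(&s->refcnt, 1, rte_memory_order_relaxed);
}

static inline void
demo_put(struct demo_state *s)
{
	/* was: __atomic_fetch_sub(&s->refcnt, 1, __ATOMIC_RELEASE);
	 * fetch_sub returning 1 means this was the last reference.
	 */
	if (rte_atomic_fetch_sub_explicit(&s->refcnt, 1,
			rte_memory_order_release) == 1)
		rte_atomic_thread_fence(rte_memory_order_acquire);
}

As the x86 hunk in the diff notes, rte_atomic_thread_fence(rte_memory_order_seq_cst) is special-cased there to rte_smp_mb(), because a full mfence is comparatively expensive; on the other architectures touched by the patch the fence simply forwards to __rte_atomic_thread_fence().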