get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch.
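
As a minimal sketch of how these endpoints can be driven from a script (assuming Python with the third-party requests package; the token placeholder and the "accepted" state below are purely illustrative, and any write operation requires a maintainer API token for the project):

    import requests  # third-party HTTP client, assumed installed

    BASE = "http://patchwork.dpdk.org/api"

    # GET: show a patch; read access needs no authentication
    patch = requests.get(BASE + "/patches/129767/").json()
    print(patch["name"], patch["state"])

    # PATCH: partial update, e.g. change only the state field
    # (hypothetical token; requires maintainer rights on the project)
    requests.patch(
        BASE + "/patches/129767/",
        headers={"Authorization": "Token <your-api-token>"},
        json={"state": "accepted"},
    )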

GET /api/patches/129767/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 129767,
    "url": "http://patchwork.dpdk.org/api/patches/129767/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/1690866234-28365-4-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1690866234-28365-4-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1690866234-28365-4-git-send-email-roretzla@linux.microsoft.com",
    "date": "2023-08-01T05:03:53",
    "name": "[v2,3/4] eal: adapt rte pause APIs to use C11 atomics",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "174a84ab80b15d0af5bd9c6e7776b81471718f6c",
    "submitter": {
        "id": 2077,
        "url": "http://patchwork.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/1690866234-28365-4-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 29053,
            "url": "http://patchwork.dpdk.org/api/series/29053/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=29053",
            "date": "2023-08-01T05:03:51",
            "name": "eal: update public API to use stdatomic atomics",
            "version": 2,
            "mbox": "http://patchwork.dpdk.org/series/29053/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/129767/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/129767/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D8C4F42FAE;\n\tTue,  1 Aug 2023 07:04:24 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 9B50C43262;\n\tTue,  1 Aug 2023 07:04:02 +0200 (CEST)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id EB7B443251\n for <dev@dpdk.org>; Tue,  1 Aug 2023 07:03:56 +0200 (CEST)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id E40B6238AF44; Mon, 31 Jul 2023 22:03:55 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com E40B6238AF44",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1690866235;\n bh=M3HqiyCUdTJgCTmApDkgahNvv8dJRl/LcBfFIvpVwnU=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=n1Y46e2JHIP7m/EaP1MjKGckNpclbB20KvFQNk7TIPt+9ueGetcZigKRlzf0t/1T1\n P8T1h0H019W+cuRE2FcbX6DShnA+eW3bxPC7LlBloDXIlJpedxJJoa58r5S85ZJUjE\n 8u/yOd+HIOAu1406mVA3F1T/koFtZLtbLU7SjQhc=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "Gaetan Rivet <grive@u256.net>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Thomas Monjalon <thomas@monjalon.net>,\n Nicolas Chautru <nicolas.chautru@intel.com>,\n Yipeng Wang <yipeng1.wang@intel.com>,\n Sameh Gobriel <sameh.gobriel@intel.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Olivier Matz <olivier.matz@6wind.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Joyce Kong <joyce.kong@arm.com>,\n Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n Liang Ma <liangma@liangbit.com>, Peter Mccarthy <peter.mccarthy@intel.com>,\n Jerin Jacob <jerinj@marvell.com>, Maciej Czekaj <mczekaj@marvell.com>,\n David Hunt <david.hunt@intel.com>, Ruifeng Wang <ruifeng.wang@arm.com>,\n Min Zhou <zhoumin@loongson.cn>, David Christensen <drc@linux.vnet.ibm.com>,\n Stanislaw Kardach <kda@semihalf.com>, david.marchand@redhat.com,\n stephen@networkplumber.org, mb@smartsharesystems.com,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH v2 3/4] eal: adapt rte pause APIs to use C11 atomics",
        "Date": "Mon, 31 Jul 2023 22:03:53 -0700",
        "Message-Id": "<1690866234-28365-4-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1690866234-28365-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1690837661-27573-1-git-send-email-roretzla@linux.microsoft.com>\n <1690866234-28365-1-git-send-email-roretzla@linux.microsoft.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Adapt rte_pause.h APIs to use standard C11 atomics. Update consumers of\nthe pause APIs for the API break.\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\n---\n app/test-bbdev/test_bbdev_perf.c         | 123 ++++++++++++++++++++-----------\n app/test/test_func_reentrancy.c          |   8 +-\n app/test/test_mcslock.c                  |  12 +--\n app/test/test_mempool_perf.c             |   8 +-\n app/test/test_pflock.c                   |  12 +--\n app/test/test_pmd_perf.c                 |  10 +--\n app/test/test_ring_perf.c                |   8 +-\n app/test/test_rwlock.c                   |   8 +-\n app/test/test_spinlock.c                 |   8 +-\n app/test/test_stack_perf.c               |  12 +--\n app/test/test_ticketlock.c               |   8 +-\n app/test/test_timer.c                    |  16 ++--\n drivers/event/opdl/opdl_ring.c           |  47 ++++++------\n drivers/net/thunderx/nicvf_rxtx.c        |   5 +-\n drivers/net/thunderx/nicvf_struct.h      |   2 +-\n lib/bpf/bpf_pkt.c                        |   4 +-\n lib/distributor/distributor_private.h    |   2 +-\n lib/distributor/rte_distributor_single.c |  44 +++++------\n lib/eal/arm/include/rte_pause_64.h       |  28 +++----\n lib/eal/common/eal_memcfg.h              |   2 +-\n lib/eal/include/generic/rte_pause.h      |  52 ++++++-------\n lib/eal/include/rte_mcslock.h            |  12 +--\n lib/eal/include/rte_pflock.h             |  22 +++---\n lib/eal/include/rte_ticketlock.h         |   8 +-\n lib/eal/loongarch/include/rte_pause.h    |   2 -\n lib/eal/ppc/include/rte_pause.h          |   2 -\n lib/eal/riscv/include/rte_pause.h        |   2 -\n lib/ring/rte_ring_c11_pvt.h              |  28 +++----\n lib/ring/rte_ring_core.h                 |   4 +-\n lib/ring/rte_ring_generic_pvt.h          |  19 +++--\n lib/ring/rte_ring_peek_elem_pvt.h        |   2 +-\n 31 files changed, 280 insertions(+), 240 deletions(-)",
    "diff": "diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c\nindex 276bbf0..c3a3a28 100644\n--- a/app/test-bbdev/test_bbdev_perf.c\n+++ b/app/test-bbdev/test_bbdev_perf.c\n@@ -143,7 +143,7 @@ struct test_op_params {\n \tuint16_t num_to_process;\n \tuint16_t num_lcores;\n \tint vector_mask;\n-\tuint16_t sync;\n+\tuint16_t _Atomic sync;\n \tstruct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];\n };\n \n@@ -158,7 +158,7 @@ struct thread_params {\n \tuint8_t iter_count;\n \tdouble iter_average;\n \tdouble bler;\n-\tuint16_t nb_dequeued;\n+\tuint16_t _Atomic nb_dequeued;\n \tint16_t processing_status;\n \tuint16_t burst_sz;\n \tstruct test_op_params *op_params;\n@@ -3021,27 +3021,32 @@ typedef int (test_case_function)(struct active_device *ad,\n \tif (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)\n \t\tdeq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,\n \t\t\t\t&tp->dec_ops[\n-\t\t\t\t\t__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],\n+\t\t\t\t\tatomic_load_explicit(&tp->nb_dequeued,\n+\t\t\t\t\t\tmemory_order_relaxed)],\n \t\t\t\tburst_sz);\n \telse if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)\n \t\tdeq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,\n \t\t\t\t&tp->dec_ops[\n-\t\t\t\t\t__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],\n+\t\t\t\t\tatomic_load_explicit(&tp->nb_dequeued,\n+\t\t\t\t\t\tmemory_order_relaxed)],\n \t\t\t\tburst_sz);\n \telse if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)\n \t\tdeq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,\n \t\t\t\t&tp->enc_ops[\n-\t\t\t\t\t__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],\n+\t\t\t\t\tatomic_load_explicit(&tp->nb_dequeued,\n+\t\t\t\t\t\tmemory_order_relaxed)],\n \t\t\t\tburst_sz);\n \telse if (test_vector.op_type == RTE_BBDEV_OP_FFT)\n \t\tdeq = rte_bbdev_dequeue_fft_ops(dev_id, queue_id,\n \t\t\t\t&tp->fft_ops[\n-\t\t\t\t\t__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],\n+\t\t\t\t\tatomic_load_explicit(&tp->nb_dequeued,\n+\t\t\t\t\t\tmemory_order_relaxed)],\n \t\t\t\tburst_sz);\n \telse /*RTE_BBDEV_OP_TURBO_ENC*/\n \t\tdeq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,\n \t\t\t\t&tp->enc_ops[\n-\t\t\t\t\t__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],\n+\t\t\t\t\tatomic_load_explicit(&tp->nb_dequeued,\n+\t\t\t\t\t\tmemory_order_relaxed)],\n \t\t\t\tburst_sz);\n \n \tif (deq < burst_sz) {\n@@ -3052,8 +3057,9 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\treturn;\n \t}\n \n-\tif (__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) + deq < num_ops) {\n-\t\t__atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);\n+\tif (atomic_load_explicit(&tp->nb_dequeued, memory_order_relaxed) + deq < num_ops) {\n+\t\tatomic_fetch_add_explicit(&tp->nb_dequeued, deq,\n+\t\t\tmemory_order_relaxed);\n \t\treturn;\n \t}\n \n@@ -3126,7 +3132,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \ttp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /\n \t\t\t((double)total_time / (double)rte_get_tsc_hz());\n \n-\t__atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);\n+\tatomic_fetch_add_explicit(&tp->nb_dequeued, deq,\n+\t\tmemory_order_relaxed);\n }\n \n static int\n@@ -3165,9 +3172,10 @@ typedef int (test_case_function)(struct active_device *ad,\n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n \t__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&tp->nb_dequeued, 0, memory_order_relaxed);\n 
\n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,\n \t\t\t\tnum_to_process);\n@@ -3222,10 +3230,12 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t/* Wait until processing of previous batch is\n \t\t\t * completed\n \t\t\t */\n-\t\t\trte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);\n+\t\t\trte_wait_until_equal_16(&tp->nb_dequeued,\n+\t\t\t\tenqueued, memory_order_relaxed);\n \t\t}\n \t\tif (j != TEST_REPETITIONS - 1)\n-\t\t\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\t\t\tatomic_store_explicit(&tp->nb_dequeued, 0,\n+\t\t\t\tmemory_order_relaxed);\n \t}\n \n \treturn TEST_SUCCESS;\n@@ -3262,9 +3272,10 @@ typedef int (test_case_function)(struct active_device *ad,\n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n \t__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&tp->nb_dequeued, 0, memory_order_relaxed);\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,\n \t\t\t\tnum_to_process);\n@@ -3313,10 +3324,12 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t/* Wait until processing of previous batch is\n \t\t\t * completed\n \t\t\t */\n-\t\t\trte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);\n+\t\t\trte_wait_until_equal_16(&tp->nb_dequeued,\n+\t\t\t\tenqueued, memory_order_relaxed);\n \t\t}\n \t\tif (j != TEST_REPETITIONS - 1)\n-\t\t\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\t\t\tatomic_store_explicit(&tp->nb_dequeued, 0,\n+\t\t\t\tmemory_order_relaxed);\n \t}\n \n \treturn TEST_SUCCESS;\n@@ -3352,9 +3365,10 @@ typedef int (test_case_function)(struct active_device *ad,\n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n \t__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&tp->nb_dequeued, 0, memory_order_relaxed);\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,\n \t\t\tnum_to_process);\n@@ -3399,10 +3413,12 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t/* Wait until processing of previous batch is\n \t\t\t * completed\n \t\t\t */\n-\t\t\trte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);\n+\t\t\trte_wait_until_equal_16(&tp->nb_dequeued,\n+\t\t\t\tenqueued, memory_order_relaxed);\n \t\t}\n \t\tif (j != TEST_REPETITIONS - 1)\n-\t\t\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\t\t\tatomic_store_explicit(&tp->nb_dequeued, 0,\n+\t\t\t\tmemory_order_relaxed);\n \t}\n \n \treturn TEST_SUCCESS;\n@@ -3439,9 +3455,10 @@ typedef int (test_case_function)(struct active_device *ad,\n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n \t__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&tp->nb_dequeued, 
0, memory_order_relaxed);\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,\n \t\t\tnum_to_process);\n@@ -3488,10 +3505,12 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t/* Wait until processing of previous batch is\n \t\t\t * completed\n \t\t\t */\n-\t\t\trte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);\n+\t\t\trte_wait_until_equal_16(&tp->nb_dequeued,\n+\t\t\t\tenqueued, memory_order_relaxed);\n \t\t}\n \t\tif (j != TEST_REPETITIONS - 1)\n-\t\t\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\t\t\tatomic_store_explicit(&tp->nb_dequeued, 0,\n+\t\t\t\tmemory_order_relaxed);\n \t}\n \n \treturn TEST_SUCCESS;\n@@ -3528,9 +3547,10 @@ typedef int (test_case_function)(struct active_device *ad,\n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n \t__atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&tp->nb_dequeued, 0, memory_order_relaxed);\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops,\n \t\t\tnum_to_process);\n@@ -3575,10 +3595,12 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t/* Wait until processing of previous batch is\n \t\t\t * completed\n \t\t\t */\n-\t\t\trte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);\n+\t\t\trte_wait_until_equal_16(&tp->nb_dequeued,\n+\t\t\t\tenqueued, memory_order_relaxed);\n \t\t}\n \t\tif (j != TEST_REPETITIONS - 1)\n-\t\t\t__atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);\n+\t\t\tatomic_store_explicit(&tp->nb_dequeued, 0,\n+\t\t\t\tmemory_order_relaxed);\n \t}\n \n \treturn TEST_SUCCESS;\n@@ -3613,7 +3635,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_ops);\n@@ -3732,7 +3755,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_ops);\n@@ -3867,7 +3891,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_ops);\n@@ -3990,7 +4015,8 @@ typedef int 
(test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_ops);\n@@ -4121,7 +4147,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,\n \t\t\tnum_ops);\n@@ -4222,7 +4249,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,\n \t\t\tnum_ops);\n@@ -4323,7 +4351,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \n \tbufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];\n \n-\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_16(&tp->op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \n \tret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);\n \tTEST_ASSERT_SUCCESS(ret, \"Allocation failed for %d ops\", num_ops);\n@@ -4519,7 +4548,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \telse\n \t\treturn TEST_SKIPPED;\n \n-\t__atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&op_params->sync, SYNC_WAIT,\n+\t\tmemory_order_relaxed);\n \n \t/* Main core is set at first entry */\n \tt_params[0].dev_id = ad->dev_id;\n@@ -4542,7 +4572,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t\t&t_params[used_cores++], lcore_id);\n \t}\n \n-\t__atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \tret = bler_function(&t_params[0]);\n \n \t/* Main core is always used */\n@@ -4641,7 +4672,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\tthroughput_function = throughput_pmd_lcore_enc;\n \t}\n \n-\t__atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&op_params->sync, SYNC_WAIT,\n+\t\tmemory_order_relaxed);\n \n \t/* Main core is set at first entry */\n \tt_params[0].dev_id = ad->dev_id;\n@@ -4664,7 +4696,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \t\t\t\t&t_params[used_cores++], lcore_id);\n \t}\n \n-\t__atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&op_params->sync, SYNC_START,\n+\t\tmemory_order_relaxed);\n \tret = throughput_function(&t_params[0]);\n \n \t/* Main core is always used */\n@@ -4694,8 +4727,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \t * Wait for main lcore operations.\n \t */\n \ttp = &t_params[0];\n-\twhile ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <\n-\t\top_params->num_to_process) &&\n+\twhile 
((atomic_load_explicit(&tp->nb_dequeued,\n+\t\tmemory_order_relaxed) < op_params->num_to_process) &&\n \t\t(__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=\n \t\tTEST_FAILED))\n \t\trte_pause();\n@@ -4708,8 +4741,8 @@ typedef int (test_case_function)(struct active_device *ad,\n \tfor (used_cores = 1; used_cores < num_lcores; used_cores++) {\n \t\ttp = &t_params[used_cores];\n \n-\t\twhile ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <\n-\t\t\top_params->num_to_process) &&\n+\t\twhile ((atomic_load_explicit(&tp->nb_dequeued,\n+\t\t\tmemory_order_relaxed) < op_params->num_to_process) &&\n \t\t\t(__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=\n \t\t\tTEST_FAILED))\n \t\t\trte_pause();\ndiff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c\nindex ae9de6f..833ceaf 100644\n--- a/app/test/test_func_reentrancy.c\n+++ b/app/test/test_func_reentrancy.c\n@@ -54,11 +54,11 @@\n #define MAX_LCORES\t(rte_memzone_max_get() / (MAX_ITER_MULTI * 4U))\n \n static uint32_t obj_count;\n-static uint32_t synchro;\n+static uint32_t _Atomic synchro;\n \n #define WAIT_SYNCHRO_FOR_WORKERS()   do { \\\n \tif (lcore_self != rte_get_main_lcore())                  \\\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \\\n+\t\trte_wait_until_equal_32(&synchro, 1, memory_order_relaxed); \\\n } while(0)\n \n /*\n@@ -438,7 +438,7 @@ struct test_case test_cases[] = {\n \t\treturn -1;\n \n \t__atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 0, memory_order_relaxed);\n \n \tcores = RTE_MIN(rte_lcore_count(), MAX_LCORES);\n \tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n@@ -448,7 +448,7 @@ struct test_case test_cases[] = {\n \t\trte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);\n \t}\n \n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \n \tif (pt_case->func(pt_case->arg) < 0)\n \t\tret = -1;\ndiff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c\nindex 52e45e7..3c26c69 100644\n--- a/app/test/test_mcslock.c\n+++ b/app/test/test_mcslock.c\n@@ -42,7 +42,7 @@\n \n static unsigned int count;\n \n-static uint32_t synchro;\n+static uint32_t _Atomic synchro;\n \n static int\n test_mcslock_per_core(__rte_unused void *arg)\n@@ -75,7 +75,7 @@\n \trte_mcslock_t ml_perf_me;\n \n \t/* wait synchro */\n-\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_32(&synchro, 1, memory_order_relaxed);\n \n \tbegin = rte_get_timer_cycles();\n \twhile (lcount < MAX_LOOP) {\n@@ -100,14 +100,14 @@\n \tconst unsigned int lcore = rte_lcore_id();\n \n \tprintf(\"\\nTest with no lock on single core...\\n\");\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \tload_loop_fn(&lock);\n \tprintf(\"Core [%u] Cost Time = %\"PRIu64\" us\\n\",\n \t\t\tlcore, time_count[lcore]);\n \tmemset(time_count, 0, sizeof(time_count));\n \n \tprintf(\"\\nTest with lock on single core...\\n\");\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \tlock = 1;\n \tload_loop_fn(&lock);\n \tprintf(\"Core [%u] Cost Time = %\"PRIu64\" us\\n\",\n@@ -116,11 +116,11 @@\n \n \tprintf(\"\\nTest with lock on %u cores...\\n\", (rte_lcore_count()));\n \n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 0, memory_order_relaxed);\n 
\trte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);\n \n \t/* start synchro and launch test on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \tload_loop_fn(&lock);\n \n \trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c\nindex ce7c624..06f9fc1 100644\n--- a/app/test/test_mempool_perf.c\n+++ b/app/test/test_mempool_perf.c\n@@ -88,7 +88,7 @@\n static int use_external_cache;\n static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;\n \n-static uint32_t synchro;\n+static uint32_t _Atomic synchro;\n \n /* number of objects in one bulk operation (get or put) */\n static unsigned n_get_bulk;\n@@ -188,7 +188,7 @@ struct mempool_test_stats {\n \n \t/* wait synchro for workers */\n \tif (lcore_id != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32(&synchro, 1, memory_order_relaxed);\n \n \tstart_cycles = rte_get_timer_cycles();\n \n@@ -233,7 +233,7 @@ struct mempool_test_stats {\n \tint ret;\n \tunsigned cores_save = cores;\n \n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 0, memory_order_relaxed);\n \n \t/* reset stats */\n \tmemset(stats, 0, sizeof(stats));\n@@ -258,7 +258,7 @@ struct mempool_test_stats {\n \t}\n \n \t/* start synchro and launch test on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \n \tret = per_lcore_mempool_test(mp);\n \ndiff --git a/app/test/test_pflock.c b/app/test/test_pflock.c\nindex 38da6bc..28addf8 100644\n--- a/app/test/test_pflock.c\n+++ b/app/test/test_pflock.c\n@@ -31,7 +31,7 @@\n \n static rte_pflock_t sl;\n static rte_pflock_t sl_tab[RTE_MAX_LCORE];\n-static uint32_t synchro;\n+static uint32_t _Atomic synchro;\n \n static int\n test_pflock_per_core(__rte_unused void *arg)\n@@ -69,7 +69,7 @@\n \n \t/* wait synchro for workers */\n \tif (lcore != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32(&synchro, 1, memory_order_relaxed);\n \n \tbegin = rte_rdtsc_precise();\n \twhile (lcount < MAX_LOOP) {\n@@ -99,7 +99,7 @@\n \tconst unsigned int lcore = rte_lcore_id();\n \n \tprintf(\"\\nTest with no lock on single core...\\n\");\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \tload_loop_fn(&lock);\n \tprintf(\"Core [%u] Cost Time = %\"PRIu64\" us\\n\",\n \t\t\tlcore, time_count[lcore]);\n@@ -107,7 +107,7 @@\n \n \tprintf(\"\\nTest with phase-fair lock on single core...\\n\");\n \tlock = 1;\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \tload_loop_fn(&lock);\n \tprintf(\"Core [%u] Cost Time = %\"PRIu64\" us\\n\",\n \t\t\tlcore, time_count[lcore]);\n@@ -116,12 +116,12 @@\n \tprintf(\"\\nPhase-fair test on %u cores...\\n\", rte_lcore_count());\n \n \t/* clear synchro and start workers */\n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 0, memory_order_relaxed);\n \tif (rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN) < 0)\n \t\treturn -1;\n \n \t/* start synchro and launch test on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \tload_loop_fn(&lock);\n \n \trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_pmd_perf.c 
b/app/test/test_pmd_perf.c\nindex 3ef590c..1562bbb 100644\n--- a/app/test/test_pmd_perf.c\n+++ b/app/test/test_pmd_perf.c\n@@ -537,7 +537,7 @@ enum {\n \treturn 0;\n }\n \n-static uint64_t start;\n+static uint64_t _Atomic start;\n \n static inline int\n poll_burst(void *args)\n@@ -575,7 +575,7 @@ enum {\n \t\tnum[portid] = pkt_per_port;\n \t}\n \n-\trte_wait_until_equal_64(&start, 1, __ATOMIC_ACQUIRE);\n+\trte_wait_until_equal_64(&start, 1, memory_order_acquire);\n \n \tcur_tsc = rte_rdtsc();\n \twhile (total) {\n@@ -629,9 +629,9 @@ enum {\n \n \t/* only when polling first */\n \tif (flags == SC_BURST_POLL_FIRST)\n-\t\t__atomic_store_n(&start, 1, __ATOMIC_RELAXED);\n+\t\tatomic_store_explicit(&start, 1, memory_order_relaxed);\n \telse\n-\t\t__atomic_store_n(&start, 0, __ATOMIC_RELAXED);\n+\t\tatomic_store_explicit(&start, 0, memory_order_relaxed);\n \n \t/* start polling thread\n \t * if in POLL_FIRST mode, poll once launched;\n@@ -655,7 +655,7 @@ enum {\n \n \t/* only when polling second  */\n \tif (flags == SC_BURST_XMIT_FIRST)\n-\t\t__atomic_store_n(&start, 1, __ATOMIC_RELEASE);\n+\t\tatomic_store_explicit(&start, 1, memory_order_release);\n \n \t/* wait for polling finished */\n \tdiff_tsc = rte_eal_wait_lcore(lcore);\ndiff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c\nindex 3972fd9..f0f5ef1 100644\n--- a/app/test/test_ring_perf.c\n+++ b/app/test/test_ring_perf.c\n@@ -320,7 +320,7 @@ struct thread_params {\n \treturn 0;\n }\n \n-static uint32_t synchro;\n+static uint32_t _Atomic synchro;\n static uint64_t queue_count[RTE_MAX_LCORE];\n \n #define TIME_MS 100\n@@ -342,7 +342,7 @@ struct thread_params {\n \n \t/* wait synchro for workers */\n \tif (lcore != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32(&synchro, 1, memory_order_relaxed);\n \n \tbegin = rte_get_timer_cycles();\n \twhile (time_diff < hz * TIME_MS / 1000) {\n@@ -397,12 +397,12 @@ struct thread_params {\n \t\tparam.r = r;\n \n \t\t/* clear synchro and start workers */\n-\t\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\t\tatomic_store_explicit(&synchro, 0, memory_order_relaxed);\n \t\tif (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)\n \t\t\treturn -1;\n \n \t\t/* start synchro and launch test on main */\n-\t\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\t\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \t\tlcore_f(&param);\n \n \t\trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c\nindex 4ae0bf8..dfbd0d6 100644\n--- a/app/test/test_rwlock.c\n+++ b/app/test/test_rwlock.c\n@@ -35,7 +35,7 @@\n \n static rte_rwlock_t sl;\n static rte_rwlock_t sl_tab[RTE_MAX_LCORE];\n-static uint32_t synchro;\n+static uint32_t _Atomic synchro;\n \n enum {\n \tLC_TYPE_RDLOCK,\n@@ -101,7 +101,7 @@ struct try_rwlock_lcore {\n \n \t/* wait synchro for workers */\n \tif (lcore != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32(&synchro, 1, memory_order_relaxed);\n \n \tbegin = rte_rdtsc_precise();\n \twhile (lcount < MAX_LOOP) {\n@@ -134,12 +134,12 @@ struct try_rwlock_lcore {\n \tprintf(\"\\nRwlock Perf Test on %u cores...\\n\", rte_lcore_count());\n \n \t/* clear synchro and start workers */\n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 0, memory_order_relaxed);\n \tif (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)\n \t\treturn -1;\n \n \t/* start synchro and launch test 
on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \tload_loop_fn(NULL);\n \n \trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c\nindex 3f59372..38724a1 100644\n--- a/app/test/test_spinlock.c\n+++ b/app/test/test_spinlock.c\n@@ -48,7 +48,7 @@\n static rte_spinlock_recursive_t slr;\n static unsigned count = 0;\n \n-static uint32_t synchro;\n+static uint32_t _Atomic synchro;\n \n static int\n test_spinlock_per_core(__rte_unused void *arg)\n@@ -110,7 +110,7 @@\n \n \t/* wait synchro for workers */\n \tif (lcore != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32(&synchro, 1, memory_order_relaxed);\n \n \tbegin = rte_get_timer_cycles();\n \twhile (lcount < MAX_LOOP) {\n@@ -149,11 +149,11 @@\n \tprintf(\"\\nTest with lock on %u cores...\\n\", rte_lcore_count());\n \n \t/* Clear synchro and start workers */\n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 0, memory_order_relaxed);\n \trte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);\n \n \t/* start synchro and launch test on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \tload_loop_fn(&lock);\n \n \trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_stack_perf.c b/app/test/test_stack_perf.c\nindex 1eae00a..67510d6 100644\n--- a/app/test/test_stack_perf.c\n+++ b/app/test/test_stack_perf.c\n@@ -23,7 +23,7 @@\n  */\n static volatile unsigned int bulk_sizes[] = {8, MAX_BURST};\n \n-static uint32_t lcore_barrier;\n+static uint32_t _Atomic lcore_barrier;\n \n struct lcore_pair {\n \tunsigned int c1;\n@@ -143,8 +143,8 @@ struct thread_args {\n \ts = args->s;\n \tsize = args->sz;\n \n-\t__atomic_fetch_sub(&lcore_barrier, 1, __ATOMIC_RELAXED);\n-\trte_wait_until_equal_32(&lcore_barrier, 0, __ATOMIC_RELAXED);\n+\tatomic_fetch_sub_explicit(&lcore_barrier, 1, memory_order_relaxed);\n+\trte_wait_until_equal_32(&lcore_barrier, 0, memory_order_relaxed);\n \n \tuint64_t start = rte_rdtsc();\n \n@@ -173,7 +173,7 @@ struct thread_args {\n \tunsigned int i;\n \n \tfor (i = 0; i < RTE_DIM(bulk_sizes); i++) {\n-\t\t__atomic_store_n(&lcore_barrier, 2, __ATOMIC_RELAXED);\n+\t\tatomic_store_explicit(&lcore_barrier, 2, memory_order_relaxed);\n \n \t\targs[0].sz = args[1].sz = bulk_sizes[i];\n \t\targs[0].s = args[1].s = s;\n@@ -206,7 +206,7 @@ struct thread_args {\n \t\tint cnt = 0;\n \t\tdouble avg;\n \n-\t\t__atomic_store_n(&lcore_barrier, n, __ATOMIC_RELAXED);\n+\t\tatomic_store_explicit(&lcore_barrier, n, memory_order_relaxed);\n \n \t\tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n \t\t\tif (++cnt >= n)\n@@ -300,7 +300,7 @@ struct thread_args {\n \tstruct lcore_pair cores;\n \tstruct rte_stack *s;\n \n-\t__atomic_store_n(&lcore_barrier, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&lcore_barrier, 0, memory_order_relaxed);\n \n \ts = rte_stack_create(STACK_NAME, STACK_SIZE, rte_socket_id(), flags);\n \tif (s == NULL) {\ndiff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c\nindex 242c136..f12d1e5 100644\n--- a/app/test/test_ticketlock.c\n+++ b/app/test/test_ticketlock.c\n@@ -48,7 +48,7 @@\n static rte_ticketlock_recursive_t tlr;\n static unsigned int count;\n \n-static uint32_t synchro;\n+static uint32_t _Atomic synchro;\n \n static int\n test_ticketlock_per_core(__rte_unused void *arg)\n@@ -111,7 +111,7 @@\n \n \t/* wait synchro for workers */\n 
\tif (lcore != rte_get_main_lcore())\n-\t\trte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);\n+\t\trte_wait_until_equal_32(&synchro, 1, memory_order_relaxed);\n \n \tbegin = rte_rdtsc_precise();\n \twhile (lcore_count[lcore] < MAX_LOOP) {\n@@ -153,11 +153,11 @@\n \tprintf(\"\\nTest with lock on %u cores...\\n\", rte_lcore_count());\n \n \t/* Clear synchro and start workers */\n-\t__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 0, memory_order_relaxed);\n \trte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);\n \n \t/* start synchro and launch test on main */\n-\t__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&synchro, 1, memory_order_relaxed);\n \tload_loop_fn(&lock);\n \n \trte_eal_mp_wait_lcore();\ndiff --git a/app/test/test_timer.c b/app/test/test_timer.c\nindex 0c36dc9..cf89a19 100644\n--- a/app/test/test_timer.c\n+++ b/app/test/test_timer.c\n@@ -202,7 +202,7 @@ struct mytimerinfo {\n \n /* Need to synchronize worker lcores through multiple steps. */\n enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };\n-static uint16_t lcore_state[RTE_MAX_LCORE];\n+static uint16_t _Atomic lcore_state[RTE_MAX_LCORE];\n \n static void\n main_init_workers(void)\n@@ -210,7 +210,7 @@ struct mytimerinfo {\n \tunsigned i;\n \n \tRTE_LCORE_FOREACH_WORKER(i) {\n-\t\t__atomic_store_n(&lcore_state[i], WORKER_WAITING, __ATOMIC_RELAXED);\n+\t\tatomic_store_explicit(&lcore_state[i], WORKER_WAITING, memory_order_relaxed);\n \t}\n }\n \n@@ -220,10 +220,10 @@ struct mytimerinfo {\n \tunsigned i;\n \n \tRTE_LCORE_FOREACH_WORKER(i) {\n-\t\t__atomic_store_n(&lcore_state[i], WORKER_RUN_SIGNAL, __ATOMIC_RELEASE);\n+\t\tatomic_store_explicit(&lcore_state[i], WORKER_RUN_SIGNAL, memory_order_release);\n \t}\n \tRTE_LCORE_FOREACH_WORKER(i) {\n-\t\trte_wait_until_equal_16(&lcore_state[i], WORKER_RUNNING, __ATOMIC_ACQUIRE);\n+\t\trte_wait_until_equal_16(&lcore_state[i], WORKER_RUNNING, memory_order_acquire);\n \t}\n }\n \n@@ -233,7 +233,7 @@ struct mytimerinfo {\n \tunsigned i;\n \n \tRTE_LCORE_FOREACH_WORKER(i) {\n-\t\trte_wait_until_equal_16(&lcore_state[i], WORKER_FINISHED, __ATOMIC_ACQUIRE);\n+\t\trte_wait_until_equal_16(&lcore_state[i], WORKER_FINISHED, memory_order_acquire);\n \t}\n }\n \n@@ -242,8 +242,8 @@ struct mytimerinfo {\n {\n \tunsigned lcore_id = rte_lcore_id();\n \n-\trte_wait_until_equal_16(&lcore_state[lcore_id], WORKER_RUN_SIGNAL, __ATOMIC_ACQUIRE);\n-\t__atomic_store_n(&lcore_state[lcore_id], WORKER_RUNNING, __ATOMIC_RELEASE);\n+\trte_wait_until_equal_16(&lcore_state[lcore_id], WORKER_RUN_SIGNAL, memory_order_acquire);\n+\tatomic_store_explicit(&lcore_state[lcore_id], WORKER_RUNNING, memory_order_release);\n }\n \n static void\n@@ -251,7 +251,7 @@ struct mytimerinfo {\n {\n \tunsigned lcore_id = rte_lcore_id();\n \n-\t__atomic_store_n(&lcore_state[lcore_id], WORKER_FINISHED, __ATOMIC_RELEASE);\n+\tatomic_store_explicit(&lcore_state[lcore_id], WORKER_FINISHED, memory_order_release);\n }\n \n \ndiff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c\nindex 69392b5..e1fa674 100644\n--- a/drivers/event/opdl/opdl_ring.c\n+++ b/drivers/event/opdl/opdl_ring.c\n@@ -52,7 +52,7 @@ struct shared_state {\n \tuint32_t head;  /* Head sequence number (for multi thread operation) */\n \tchar _pad2[RTE_CACHE_LINE_SIZE * 3];\n \tstruct opdl_stage *stage;  /* back pointer */\n-\tuint32_t tail;  /* Tail sequence number */\n+\tuint32_t _Atomic tail;  /* Tail sequence number */\n \tchar 
_pad3[RTE_CACHE_LINE_SIZE * 2];\n } __rte_cache_aligned;\n \n@@ -169,7 +169,7 @@ struct opdl_ring {\n {\n \tuint32_t i;\n \tuint32_t this_tail = s->shared.tail;\n-\tuint32_t min_seq = __atomic_load_n(&s->deps[0]->tail, __ATOMIC_ACQUIRE);\n+\tuint32_t min_seq = atomic_load_explicit(&s->deps[0]->tail, memory_order_acquire);\n \t/* Input stage sequence numbers are greater than the sequence numbers of\n \t * its dependencies so an offset of t->num_slots is needed when\n \t * calculating available slots and also the condition which is used to\n@@ -180,16 +180,16 @@ struct opdl_ring {\n \tif (is_input_stage(s)) {\n \t\twrap = s->num_slots;\n \t\tfor (i = 1; i < s->num_deps; i++) {\n-\t\t\tuint32_t seq = __atomic_load_n(&s->deps[i]->tail,\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\tuint32_t seq = atomic_load_explicit(&s->deps[i]->tail,\n+\t\t\t\t\tmemory_order_acquire);\n \t\t\tif ((this_tail - seq) > (this_tail - min_seq))\n \t\t\t\tmin_seq = seq;\n \t\t}\n \t} else {\n \t\twrap = 0;\n \t\tfor (i = 1; i < s->num_deps; i++) {\n-\t\t\tuint32_t seq = __atomic_load_n(&s->deps[i]->tail,\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\tuint32_t seq = atomic_load_explicit(&s->deps[i]->tail,\n+\t\t\t\t\tmemory_order_acquire);\n \t\t\tif ((seq - this_tail) < (min_seq - this_tail))\n \t\t\t\tmin_seq = seq;\n \t\t}\n@@ -299,7 +299,8 @@ struct opdl_ring {\n \tcopy_entries_in(t, head, entries, num_entries);\n \n \ts->head += num_entries;\n-\t__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);\n+\tatomic_store_explicit(&s->shared.tail, s->head,\n+\t\tmemory_order_release);\n \n \treturn num_entries;\n }\n@@ -382,18 +383,18 @@ struct opdl_ring {\n \t\t/* There should be no race condition here. If shared.tail\n \t\t * matches, no other core can update it until this one does.\n \t\t */\n-\t\tif (__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) ==\n-\t\t\t\ttail) {\n+\t\tif (atomic_load_explicit(&s->shared.tail,\n+\t\t\tmemory_order_acquire) == tail) {\n \t\t\tif (num_entries >= (head - tail)) {\n \t\t\t\tclaim_mgr_remove(disclaims);\n-\t\t\t\t__atomic_store_n(&s->shared.tail, head,\n-\t\t\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\tatomic_store_explicit(&s->shared.tail, head,\n+\t\t\t\t\t\tmemory_order_release);\n \t\t\t\tnum_entries -= (head - tail);\n \t\t\t} else {\n \t\t\t\tclaim_mgr_move_tail(disclaims, num_entries);\n-\t\t\t\t__atomic_store_n(&s->shared.tail,\n+\t\t\t\tatomic_store_explicit(&s->shared.tail,\n \t\t\t\t\t\tnum_entries + tail,\n-\t\t\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\t\t\tmemory_order_release);\n \t\t\t\tnum_entries = 0;\n \t\t\t}\n \t\t} else if (block == false)\n@@ -473,10 +474,11 @@ struct opdl_ring {\n \t/* If another thread started inputting before this one, but hasn't\n \t * finished, we need to wait for it to complete to update the tail.\n \t */\n-\trte_wait_until_equal_32(&s->shared.tail, old_head, __ATOMIC_ACQUIRE);\n+\trte_wait_until_equal_32(&s->shared.tail, old_head,\n+\t\tmemory_order_acquire);\n \n-\t__atomic_store_n(&s->shared.tail, old_head + num_entries,\n-\t\t\t__ATOMIC_RELEASE);\n+\tatomic_store_explicit(&s->shared.tail, old_head + num_entries,\n+\t\t\tmemory_order_release);\n \n \treturn num_entries;\n }\n@@ -628,8 +630,8 @@ struct opdl_ring {\n \t\t\t\tnum_entries, s->head - old_tail);\n \t\tnum_entries = s->head - old_tail;\n \t}\n-\t__atomic_store_n(&s->shared.tail, num_entries + old_tail,\n-\t\t\t__ATOMIC_RELEASE);\n+\tatomic_store_explicit(&s->shared.tail, num_entries + old_tail,\n+\t\t\tmemory_order_release);\n }\n \n uint32_t\n@@ -658,7 +660,8 @@ struct opdl_ring {\n 
\tcopy_entries_in(t, head, entries, num_entries);\n \n \ts->head += num_entries;\n-\t__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);\n+\tatomic_store_explicit(&s->shared.tail, s->head,\n+\t\tmemory_order_release);\n \n \treturn num_entries;\n \n@@ -677,7 +680,8 @@ struct opdl_ring {\n \tcopy_entries_out(t, head, entries, num_entries);\n \n \ts->head += num_entries;\n-\t__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);\n+\tatomic_store_explicit(&s->shared.tail, s->head,\n+\t\tmemory_order_release);\n \n \treturn num_entries;\n }\n@@ -756,7 +760,8 @@ struct opdl_ring {\n \t\treturn 0;\n \t}\n \tif (s->threadsafe == false) {\n-\t\t__atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);\n+\t\tatomic_store_explicit(&s->shared.tail, s->head,\n+\t\t\tmemory_order_release);\n \t\ts->seq += s->num_claimed;\n \t\ts->shadow_head = s->head;\n \t\ts->num_claimed = 0;\ndiff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c\nindex defa551..0db9505 100644\n--- a/drivers/net/thunderx/nicvf_rxtx.c\n+++ b/drivers/net/thunderx/nicvf_rxtx.c\n@@ -385,9 +385,10 @@\n \t\tltail++;\n \t}\n \n-\trte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);\n+\trte_wait_until_equal_32(&rbdr->tail, next_tail,\n+\t\tmemory_order_relaxed);\n \n-\t__atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);\n+\tatomic_store_explicit(&rbdr->tail, ltail, memory_order_release);\n \tnicvf_addr_write(door, to_fill);\n \treturn to_fill;\n }\ndiff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h\nindex 13cf8fe..38c72b6 100644\n--- a/drivers/net/thunderx/nicvf_struct.h\n+++ b/drivers/net/thunderx/nicvf_struct.h\n@@ -20,7 +20,7 @@ struct nicvf_rbdr {\n \tstruct rbdr_entry_t *desc;\n \tnicvf_iova_addr_t phys;\n \tuint32_t buffsz;\n-\tuint32_t tail;\n+\tuint32_t _Atomic tail;\n \tuint32_t next_tail;\n \tuint32_t head;\n \tuint32_t qlen_mask;\ndiff --git a/lib/bpf/bpf_pkt.c b/lib/bpf/bpf_pkt.c\nindex ffd2db7..f5765c2 100644\n--- a/lib/bpf/bpf_pkt.c\n+++ b/lib/bpf/bpf_pkt.c\n@@ -25,7 +25,7 @@\n \n struct bpf_eth_cbi {\n \t/* used by both data & control path */\n-\tuint32_t use;    /*usage counter */\n+\tuint32_t _Atomic use;    /*usage counter */\n \tconst struct rte_eth_rxtx_callback *cb;  /* callback handle */\n \tstruct rte_bpf *bpf;\n \tstruct rte_bpf_jit jit;\n@@ -110,7 +110,7 @@ struct bpf_eth_cbh {\n \n \t/* in use, busy wait till current RX/TX iteration is finished */\n \tif ((puse & BPF_ETH_CBI_INUSE) != 0) {\n-\t\tRTE_WAIT_UNTIL_MASKED((uint32_t *)(uintptr_t)&cbi->use,\n+\t\tRTE_WAIT_UNTIL_MASKED(&cbi->use,\n \t\t\tUINT32_MAX, !=, puse, __ATOMIC_RELAXED);\n \t}\n }\ndiff --git a/lib/distributor/distributor_private.h b/lib/distributor/distributor_private.h\nindex 7101f63..3b43d3d 100644\n--- a/lib/distributor/distributor_private.h\n+++ b/lib/distributor/distributor_private.h\n@@ -52,7 +52,7 @@\n  * Only 64-bits of the memory is actually used though.\n  */\n union rte_distributor_buffer_single {\n-\tvolatile int64_t bufptr64;\n+\tint64_t _Atomic bufptr64;\n \tchar pad[RTE_CACHE_LINE_SIZE*3];\n } __rte_cache_aligned;\n \ndiff --git a/lib/distributor/rte_distributor_single.c b/lib/distributor/rte_distributor_single.c\nindex 2c77ac4..7a9a3d9 100644\n--- a/lib/distributor/rte_distributor_single.c\n+++ b/lib/distributor/rte_distributor_single.c\n@@ -32,10 +32,10 @@\n \tint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)\n \t\t\t| RTE_DISTRIB_GET_BUF;\n \tRTE_WAIT_UNTIL_MASKED(&buf->bufptr64, 
RTE_DISTRIB_FLAGS_MASK,\n-\t\t==, 0, __ATOMIC_RELAXED);\n+\t\t==, 0, memory_order_relaxed);\n \n \t/* Sync with distributor on GET_BUF flag. */\n-\t__atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);\n+\tatomic_store_explicit(&buf->bufptr64, req, memory_order_release);\n }\n \n struct rte_mbuf *\n@@ -44,7 +44,7 @@ struct rte_mbuf *\n {\n \tunion rte_distributor_buffer_single *buf = &d->bufs[worker_id];\n \t/* Sync with distributor. Acquire bufptr64. */\n-\tif (__atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE)\n+\tif (atomic_load_explicit(&buf->bufptr64, memory_order_acquire)\n \t\t& RTE_DISTRIB_GET_BUF)\n \t\treturn NULL;\n \n@@ -72,10 +72,10 @@ struct rte_mbuf *\n \tuint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)\n \t\t\t| RTE_DISTRIB_RETURN_BUF;\n \tRTE_WAIT_UNTIL_MASKED(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK,\n-\t\t==, 0, __ATOMIC_RELAXED);\n+\t\t==, 0, memory_order_relaxed);\n \n \t/* Sync with distributor on RETURN_BUF flag. */\n-\t__atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);\n+\tatomic_store_explicit(&buf->bufptr64, req, memory_order_release);\n \treturn 0;\n }\n \n@@ -119,7 +119,7 @@ struct rte_mbuf *\n \td->in_flight_tags[wkr] = 0;\n \td->in_flight_bitmask &= ~(1UL << wkr);\n \t/* Sync with worker. Release bufptr64. */\n-\t__atomic_store_n(&(d->bufs[wkr].bufptr64), 0, __ATOMIC_RELEASE);\n+\tatomic_store_explicit(&d->bufs[wkr].bufptr64, 0, memory_order_release);\n \tif (unlikely(d->backlog[wkr].count != 0)) {\n \t\t/* On return of a packet, we need to move the\n \t\t * queued packets for this core elsewhere.\n@@ -165,21 +165,21 @@ struct rte_mbuf *\n \tfor (wkr = 0; wkr < d->num_workers; wkr++) {\n \t\tuintptr_t oldbuf = 0;\n \t\t/* Sync with worker. Acquire bufptr64. */\n-\t\tconst int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),\n-\t\t\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\tconst int64_t data = atomic_load_explicit(&d->bufs[wkr].bufptr64,\n+\t\t\t\t\t\t\tmemory_order_acquire);\n \n \t\tif (data & RTE_DISTRIB_GET_BUF) {\n \t\t\tflushed++;\n \t\t\tif (d->backlog[wkr].count)\n \t\t\t\t/* Sync with worker. Release bufptr64. */\n-\t\t\t\t__atomic_store_n(&(d->bufs[wkr].bufptr64),\n+\t\t\t\tatomic_store_explicit(&d->bufs[wkr].bufptr64,\n \t\t\t\t\tbacklog_pop(&d->backlog[wkr]),\n-\t\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\t\tmemory_order_release);\n \t\t\telse {\n \t\t\t\t/* Sync with worker on GET_BUF flag. */\n-\t\t\t\t__atomic_store_n(&(d->bufs[wkr].bufptr64),\n+\t\t\t\tatomic_store_explicit(&d->bufs[wkr].bufptr64,\n \t\t\t\t\tRTE_DISTRIB_GET_BUF,\n-\t\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\t\tmemory_order_release);\n \t\t\t\td->in_flight_tags[wkr] = 0;\n \t\t\t\td->in_flight_bitmask &= ~(1UL << wkr);\n \t\t\t}\n@@ -217,8 +217,8 @@ struct rte_mbuf *\n \twhile (next_idx < num_mbufs || next_mb != NULL) {\n \t\tuintptr_t oldbuf = 0;\n \t\t/* Sync with worker. Acquire bufptr64. */\n-\t\tint64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),\n-\t\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\tint64_t data = atomic_load_explicit(&d->bufs[wkr].bufptr64,\n+\t\t\t\t\t\tmemory_order_acquire);\n \n \t\tif (!next_mb) {\n \t\t\tnext_mb = mbufs[next_idx++];\n@@ -264,15 +264,15 @@ struct rte_mbuf *\n \n \t\t\tif (d->backlog[wkr].count)\n \t\t\t\t/* Sync with worker. Release bufptr64. */\n-\t\t\t\t__atomic_store_n(&(d->bufs[wkr].bufptr64),\n+\t\t\t\tatomic_store_explicit(&d->bufs[wkr].bufptr64,\n \t\t\t\t\t\tbacklog_pop(&d->backlog[wkr]),\n-\t\t\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\t\t\tmemory_order_release);\n \n \t\t\telse {\n \t\t\t\t/* Sync with worker. Release bufptr64. 
 */\n-\t\t\t\t__atomic_store_n(&(d->bufs[wkr].bufptr64),\n+\t\t\t\tatomic_store_explicit(&d->bufs[wkr].bufptr64,\n \t\t\t\t\t\tnext_value,\n-\t\t\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\t\t\tmemory_order_release);\n \t\t\t\td->in_flight_tags[wkr] = new_tag;\n \t\t\t\td->in_flight_bitmask |= (1UL << wkr);\n \t\t\t\tnext_mb = NULL;\n@@ -294,8 +294,8 @@ struct rte_mbuf *\n \tfor (wkr = 0; wkr < d->num_workers; wkr++)\n \t\tif (d->backlog[wkr].count &&\n \t\t\t\t/* Sync with worker. Acquire bufptr64. */\n-\t\t\t\t(__atomic_load_n(&(d->bufs[wkr].bufptr64),\n-\t\t\t\t__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {\n+\t\t\t\t(atomic_load_explicit(&d->bufs[wkr].bufptr64,\n+\t\t\t\tmemory_order_acquire) & RTE_DISTRIB_GET_BUF)) {\n \n \t\t\tint64_t oldbuf = d->bufs[wkr].bufptr64 >>\n \t\t\t\t\tRTE_DISTRIB_FLAG_BITS;\n@@ -303,9 +303,9 @@ struct rte_mbuf *\n \t\t\tstore_return(oldbuf, d, &ret_start, &ret_count);\n \n \t\t\t/* Sync with worker. Release bufptr64. */\n-\t\t\t__atomic_store_n(&(d->bufs[wkr].bufptr64),\n+\t\t\tatomic_store_explicit(&d->bufs[wkr].bufptr64,\n \t\t\t\tbacklog_pop(&d->backlog[wkr]),\n-\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\tmemory_order_release);\n \t\t}\n \n \td->returns.start = ret_start;\ndiff --git a/lib/eal/arm/include/rte_pause_64.h b/lib/eal/arm/include/rte_pause_64.h\nindex 5f70e97..96ad050 100644\n--- a/lib/eal/arm/include/rte_pause_64.h\n+++ b/lib/eal/arm/include/rte_pause_64.h\n@@ -148,13 +148,13 @@ static inline void rte_pause(void)\n }\n \n static __rte_always_inline void\n-rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,\n-\t\tint memorder)\n+rte_wait_until_equal_16(volatile uint16_t _Atomic *addr, uint16_t expected,\n+\t\tmemory_order memorder)\n {\n \tuint16_t value;\n \n-\tRTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&\n-\t\tmemorder != __ATOMIC_RELAXED);\n+\tRTE_BUILD_BUG_ON(memorder != memory_order_acquire &&\n+\t\tmemorder != memory_order_relaxed);\n \n \t__RTE_ARM_LOAD_EXC_16(addr, value, memorder)\n \tif (value != expected) {\n@@ -167,13 +167,13 @@ static inline void rte_pause(void)\n }\n \n static __rte_always_inline void\n-rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,\n-\t\tint memorder)\n+rte_wait_until_equal_32(volatile uint32_t _Atomic *addr, uint32_t expected,\n+\t\tmemory_order memorder)\n {\n \tuint32_t value;\n \n-\tRTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&\n-\t\tmemorder != __ATOMIC_RELAXED);\n+\tRTE_BUILD_BUG_ON(memorder != memory_order_acquire &&\n+\t\tmemorder != memory_order_relaxed);\n \n \t__RTE_ARM_LOAD_EXC_32(addr, value, memorder)\n \tif (value != expected) {\n@@ -186,13 +186,13 @@ static inline void rte_pause(void)\n }\n \n static __rte_always_inline void\n-rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,\n-\t\tint memorder)\n+rte_wait_until_equal_64(volatile uint64_t _Atomic *addr, uint64_t expected,\n+\t\tmemory_order memorder)\n {\n \tuint64_t value;\n \n-\tRTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&\n-\t\tmemorder != __ATOMIC_RELAXED);\n+\tRTE_BUILD_BUG_ON(memorder != memory_order_acquire &&\n+\t\tmemorder != memory_order_relaxed);\n \n \t__RTE_ARM_LOAD_EXC_64(addr, value, memorder)\n \tif (value != expected) {\n@@ -206,8 +206,8 @@ static inline void rte_pause(void)\n \n #define RTE_WAIT_UNTIL_MASKED(addr, mask, cond, expected, memorder) do {  \\\n \tRTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));                \\\n-\tRTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&                  \\\n-\t\tmemorder != __ATOMIC_RELAXED);                            
\\\n+\tRTE_BUILD_BUG_ON(memorder != memory_order_acquire &&              \\\n+\t\tmemorder != memory_order_relaxed);                        \\\n \tconst uint32_t size = sizeof(*(addr)) << 3;                       \\\n \ttypeof(*(addr)) expected_value = (expected);                      \\\n \ttypeof(*(addr)) value;                                            \\\ndiff --git a/lib/eal/common/eal_memcfg.h b/lib/eal/common/eal_memcfg.h\nindex 8889ba0..0decc29 100644\n--- a/lib/eal/common/eal_memcfg.h\n+++ b/lib/eal/common/eal_memcfg.h\n@@ -18,7 +18,7 @@\n  * Memory configuration shared across multiple processes.\n  */\n struct rte_mem_config {\n-\tvolatile uint32_t magic;   /**< Magic number - sanity check. */\n+\tuint32_t _Atomic magic;   /**< Magic number - sanity check. */\n \tuint32_t version;\n \t/**< Prevent secondary processes using different DPDK versions. */\n \ndiff --git a/lib/eal/include/generic/rte_pause.h b/lib/eal/include/generic/rte_pause.h\nindex ec1f418..19a000e 100644\n--- a/lib/eal/include/generic/rte_pause.h\n+++ b/lib/eal/include/generic/rte_pause.h\n@@ -12,10 +12,10 @@\n  * CPU pause operation.\n  */\n \n+#include <stdatomic.h>\n #include <stdint.h>\n #include <assert.h>\n #include <rte_common.h>\n-#include <rte_atomic.h>\n \n /**\n  * Pause CPU execution for a short while\n@@ -35,13 +35,13 @@\n  *  A 16-bit expected value to be in the memory location.\n  * @param memorder\n  *  Two different memory orders that can be specified:\n- *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to\n+ *  memory_order_acquire and memory_order_relaxed. These map to\n  *  C++11 memory orders with the same names, see the C++11 standard or\n  *  the GCC wiki on atomic synchronization for detailed definition.\n  */\n static __rte_always_inline void\n-rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,\n-\t\tint memorder);\n+rte_wait_until_equal_16(volatile uint16_t _Atomic *addr, uint16_t expected,\n+\t\tmemory_order memorder);\n \n /**\n  * Wait for *addr to be updated with a 32-bit expected value, with a relaxed\n@@ -53,13 +53,13 @@\n  *  A 32-bit expected value to be in the memory location.\n  * @param memorder\n  *  Two different memory orders that can be specified:\n- *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to\n+ *  memory_order_acquire and memory_order_relaxed. These map to\n  *  C++11 memory orders with the same names, see the C++11 standard or\n  *  the GCC wiki on atomic synchronization for detailed definition.\n  */\n static __rte_always_inline void\n-rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,\n-\t\tint memorder);\n+rte_wait_until_equal_32(volatile uint32_t _Atomic *addr, uint32_t expected,\n+\t\tmemory_order memorder);\n \n /**\n  * Wait for *addr to be updated with a 64-bit expected value, with a relaxed\n@@ -71,42 +71,42 @@\n  *  A 64-bit expected value to be in the memory location.\n  * @param memorder\n  *  Two different memory orders that can be specified:\n- *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to\n+ *  memory_order_acquire and memory_order_relaxed. 
These map to\n  *  C++11 memory orders with the same names, see the C++11 standard or\n  *  the GCC wiki on atomic synchronization for detailed definition.\n  */\n static __rte_always_inline void\n-rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,\n-\t\tint memorder);\n+rte_wait_until_equal_64(volatile uint64_t _Atomic *addr, uint64_t expected,\n+\t\tmemory_order memorder);\n \n #ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED\n static __rte_always_inline void\n-rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,\n-\t\tint memorder)\n+rte_wait_until_equal_16(volatile uint16_t _Atomic *addr, uint16_t expected,\n+\t\tmemory_order memorder)\n {\n-\tassert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);\n+\tassert(memorder == memory_order_acquire || memorder == memory_order_relaxed);\n \n-\twhile (__atomic_load_n(addr, memorder) != expected)\n+\twhile (atomic_load_explicit(addr, memorder) != expected)\n \t\trte_pause();\n }\n \n static __rte_always_inline void\n-rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,\n-\t\tint memorder)\n+rte_wait_until_equal_32(volatile uint32_t _Atomic *addr, uint32_t expected,\n+\t\tmemory_order memorder)\n {\n-\tassert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);\n+\tassert(memorder == memory_order_acquire || memorder == memory_order_relaxed);\n \n-\twhile (__atomic_load_n(addr, memorder) != expected)\n+\twhile (atomic_load_explicit(addr, memorder) != expected)\n \t\trte_pause();\n }\n \n static __rte_always_inline void\n-rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,\n-\t\tint memorder)\n+rte_wait_until_equal_64(volatile uint64_t _Atomic *addr, uint64_t expected,\n+\t\tmemory_order memorder)\n {\n-\tassert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);\n+\tassert(memorder == memory_order_acquire || memorder == memory_order_relaxed);\n \n-\twhile (__atomic_load_n(addr, memorder) != expected)\n+\twhile (atomic_load_explicit(addr, memorder) != expected)\n \t\trte_pause();\n }\n \n@@ -124,16 +124,16 @@\n  *  An expected value to be in the memory location.\n  * @param memorder\n  *  Two different memory orders that can be specified:\n- *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to\n+ *  memory_order_acquire and memory_order_relaxed. 
These map to\n  *  C++11 memory orders with the same names, see the C++11 standard or\n  *  the GCC wiki on atomic synchronization for detailed definition.\n  */\n #define RTE_WAIT_UNTIL_MASKED(addr, mask, cond, expected, memorder) do { \\\n \tRTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));               \\\n-\tRTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&                 \\\n-\t\tmemorder != __ATOMIC_RELAXED);                           \\\n+\tRTE_BUILD_BUG_ON(memorder != memory_order_acquire &&             \\\n+\t\tmemorder != memory_order_relaxed);                       \\\n \ttypeof(*(addr)) expected_value = (expected);                     \\\n-\twhile (!((__atomic_load_n((addr), (memorder)) & (mask)) cond     \\\n+\twhile (!((atomic_load_explicit((addr), (memorder)) & (mask)) cond \\\n \t\t\texpected_value))                                 \\\n \t\trte_pause();                                             \\\n } while (0)\ndiff --git a/lib/eal/include/rte_mcslock.h b/lib/eal/include/rte_mcslock.h\nindex a805cb2..7c227ec 100644\n--- a/lib/eal/include/rte_mcslock.h\n+++ b/lib/eal/include/rte_mcslock.h\n@@ -33,7 +33,7 @@\n  */\n typedef struct rte_mcslock {\n \tstruct rte_mcslock *next;\n-\tint locked; /* 1 if the queue locked, 0 otherwise */\n+\tuint32_t _Atomic locked; /* 1 if the queue locked, 0 otherwise */\n } rte_mcslock_t;\n \n /**\n@@ -53,7 +53,7 @@\n \trte_mcslock_t *prev;\n \n \t/* Init me node */\n-\t__atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED);\n+\tatomic_store_explicit(&me->locked, 1, memory_order_relaxed);\n \t__atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);\n \n \t/* If the queue is empty, the exchange operation is enough to acquire\n@@ -88,7 +88,7 @@\n \t * to spin on me->locked until the previous lock holder resets\n \t * the me->locked using mcslock_unlock().\n \t */\n-\trte_wait_until_equal_32((uint32_t *)&me->locked, 0, __ATOMIC_ACQUIRE);\n+\trte_wait_until_equal_32(&me->locked, 0, memory_order_acquire);\n }\n \n /**\n@@ -120,14 +120,14 @@\n \t\t/* More nodes added to the queue by other CPUs.\n \t\t * Wait until the next pointer is set.\n \t\t */\n-\t\tuintptr_t *next;\n-\t\tnext = (uintptr_t *)&me->next;\n+\t\tuintptr_t _Atomic *next;\n+\t\tnext = (uintptr_t _Atomic *)&me->next;\n \t\tRTE_WAIT_UNTIL_MASKED(next, UINTPTR_MAX, !=, 0,\n \t\t\t__ATOMIC_RELAXED);\n \t}\n \n \t/* Pass lock to next waiter. */\n-\t__atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE);\n+\tatomic_store_explicit(&me->next->locked, 0, memory_order_release);\n }\n \n /**\ndiff --git a/lib/eal/include/rte_pflock.h b/lib/eal/include/rte_pflock.h\nindex a3f7291..1c32512 100644\n--- a/lib/eal/include/rte_pflock.h\n+++ b/lib/eal/include/rte_pflock.h\n@@ -40,8 +40,8 @@\n  */\n struct rte_pflock {\n \tstruct {\n-\t\tuint16_t in;\n-\t\tuint16_t out;\n+\t\tuint16_t _Atomic in;\n+\t\tuint16_t _Atomic out;\n \t} rd, wr;\n };\n typedef struct rte_pflock rte_pflock_t;\n@@ -116,14 +116,14 @@ struct rte_pflock {\n \t * If no writer is present, then the operation has completed\n \t * successfully.\n \t */\n-\tw = __atomic_fetch_add(&pf->rd.in, RTE_PFLOCK_RINC, __ATOMIC_ACQUIRE)\n+\tw = atomic_fetch_add_explicit(&pf->rd.in, RTE_PFLOCK_RINC, memory_order_acquire)\n \t\t& RTE_PFLOCK_WBITS;\n \tif (w == 0)\n \t\treturn;\n \n \t/* Wait for current write phase to complete. 
*/\n \tRTE_WAIT_UNTIL_MASKED(&pf->rd.in, RTE_PFLOCK_WBITS, !=, w,\n-\t\t__ATOMIC_ACQUIRE);\n+\t\tmemory_order_acquire);\n }\n \n /**\n@@ -139,7 +139,7 @@ struct rte_pflock {\n static inline void\n rte_pflock_read_unlock(rte_pflock_t *pf)\n {\n-\t__atomic_fetch_add(&pf->rd.out, RTE_PFLOCK_RINC, __ATOMIC_RELEASE);\n+\tatomic_fetch_add_explicit(&pf->rd.out, RTE_PFLOCK_RINC, memory_order_release);\n }\n \n /**\n@@ -160,8 +160,8 @@ struct rte_pflock {\n \t/* Acquire ownership of write-phase.\n \t * This is same as rte_ticketlock_lock().\n \t */\n-\tticket = __atomic_fetch_add(&pf->wr.in, 1, __ATOMIC_RELAXED);\n-\trte_wait_until_equal_16(&pf->wr.out, ticket, __ATOMIC_ACQUIRE);\n+\tticket = atomic_fetch_add_explicit(&pf->wr.in, 1, memory_order_relaxed);\n+\trte_wait_until_equal_16(&pf->wr.out, ticket, memory_order_acquire);\n \n \t/*\n \t * Acquire ticket on read-side in order to allow them\n@@ -172,10 +172,10 @@ struct rte_pflock {\n \t * speculatively.\n \t */\n \tw = RTE_PFLOCK_PRES | (ticket & RTE_PFLOCK_PHID);\n-\tticket = __atomic_fetch_add(&pf->rd.in, w, __ATOMIC_RELAXED);\n+\tticket = atomic_fetch_add_explicit(&pf->rd.in, w, memory_order_relaxed);\n \n \t/* Wait for any pending readers to flush. */\n-\trte_wait_until_equal_16(&pf->rd.out, ticket, __ATOMIC_ACQUIRE);\n+\trte_wait_until_equal_16(&pf->rd.out, ticket, memory_order_acquire);\n }\n \n /**\n@@ -192,10 +192,10 @@ struct rte_pflock {\n rte_pflock_write_unlock(rte_pflock_t *pf)\n {\n \t/* Migrate from write phase to read phase. */\n-\t__atomic_fetch_and(&pf->rd.in, RTE_PFLOCK_LSB, __ATOMIC_RELEASE);\n+\tatomic_fetch_and_explicit(&pf->rd.in, RTE_PFLOCK_LSB, memory_order_release);\n \n \t/* Allow other writers to continue. */\n-\t__atomic_fetch_add(&pf->wr.out, 1, __ATOMIC_RELEASE);\n+\tatomic_fetch_add_explicit(&pf->wr.out, 1, memory_order_release);\n }\n \n #ifdef __cplusplus\ndiff --git a/lib/eal/include/rte_ticketlock.h b/lib/eal/include/rte_ticketlock.h\nindex 5db0d8a..5206b62 100644\n--- a/lib/eal/include/rte_ticketlock.h\n+++ b/lib/eal/include/rte_ticketlock.h\n@@ -31,7 +31,7 @@\n typedef union {\n \tuint32_t tickets;\n \tstruct {\n-\t\tuint16_t current;\n+\t\tuint16_t _Atomic current;\n \t\tuint16_t next;\n \t} s;\n } rte_ticketlock_t;\n@@ -63,7 +63,7 @@\n rte_ticketlock_lock(rte_ticketlock_t *tl)\n {\n \tuint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED);\n-\trte_wait_until_equal_16(&tl->s.current, me, __ATOMIC_ACQUIRE);\n+\trte_wait_until_equal_16(&tl->s.current, me, memory_order_acquire);\n }\n \n /**\n@@ -75,8 +75,8 @@\n static inline void\n rte_ticketlock_unlock(rte_ticketlock_t *tl)\n {\n-\tuint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED);\n-\t__atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE);\n+\tuint16_t i = atomic_load_explicit(&tl->s.current, memory_order_relaxed);\n+\tatomic_store_explicit(&tl->s.current, i + 1, memory_order_release);\n }\n \n /**\ndiff --git a/lib/eal/loongarch/include/rte_pause.h b/lib/eal/loongarch/include/rte_pause.h\nindex 4302e1b..2987a1b 100644\n--- a/lib/eal/loongarch/include/rte_pause.h\n+++ b/lib/eal/loongarch/include/rte_pause.h\n@@ -9,8 +9,6 @@\n extern \"C\" {\n #endif\n \n-#include \"rte_atomic.h\"\n-\n #include \"generic/rte_pause.h\"\n \n static inline void rte_pause(void)\ndiff --git a/lib/eal/ppc/include/rte_pause.h b/lib/eal/ppc/include/rte_pause.h\nindex 16e47ce..54bbbc7 100644\n--- a/lib/eal/ppc/include/rte_pause.h\n+++ b/lib/eal/ppc/include/rte_pause.h\n@@ -9,8 +9,6 @@\n extern \"C\" {\n #endif\n \n-#include \"rte_atomic.h\"\n-\n #include 
\"generic/rte_pause.h\"\n \n static inline void rte_pause(void)\ndiff --git a/lib/eal/riscv/include/rte_pause.h b/lib/eal/riscv/include/rte_pause.h\nindex cb8e9ca..ffa7158 100644\n--- a/lib/eal/riscv/include/rte_pause.h\n+++ b/lib/eal/riscv/include/rte_pause.h\n@@ -11,8 +11,6 @@\n extern \"C\" {\n #endif\n \n-#include \"rte_atomic.h\"\n-\n #include \"generic/rte_pause.h\"\n \n static inline void rte_pause(void)\ndiff --git a/lib/ring/rte_ring_c11_pvt.h b/lib/ring/rte_ring_c11_pvt.h\nindex f895950..402f819 100644\n--- a/lib/ring/rte_ring_c11_pvt.h\n+++ b/lib/ring/rte_ring_c11_pvt.h\n@@ -91,14 +91,15 @@\n \t\t\treturn 0;\n \n \t\t*new_head = *old_head + n;\n-\t\tif (is_sp)\n-\t\t\tr->prod.head = *new_head, success = 1;\n-\t\telse\n+\t\tif (is_sp) {\n+\t\t\tr->prod.head = *new_head;\n+\t\t\tsuccess = 1;\n+\t\t} else\n \t\t\t/* on failure, *old_head is updated */\n-\t\t\tsuccess = __atomic_compare_exchange_n(&r->prod.head,\n+\t\t\tsuccess = atomic_compare_exchange_strong_explicit(&r->prod.head,\n \t\t\t\t\told_head, *new_head,\n-\t\t\t\t\t0, __ATOMIC_RELAXED,\n-\t\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\t\t\tmemory_order_relaxed,\n+\t\t\t\t\tmemory_order_relaxed);\n \t} while (unlikely(success == 0));\n \treturn n;\n }\n@@ -137,7 +138,7 @@\n \tint success;\n \n \t/* move cons.head atomically */\n-\t*old_head = __atomic_load_n(&r->cons.head, __ATOMIC_RELAXED);\n+\t*old_head = atomic_load_explicit(&r->cons.head, memory_order_relaxed);\n \tdo {\n \t\t/* Restore n as it may change every loop */\n \t\tn = max;\n@@ -166,14 +167,15 @@\n \t\t\treturn 0;\n \n \t\t*new_head = *old_head + n;\n-\t\tif (is_sc)\n-\t\t\tr->cons.head = *new_head, success = 1;\n-\t\telse\n+\t\tif (is_sc) {\n+\t\t\tr->cons.head = *new_head;\n+\t\t\tsuccess = 1;\n+\t\t} else\n \t\t\t/* on failure, *old_head will be updated */\n-\t\t\tsuccess = __atomic_compare_exchange_n(&r->cons.head,\n+\t\t\tsuccess = atomic_compare_exchange_strong_explicit(&r->cons.head,\n \t\t\t\t\t\t\told_head, *new_head,\n-\t\t\t\t\t\t\t0, __ATOMIC_RELAXED,\n-\t\t\t\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\t\t\t\t\tmemory_order_relaxed,\n+\t\t\t\t\t\t\tmemory_order_relaxed);\n \t} while (unlikely(success == 0));\n \treturn n;\n }\ndiff --git a/lib/ring/rte_ring_core.h b/lib/ring/rte_ring_core.h\nindex 82b2370..cf3cb84 100644\n--- a/lib/ring/rte_ring_core.h\n+++ b/lib/ring/rte_ring_core.h\n@@ -66,8 +66,8 @@ enum rte_ring_sync_type {\n  * but offset for *sync_type* and *tail* values should remain the same.\n  */\n struct rte_ring_headtail {\n-\tvolatile uint32_t head;      /**< prod/consumer head. */\n-\tvolatile uint32_t tail;      /**< prod/consumer tail. */\n+\tuint32_t _Atomic head;      /**< prod/consumer head. */\n+\tuint32_t _Atomic tail;      /**< prod/consumer tail. 
*/\n \tRTE_STD_C11\n \tunion {\n \t\t/** sync type of prod/cons */\ndiff --git a/lib/ring/rte_ring_generic_pvt.h b/lib/ring/rte_ring_generic_pvt.h\nindex 5acb6e5..12a3ca8 100644\n--- a/lib/ring/rte_ring_generic_pvt.h\n+++ b/lib/ring/rte_ring_generic_pvt.h\n@@ -89,11 +89,14 @@\n \t\t\treturn 0;\n \n \t\t*new_head = *old_head + n;\n-\t\tif (is_sp)\n-\t\t\tr->prod.head = *new_head, success = 1;\n-\t\telse\n-\t\t\tsuccess = rte_atomic32_cmpset(&r->prod.head,\n-\t\t\t\t\t*old_head, *new_head);\n+\t\tif (is_sp) {\n+\t\t\tr->prod.head = *new_head;\n+\t\t\tsuccess = 1;\n+\t\t} else\n+\t\t\t/* NOTE: review for potential ordering optimization */\n+\t\t\tsuccess = atomic_compare_exchange_strong_explicit(&r->prod.head,\n+\t\t\t\t\told_head, *new_head,\n+\t\t\t\t\tmemory_order_seq_cst, memory_order_seq_cst);\n \t} while (unlikely(success == 0));\n \treturn n;\n }\n@@ -162,8 +165,10 @@\n \t\t\trte_smp_rmb();\n \t\t\tsuccess = 1;\n \t\t} else {\n-\t\t\tsuccess = rte_atomic32_cmpset(&r->cons.head, *old_head,\n-\t\t\t\t\t*new_head);\n+\t\t\t/* NOTE: review for potential ordering optimization */\n+\t\t\tsuccess = atomic_compare_exchange_strong_explicit(&r->cons.head,\n+\t\t\t\t\told_head, *new_head,\n+\t\t\t\t\tmemory_order_seq_cst, memory_order_seq_cst);\n \t\t}\n \t} while (unlikely(success == 0));\n \treturn n;\ndiff --git a/lib/ring/rte_ring_peek_elem_pvt.h b/lib/ring/rte_ring_peek_elem_pvt.h\nindex bb0a7d5..6707e38 100644\n--- a/lib/ring/rte_ring_peek_elem_pvt.h\n+++ b/lib/ring/rte_ring_peek_elem_pvt.h\n@@ -59,7 +59,7 @@\n \n \tpos = tail + num;\n \tht->head = pos;\n-\t__atomic_store_n(&ht->tail, pos, __ATOMIC_RELEASE);\n+\tatomic_store_explicit(&ht->tail, pos, memory_order_release);\n }\n \n /**\n",
    "prefixes": [
        "v2",
        "3/4"
    ]
}
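
For context, a minimal caller-side sketch follows, assuming the stdatomic signatures introduced by the hunks above (volatile _Atomic pointer plus a memory_order argument). It is not part of the patch; the flag and function names are hypothetical and only illustrate how a caller that previously passed __ATOMIC_ACQUIRE adapts to the converted API.

/* Hypothetical example: spinning on a C11 _Atomic flag with the patched API. */
#include <stdatomic.h>
#include <stdint.h>

#include <rte_pause.h>

static uint32_t _Atomic ready_flag;	/* hypothetical synchronization flag */

static void
wait_for_peer(void)
{
	/* Previously:
	 *   rte_wait_until_equal_32((volatile uint32_t *)&ready_flag, 1,
	 *                           __ATOMIC_ACQUIRE);
	 * With this series the memory order is a memory_order enumerator and
	 * the pointer parameter is _Atomic-qualified, so no cast is needed.
	 */
	rte_wait_until_equal_32(&ready_flag, 1, memory_order_acquire);
}

static void
signal_peer(void)
{
	/* Release store; pairs with the acquire wait above. */
	atomic_store_explicit(&ready_flag, 1, memory_order_release);
}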