get:
Show a patch.

patch:
Update a patch (partial update of writable fields).

put:
Update a patch (full update).
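
For illustration, a minimal sketch of driving these operations from a script with Python and the requests library against the endpoint shown below. The BASE constant, the token value and the target state slug are placeholder assumptions; updates require an authenticated account with maintainer rights on the project.

    import requests

    BASE = "http://patchwork.dpdk.org/api"
    TOKEN = "REPLACE-WITH-YOUR-API-TOKEN"  # placeholder; generated from a Patchwork user profile

    # get: fetch a single patch as JSON
    resp = requests.get(f"{BASE}/patches/132679/", timeout=30)
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # patch: partially update writable fields such as state, delegate or archived
    update = requests.patch(
        f"{BASE}/patches/132679/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted"},  # assumed state slug; valid states are instance-specific
        timeout=30,
    )
    update.raise_for_status()

A put request is issued the same way (requests.put) but, per usual REST semantics, supplies the full set of writable fields rather than a partial one.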

GET /api/patches/132679/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 132679,
    "url": "http://patchwork.dpdk.org/api/patches/132679/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/1697497745-20664-20-git-send-email-roretzla@linux.microsoft.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1697497745-20664-20-git-send-email-roretzla@linux.microsoft.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1697497745-20664-20-git-send-email-roretzla@linux.microsoft.com",
    "date": "2023-10-16T23:09:03",
    "name": "[19/21] hash: use rte optional stdatomic API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "ff6c596d58685a0fd6ef984a77bc6ab971700406",
    "submitter": {
        "id": 2077,
        "url": "http://patchwork.dpdk.org/api/people/2077/?format=api",
        "name": "Tyler Retzlaff",
        "email": "roretzla@linux.microsoft.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patchwork.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/1697497745-20664-20-git-send-email-roretzla@linux.microsoft.com/mbox/",
    "series": [
        {
            "id": 29858,
            "url": "http://patchwork.dpdk.org/api/series/29858/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=29858",
            "date": "2023-10-16T23:08:44",
            "name": "use rte optional stdatomic API",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/29858/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/132679/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/132679/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 1A32A43183;\n\tTue, 17 Oct 2023 01:11:10 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 61F6D427E3;\n\tTue, 17 Oct 2023 01:09:30 +0200 (CEST)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id 33D2440E8A\n for <dev@dpdk.org>; Tue, 17 Oct 2023 01:09:09 +0200 (CEST)",
            "by linux.microsoft.com (Postfix, from userid 1086)\n id EB4CF20B74D3; Mon, 16 Oct 2023 16:09:07 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com EB4CF20B74D3",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;\n s=default; t=1697497747;\n bh=ARZifBQVDjkWPhexyyha094/2FzY2+ivQcVU9neu3KU=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=CM5mgIrHVlOB6GryhnINTAQPn7R/r/imGSJgn3LnqLJjWbNtjgLedOrarXuanxBpZ\n JTncvER3s8y6GMjrX/wkY79HfhCkwOwekKRbpWkQnFqmwrRbezx2SkJwzwdv5ObZxP\n wfxsV7lkDnZr/GjEmPF0d94DjUmO2kgCONy444JI=",
        "From": "Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "To": "dev@dpdk.org",
        "Cc": "Akhil Goyal <gakhil@marvell.com>,\n Anatoly Burakov <anatoly.burakov@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Chenbo Xia <chenbo.xia@intel.com>, Ciara Power <ciara.power@intel.com>,\n David Christensen <drc@linux.vnet.ibm.com>,\n David Hunt <david.hunt@intel.com>,\n Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>,\n Dmitry Malloy <dmitrym@microsoft.com>,\n Elena Agostini <eagostini@nvidia.com>,\n Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n Fan Zhang <fanzhang.oss@gmail.com>, Ferruh Yigit <ferruh.yigit@amd.com>,\n Harman Kalra <hkalra@marvell.com>,\n Harry van Haaren <harry.van.haaren@intel.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Jerin Jacob <jerinj@marvell.com>,\n Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,\n Matan Azrad <matan@nvidia.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>,\n Narcisa Ana Maria Vasile <navasile@linux.microsoft.com>,\n Nicolas Chautru <nicolas.chautru@intel.com>,\n Olivier Matz <olivier.matz@6wind.com>, Ori Kam <orika@nvidia.com>,\n Pallavi Kadam <pallavi.kadam@intel.com>,\n Pavan Nikhilesh <pbhagavatula@marvell.com>,\n Reshma Pattan <reshma.pattan@intel.com>,\n Sameh Gobriel <sameh.gobriel@intel.com>,\n Shijith Thotton <sthotton@marvell.com>,\n Sivaprasad Tummala <sivaprasad.tummala@amd.com>,\n Stephen Hemminger <stephen@networkplumber.org>,\n Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,\n Thomas Monjalon <thomas@monjalon.net>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>,\n Yipeng Wang <yipeng1.wang@intel.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>",
        "Subject": "[PATCH 19/21] hash: use rte optional stdatomic API",
        "Date": "Mon, 16 Oct 2023 16:09:03 -0700",
        "Message-Id": "<1697497745-20664-20-git-send-email-roretzla@linux.microsoft.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>",
        "References": "<1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Replace the use of gcc builtin __atomic_xxx intrinsics with\ncorresponding rte_atomic_xxx optional stdatomic API\n\nSigned-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>\n---\n lib/hash/rte_cuckoo_hash.c | 116 ++++++++++++++++++++++-----------------------\n lib/hash/rte_cuckoo_hash.h |   6 +--\n 2 files changed, 61 insertions(+), 61 deletions(-)",
    "diff": "diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c\nindex 19b23f2..b2cf60d 100644\n--- a/lib/hash/rte_cuckoo_hash.c\n+++ b/lib/hash/rte_cuckoo_hash.c\n@@ -149,7 +149,7 @@ struct rte_hash *\n \tunsigned int writer_takes_lock = 0;\n \tunsigned int no_free_on_del = 0;\n \tuint32_t *ext_bkt_to_free = NULL;\n-\tuint32_t *tbl_chng_cnt = NULL;\n+\tRTE_ATOMIC(uint32_t) *tbl_chng_cnt = NULL;\n \tstruct lcore_cache *local_free_slots = NULL;\n \tunsigned int readwrite_concur_lf_support = 0;\n \tuint32_t i;\n@@ -713,9 +713,9 @@ struct rte_hash *\n \t\t\t\t * variable. Release the application data\n \t\t\t\t * to the readers.\n \t\t\t\t */\n-\t\t\t\t__atomic_store_n(&k->pdata,\n+\t\t\t\trte_atomic_store_explicit(&k->pdata,\n \t\t\t\t\tdata,\n-\t\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\t\trte_memory_order_release);\n \t\t\t\t/*\n \t\t\t\t * Return index where key is stored,\n \t\t\t\t * subtracting the first dummy index\n@@ -776,9 +776,9 @@ struct rte_hash *\n \t\t\t * key_idx is the guard variable for signature\n \t\t\t * and key.\n \t\t\t */\n-\t\t\t__atomic_store_n(&prim_bkt->key_idx[i],\n+\t\t\trte_atomic_store_explicit(&prim_bkt->key_idx[i],\n \t\t\t\t\t new_idx,\n-\t\t\t\t\t __ATOMIC_RELEASE);\n+\t\t\t\t\t rte_memory_order_release);\n \t\t\tbreak;\n \t\t}\n \t}\n@@ -851,9 +851,9 @@ struct rte_hash *\n \t\tif (unlikely(&h->buckets[prev_alt_bkt_idx]\n \t\t\t\t!= curr_bkt)) {\n \t\t\t/* revert it to empty, otherwise duplicated keys */\n-\t\t\t__atomic_store_n(&curr_bkt->key_idx[curr_slot],\n+\t\t\trte_atomic_store_explicit(&curr_bkt->key_idx[curr_slot],\n \t\t\t\tEMPTY_SLOT,\n-\t\t\t\t__ATOMIC_RELEASE);\n+\t\t\t\trte_memory_order_release);\n \t\t\t__hash_rw_writer_unlock(h);\n \t\t\treturn -1;\n \t\t}\n@@ -865,13 +865,13 @@ struct rte_hash *\n \t\t\t * Since there is one writer, load acquires on\n \t\t\t * tbl_chng_cnt are not required.\n \t\t\t */\n-\t\t\t__atomic_store_n(h->tbl_chng_cnt,\n+\t\t\trte_atomic_store_explicit(h->tbl_chng_cnt,\n \t\t\t\t\t *h->tbl_chng_cnt + 1,\n-\t\t\t\t\t __ATOMIC_RELEASE);\n+\t\t\t\t\t rte_memory_order_release);\n \t\t\t/* The store to sig_current should not\n \t\t\t * move above the store to tbl_chng_cnt.\n \t\t\t */\n-\t\t\t__atomic_thread_fence(__ATOMIC_RELEASE);\n+\t\t\t__atomic_thread_fence(rte_memory_order_release);\n \t\t}\n \n \t\t/* Need to swap current/alt sig to allow later\n@@ -881,9 +881,9 @@ struct rte_hash *\n \t\tcurr_bkt->sig_current[curr_slot] =\n \t\t\tprev_bkt->sig_current[prev_slot];\n \t\t/* Release the updated bucket entry */\n-\t\t__atomic_store_n(&curr_bkt->key_idx[curr_slot],\n+\t\trte_atomic_store_explicit(&curr_bkt->key_idx[curr_slot],\n \t\t\tprev_bkt->key_idx[prev_slot],\n-\t\t\t__ATOMIC_RELEASE);\n+\t\t\trte_memory_order_release);\n \n \t\tcurr_slot = prev_slot;\n \t\tcurr_node = prev_node;\n@@ -897,20 +897,20 @@ struct rte_hash *\n \t\t * Since there is one writer, load acquires on\n \t\t * tbl_chng_cnt are not required.\n \t\t */\n-\t\t__atomic_store_n(h->tbl_chng_cnt,\n+\t\trte_atomic_store_explicit(h->tbl_chng_cnt,\n \t\t\t\t *h->tbl_chng_cnt + 1,\n-\t\t\t\t __ATOMIC_RELEASE);\n+\t\t\t\t rte_memory_order_release);\n \t\t/* The store to sig_current should not\n \t\t * move above the store to tbl_chng_cnt.\n \t\t */\n-\t\t__atomic_thread_fence(__ATOMIC_RELEASE);\n+\t\t__atomic_thread_fence(rte_memory_order_release);\n \t}\n \n \tcurr_bkt->sig_current[curr_slot] = sig;\n \t/* Release the new bucket entry 
*/\n-\t__atomic_store_n(&curr_bkt->key_idx[curr_slot],\n+\trte_atomic_store_explicit(&curr_bkt->key_idx[curr_slot],\n \t\t\t new_idx,\n-\t\t\t __ATOMIC_RELEASE);\n+\t\t\t rte_memory_order_release);\n \n \t__hash_rw_writer_unlock(h);\n \n@@ -1076,9 +1076,9 @@ struct rte_hash *\n \t * not leak after the store of pdata in the key store. i.e. pdata is\n \t * the guard variable. Release the application data to the readers.\n \t */\n-\t__atomic_store_n(&new_k->pdata,\n+\trte_atomic_store_explicit(&new_k->pdata,\n \t\tdata,\n-\t\t__ATOMIC_RELEASE);\n+\t\trte_memory_order_release);\n \t/* Copy key */\n \tmemcpy(new_k->key, key, h->key_len);\n \n@@ -1149,9 +1149,9 @@ struct rte_hash *\n \t\t\t\t * key_idx is the guard variable for signature\n \t\t\t\t * and key.\n \t\t\t\t */\n-\t\t\t\t__atomic_store_n(&cur_bkt->key_idx[i],\n+\t\t\t\trte_atomic_store_explicit(&cur_bkt->key_idx[i],\n \t\t\t\t\t\t slot_id,\n-\t\t\t\t\t\t __ATOMIC_RELEASE);\n+\t\t\t\t\t\t rte_memory_order_release);\n \t\t\t\t__hash_rw_writer_unlock(h);\n \t\t\t\treturn slot_id - 1;\n \t\t\t}\n@@ -1185,9 +1185,9 @@ struct rte_hash *\n \t * the store to key_idx. i.e. key_idx is the guard variable\n \t * for signature and key.\n \t */\n-\t__atomic_store_n(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],\n+\trte_atomic_store_explicit(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],\n \t\t\t slot_id,\n-\t\t\t __ATOMIC_RELEASE);\n+\t\t\t rte_memory_order_release);\n \t/* Link the new bucket to sec bucket linked list */\n \tlast = rte_hash_get_last_bkt(sec_bkt);\n \tlast->next = &h->buckets_ext[ext_bkt_id - 1];\n@@ -1290,17 +1290,17 @@ struct rte_hash *\n \t\t * key comparison will ensure that the lookup fails.\n \t\t */\n \t\tif (bkt->sig_current[i] == sig) {\n-\t\t\tkey_idx = __atomic_load_n(&bkt->key_idx[i],\n-\t\t\t\t\t  __ATOMIC_ACQUIRE);\n+\t\t\tkey_idx = rte_atomic_load_explicit(&bkt->key_idx[i],\n+\t\t\t\t\t  rte_memory_order_acquire);\n \t\t\tif (key_idx != EMPTY_SLOT) {\n \t\t\t\tk = (struct rte_hash_key *) ((char *)keys +\n \t\t\t\t\t\tkey_idx * h->key_entry_size);\n \n \t\t\t\tif (rte_hash_cmp_eq(key, k->key, h) == 0) {\n \t\t\t\t\tif (data != NULL) {\n-\t\t\t\t\t\t*data = __atomic_load_n(\n+\t\t\t\t\t\t*data = rte_atomic_load_explicit(\n \t\t\t\t\t\t\t&k->pdata,\n-\t\t\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\t\t\t\t\trte_memory_order_acquire);\n \t\t\t\t\t}\n \t\t\t\t\t/*\n \t\t\t\t\t * Return index where key is stored,\n@@ -1374,8 +1374,8 @@ struct rte_hash *\n \t\t * starts. Acquire semantics will make sure that\n \t\t * loads in search_one_bucket are not hoisted.\n \t\t */\n-\t\tcnt_b = __atomic_load_n(h->tbl_chng_cnt,\n-\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\tcnt_b = rte_atomic_load_explicit(h->tbl_chng_cnt,\n+\t\t\t\trte_memory_order_acquire);\n \n \t\t/* Check if key is in primary location */\n \t\tbkt = &h->buckets[prim_bucket_idx];\n@@ -1396,7 +1396,7 @@ struct rte_hash *\n \t\t/* The loads of sig_current in search_one_bucket\n \t\t * should not move below the load from tbl_chng_cnt.\n \t\t */\n-\t\t__atomic_thread_fence(__ATOMIC_ACQUIRE);\n+\t\t__atomic_thread_fence(rte_memory_order_acquire);\n \t\t/* Re-read the table change counter to check if the\n \t\t * table has changed during search. 
If yes, re-do\n \t\t * the search.\n@@ -1405,8 +1405,8 @@ struct rte_hash *\n \t\t * and key index in secondary bucket will make sure\n \t\t * that it does not get hoisted.\n \t\t */\n-\t\tcnt_a = __atomic_load_n(h->tbl_chng_cnt,\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\tcnt_a = rte_atomic_load_explicit(h->tbl_chng_cnt,\n+\t\t\t\t\trte_memory_order_acquire);\n \t} while (cnt_b != cnt_a);\n \n \treturn -ENOENT;\n@@ -1611,26 +1611,26 @@ struct rte_hash *\n \tfor (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {\n \t\tif (last_bkt->key_idx[i] != EMPTY_SLOT) {\n \t\t\tcur_bkt->sig_current[pos] = last_bkt->sig_current[i];\n-\t\t\t__atomic_store_n(&cur_bkt->key_idx[pos],\n+\t\t\trte_atomic_store_explicit(&cur_bkt->key_idx[pos],\n \t\t\t\t\t last_bkt->key_idx[i],\n-\t\t\t\t\t __ATOMIC_RELEASE);\n+\t\t\t\t\t rte_memory_order_release);\n \t\t\tif (h->readwrite_concur_lf_support) {\n \t\t\t\t/* Inform the readers that the table has changed\n \t\t\t\t * Since there is one writer, load acquire on\n \t\t\t\t * tbl_chng_cnt is not required.\n \t\t\t\t */\n-\t\t\t\t__atomic_store_n(h->tbl_chng_cnt,\n+\t\t\t\trte_atomic_store_explicit(h->tbl_chng_cnt,\n \t\t\t\t\t *h->tbl_chng_cnt + 1,\n-\t\t\t\t\t __ATOMIC_RELEASE);\n+\t\t\t\t\t rte_memory_order_release);\n \t\t\t\t/* The store to sig_current should\n \t\t\t\t * not move above the store to tbl_chng_cnt.\n \t\t\t\t */\n-\t\t\t\t__atomic_thread_fence(__ATOMIC_RELEASE);\n+\t\t\t\t__atomic_thread_fence(rte_memory_order_release);\n \t\t\t}\n \t\t\tlast_bkt->sig_current[i] = NULL_SIGNATURE;\n-\t\t\t__atomic_store_n(&last_bkt->key_idx[i],\n+\t\t\trte_atomic_store_explicit(&last_bkt->key_idx[i],\n \t\t\t\t\t EMPTY_SLOT,\n-\t\t\t\t\t __ATOMIC_RELEASE);\n+\t\t\t\t\t rte_memory_order_release);\n \t\t\treturn;\n \t\t}\n \t}\n@@ -1650,8 +1650,8 @@ struct rte_hash *\n \n \t/* Check if key is in bucket */\n \tfor (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {\n-\t\tkey_idx = __atomic_load_n(&bkt->key_idx[i],\n-\t\t\t\t\t  __ATOMIC_ACQUIRE);\n+\t\tkey_idx = rte_atomic_load_explicit(&bkt->key_idx[i],\n+\t\t\t\t\t  rte_memory_order_acquire);\n \t\tif (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {\n \t\t\tk = (struct rte_hash_key *) ((char *)keys +\n \t\t\t\t\tkey_idx * h->key_entry_size);\n@@ -1663,9 +1663,9 @@ struct rte_hash *\n \t\t\t\tif (!h->no_free_on_del)\n \t\t\t\t\tremove_entry(h, bkt, i);\n \n-\t\t\t\t__atomic_store_n(&bkt->key_idx[i],\n+\t\t\t\trte_atomic_store_explicit(&bkt->key_idx[i],\n \t\t\t\t\t\t EMPTY_SLOT,\n-\t\t\t\t\t\t __ATOMIC_RELEASE);\n+\t\t\t\t\t\t rte_memory_order_release);\n \n \t\t\t\t*pos = i;\n \t\t\t\t/*\n@@ -2077,8 +2077,8 @@ struct rte_hash *\n \t\t * starts. 
Acquire semantics will make sure that\n \t\t * loads in compare_signatures are not hoisted.\n \t\t */\n-\t\tcnt_b = __atomic_load_n(h->tbl_chng_cnt,\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\tcnt_b = rte_atomic_load_explicit(h->tbl_chng_cnt,\n+\t\t\t\t\trte_memory_order_acquire);\n \n \t\t/* Compare signatures and prefetch key slot of first hit */\n \t\tfor (i = 0; i < num_keys; i++) {\n@@ -2121,9 +2121,9 @@ struct rte_hash *\n \t\t\t\t\t\t__builtin_ctzl(prim_hitmask[i])\n \t\t\t\t\t\t>> 1;\n \t\t\t\tuint32_t key_idx =\n-\t\t\t\t__atomic_load_n(\n+\t\t\t\trte_atomic_load_explicit(\n \t\t\t\t\t&primary_bkt[i]->key_idx[hit_index],\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\t\t\trte_memory_order_acquire);\n \t\t\t\tconst struct rte_hash_key *key_slot =\n \t\t\t\t\t(const struct rte_hash_key *)(\n \t\t\t\t\t(const char *)h->key_store +\n@@ -2137,9 +2137,9 @@ struct rte_hash *\n \t\t\t\t\t!rte_hash_cmp_eq(\n \t\t\t\t\t\tkey_slot->key, keys[i], h)) {\n \t\t\t\t\tif (data != NULL)\n-\t\t\t\t\t\tdata[i] = __atomic_load_n(\n+\t\t\t\t\t\tdata[i] = rte_atomic_load_explicit(\n \t\t\t\t\t\t\t&key_slot->pdata,\n-\t\t\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\t\t\t\t\trte_memory_order_acquire);\n \n \t\t\t\t\thits |= 1ULL << i;\n \t\t\t\t\tpositions[i] = key_idx - 1;\n@@ -2153,9 +2153,9 @@ struct rte_hash *\n \t\t\t\t\t\t__builtin_ctzl(sec_hitmask[i])\n \t\t\t\t\t\t>> 1;\n \t\t\t\tuint32_t key_idx =\n-\t\t\t\t__atomic_load_n(\n+\t\t\t\trte_atomic_load_explicit(\n \t\t\t\t\t&secondary_bkt[i]->key_idx[hit_index],\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\t\t\trte_memory_order_acquire);\n \t\t\t\tconst struct rte_hash_key *key_slot =\n \t\t\t\t\t(const struct rte_hash_key *)(\n \t\t\t\t\t(const char *)h->key_store +\n@@ -2170,9 +2170,9 @@ struct rte_hash *\n \t\t\t\t\t!rte_hash_cmp_eq(\n \t\t\t\t\t\tkey_slot->key, keys[i], h)) {\n \t\t\t\t\tif (data != NULL)\n-\t\t\t\t\t\tdata[i] = __atomic_load_n(\n+\t\t\t\t\t\tdata[i] = rte_atomic_load_explicit(\n \t\t\t\t\t\t\t&key_slot->pdata,\n-\t\t\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\t\t\t\t\t\trte_memory_order_acquire);\n \n \t\t\t\t\thits |= 1ULL << i;\n \t\t\t\t\tpositions[i] = key_idx - 1;\n@@ -2216,7 +2216,7 @@ struct rte_hash *\n \t\t/* The loads of sig_current in compare_signatures\n \t\t * should not move below the load from tbl_chng_cnt.\n \t\t */\n-\t\t__atomic_thread_fence(__ATOMIC_ACQUIRE);\n+\t\t__atomic_thread_fence(rte_memory_order_acquire);\n \t\t/* Re-read the table change counter to check if the\n \t\t * table has changed during search. 
If yes, re-do\n \t\t * the search.\n@@ -2225,8 +2225,8 @@ struct rte_hash *\n \t\t * key index will make sure that it does not get\n \t\t * hoisted.\n \t\t */\n-\t\tcnt_a = __atomic_load_n(h->tbl_chng_cnt,\n-\t\t\t\t\t__ATOMIC_ACQUIRE);\n+\t\tcnt_a = rte_atomic_load_explicit(h->tbl_chng_cnt,\n+\t\t\t\t\trte_memory_order_acquire);\n \t} while (cnt_b != cnt_a);\n \n \tif (hit_mask != NULL)\n@@ -2498,8 +2498,8 @@ struct rte_hash *\n \tidx = *next % RTE_HASH_BUCKET_ENTRIES;\n \n \t/* If current position is empty, go to the next one */\n-\twhile ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],\n-\t\t\t\t\t__ATOMIC_ACQUIRE)) == EMPTY_SLOT) {\n+\twhile ((position = rte_atomic_load_explicit(&h->buckets[bucket_idx].key_idx[idx],\n+\t\t\t\t\trte_memory_order_acquire)) == EMPTY_SLOT) {\n \t\t(*next)++;\n \t\t/* End of table */\n \t\tif (*next == total_entries_main)\ndiff --git a/lib/hash/rte_cuckoo_hash.h b/lib/hash/rte_cuckoo_hash.h\nindex eb2644f..f7afc4d 100644\n--- a/lib/hash/rte_cuckoo_hash.h\n+++ b/lib/hash/rte_cuckoo_hash.h\n@@ -137,7 +137,7 @@ struct lcore_cache {\n struct rte_hash_key {\n \tunion {\n \t\tuintptr_t idata;\n-\t\tvoid *pdata;\n+\t\tRTE_ATOMIC(void *) pdata;\n \t};\n \t/* Variable key size */\n \tchar key[0];\n@@ -155,7 +155,7 @@ enum rte_hash_sig_compare_function {\n struct rte_hash_bucket {\n \tuint16_t sig_current[RTE_HASH_BUCKET_ENTRIES];\n \n-\tuint32_t key_idx[RTE_HASH_BUCKET_ENTRIES];\n+\tRTE_ATOMIC(uint32_t) key_idx[RTE_HASH_BUCKET_ENTRIES];\n \n \tuint8_t flag[RTE_HASH_BUCKET_ENTRIES];\n \n@@ -229,7 +229,7 @@ struct rte_hash {\n \t * is piggy-backed to freeing of the key index.\n \t */\n \tuint32_t *ext_bkt_to_free;\n-\tuint32_t *tbl_chng_cnt;\n+\tRTE_ATOMIC(uint32_t) *tbl_chng_cnt;\n \t/**< Indicates if the hash table changed from last read. */\n } __rte_cache_aligned;\n \n",
    "prefixes": [
        "19/21"
    ]
}
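
As a usage sketch for the payload above, the mbox and checks URLs it carries can be followed directly with the same library. This assumes the public patchwork.dpdk.org instance is reachable, uses only fields present in the response, and the local file name is an arbitrary choice.

    import requests

    patch = requests.get("http://patchwork.dpdk.org/api/patches/132679/", timeout=30).json()

    # Download the raw mbox so the patch can be applied locally with `git am`
    mbox = requests.get(patch["mbox"], timeout=30)
    mbox.raise_for_status()
    with open("hash-stdatomic-19-21.mbox", "wb") as f:  # arbitrary local file name
        f.write(mbox.content)

    # "check" is the aggregate CI result ("warning" here); per-context results sit behind "checks"
    for check in requests.get(patch["checks"], timeout=30).json():
        print(check["context"], check["state"])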