get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch.

GET /api/patches/95063/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95063,
    "url": "http://patchwork.dpdk.org/api/patches/95063/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210630124609.8711-10-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210630124609.8711-10-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210630124609.8711-10-suanmingm@nvidia.com",
    "date": "2021-06-30T12:45:56",
    "name": "[v2,09/22] net/mlx5: manage list cache entries release",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a09f1ffd89cb8452b937598996ce2680b28a685e",
    "submitter": {
        "id": 1887,
        "url": "http://patchwork.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210630124609.8711-10-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 17549,
            "url": "http://patchwork.dpdk.org/api/series/17549/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=17549",
            "date": "2021-06-30T12:45:47",
            "name": "net/mlx5: insertion rate optimization",
            "version": 2,
            "mbox": "http://patchwork.dpdk.org/series/17549/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/95063/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/95063/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E617DA0A0F;\n\tWed, 30 Jun 2021 14:47:43 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 48A85412A1;\n\tWed, 30 Jun 2021 14:46:58 +0200 (CEST)",
            "from NAM11-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam11on2082.outbound.protection.outlook.com [40.107.223.82])\n by mails.dpdk.org (Postfix) with ESMTP id 8B2A241296\n for <dev@dpdk.org>; Wed, 30 Jun 2021 14:46:54 +0200 (CEST)",
            "from BN6PR1701CA0019.namprd17.prod.outlook.com\n (2603:10b6:405:15::29) by DM4PR12MB5165.namprd12.prod.outlook.com\n (2603:10b6:5:394::9) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4287.21; Wed, 30 Jun\n 2021 12:46:53 +0000",
            "from BN8NAM11FT045.eop-nam11.prod.protection.outlook.com\n (2603:10b6:405:15:cafe::ab) by BN6PR1701CA0019.outlook.office365.com\n (2603:10b6:405:15::29) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4287.22 via Frontend\n Transport; Wed, 30 Jun 2021 12:46:53 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT045.mail.protection.outlook.com (10.13.177.47) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4287.22 via Frontend Transport; Wed, 30 Jun 2021 12:46:52 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Wed, 30 Jun\n 2021 12:46:42 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=Q+/gIVLB89DoZvh/CKTQqXAG9ccb+LHdHOzSAxHujwG3PYHXMvBHpXDrbYaPVewTFqmwmnMSvcp1hD1M6I5EOXfPeN6l95KULpumql2QbeAdc/7C5AB7UR+23xDO7sqI7pxmcsx7EOymWk3O085YiqKgB8DXQZJCBpV+aSMq9eQHDo7c2bsEDpvsKSkJu4SiD6W3Tf7Xf2umHX6bcX+VLn4qT8tpe2JIzMz6Ilogeb40B/WLHJQGt41UhmLdHmlieRa7+mkKzV58mQLyCymADYHfyNt/aof6jXSR/+fjlj9Q+uapvLHRC9Qmm2ZwLNpzd6bI95iJ8k16pPGXJUCxaA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=UyrDB+s/v1ml5LIIf8QAM9Tnnii4/e5908TuWg4LkmE=;\n b=eODxjKwQFl6jqbDlwmfvzCiJVDdl+BuHE5jBm2ZMVPn3e3VmpRFW5hFLmca2ZAQ5bzxsh1yZ60pGWp9GQP4Te4rD0ffOzRZTCRQtrkAh0arEV3sDaJ/gPmOSCkEkHmxFTo8bu0gf+hF5p3fh1FWo21l0hJfksywCOAbUk0FNLD+BjvhcvjcGMpFF0fMLFUrBLhoRZYOQ+aIB4vhlM22D1TbKgDjoM85JisriiToe6JDGuUa/ArT3srbL4te8Wblj61foEdZjZiFnHSAvZMutn4oyxJ752BRrRn2rBJCsWByYexCZKY8axRQwt2hD1+m9aI05Vz9s1Hr4aVt6pwgiUg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=UyrDB+s/v1ml5LIIf8QAM9Tnnii4/e5908TuWg4LkmE=;\n b=Ks2D+/HJ/e7RnCSarU2ONucMnOVua+QzpuLq3XVFec/kWDfoBMPztekdYQ+RXxifewH0FKleKrw1DYiKW3pw3hXoIHhYqPbICIqMHuyesRsEXMq7SxsmJtSyRuBERbWyfpdk+v4AE8JnSz656x3p9ZKFnShKDPb1If+ehcI2Tt3YN0eQCFct8gKcNdukagMhduP6zFyoUzsIkXrz2ZFwN7my68Cr7wCybVbl1aA9u48PfypUTwBfqUFT8m5BEscpc8jRbpx0IAEtjGDIdreezFIfrUGQuAOvGjFnCxZyWbT0zr/0M4Vokt8pRcflndqD/eYIzAIJ7YDez/LBxUoPZQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Date": "Wed, 30 Jun 2021 15:45:56 +0300",
        "Message-ID": "<20210630124609.8711-10-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20210630124609.8711-1-suanmingm@nvidia.com>",
        "References": "<20210527093403.1153127-1-suanmingm@nvidia.com>\n <20210630124609.8711-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "fb36a420-ce44-42b7-4887-08d93bc522e4",
        "X-MS-TrafficTypeDiagnostic": "DM4PR12MB5165:",
        "X-Microsoft-Antispam-PRVS": "\n <DM4PR12MB51659388CD2E38B0A02373DFC1019@DM4PR12MB5165.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:326;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 0hCdPOTgOWOe4JvrOjQiM/kApc+4RvR46cAAQ5yg9+UzJQ9hiOt1nR7LWjtkoQniG6PDUW3dvhd7UcdTUg4SChc1IxfJLs7o6TpoH3E3zkQUWgJ1TUrdALmd4gjmiY/FxEY27zuZyScM0pj6E+Bmx6UylP/yLHKw5A75oJVCfwitMhJcx0p2tOqEJ0QpI04JXvynP4fLh08mYIwrSigt+zKvYzo0GHopJ1XX05wcH/i+8vF9IBl27R9YTCaTZfhkRkOlFEmYq2l4q/JGG3R/DS02EDgSlYOb3iOnWD8Ef30px8MeIZ9BwR4VBpi5WkkapFmI4QXVQwYCJkD7vfUpD2kAupcPrRA0iL+LpDEskchJxQ2FVZGQMB/eo+N2tf2fwrpwv9DONLyfMmW95+YDWw2aAyRcU4rIW/4RssRTUB5jElfEPw3UUyKRyowTtdkapNJnyRj0OcZdzgvNQD55ETx/e53et73SDrxVgHmP7ZynqxuXdoZhGDGPh/rXSDmEApyxJ949FVHYEfwxoPYHux3g9zKuM0EZvNc5ZN/DK9x1z0fpb8p5GiVvrqPytTbHLBrAuHEW64ARMTXvefPHVWRRkuJvKGxyl77/zgFf3t6e+MJ4eM0FKIug0HAoaqY1HL78PNXOiBF2jWWbduoKbaBMq0uKUeF/lSFxjJaPjn3AH23Cj314ddqP1ftdnPufZfZ9ZsHafUGSA0KPhEV66g==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(39860400002)(396003)(136003)(376002)(346002)(46966006)(36840700001)(47076005)(2616005)(6286002)(6636002)(82310400003)(36860700001)(16526019)(7636003)(426003)(316002)(4326008)(186003)(86362001)(478600001)(110136005)(5660300002)(54906003)(70586007)(70206006)(8936002)(26005)(1076003)(356005)(336012)(82740400003)(6666004)(7696005)(83380400001)(8676002)(55016002)(2906002)(36756003);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "30 Jun 2021 12:46:52.7899 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n fb36a420-ce44-42b7-4887-08d93bc522e4",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT045.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM4PR12MB5165",
        "Subject": "[dpdk-dev] [PATCH v2 09/22] net/mlx5: manage list cache entries\n release",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Matan Azrad <matan@nvidia.com>\n\nWhen a cache entry is allocated by lcore A and is released by lcore B,\nthe driver should synchronize the cache list access of lcore A.\n\nThe design decision is to manage a counter per lcore cache that will be\nincreased atomically when the non-original lcore decreases the reference\ncounter of cache entry to 0.\n\nIn list register operation, before the running lcore starts a lookup in\nits cache, it will check the counter in order to free invalid entries in\nits cache.\n\nSigned-off-by: Matan Azrad <matan@nvidia.com>\nAcked-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/mlx5_utils.c | 79 +++++++++++++++++++++++------------\n drivers/net/mlx5/mlx5_utils.h |  2 +\n 2 files changed, 54 insertions(+), 27 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c\nindex 772b352af5..7cdf44dcf7 100644\n--- a/drivers/net/mlx5/mlx5_utils.c\n+++ b/drivers/net/mlx5/mlx5_utils.c\n@@ -47,36 +47,25 @@ __list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)\n \tuint32_t ret;\n \n \twhile (entry != NULL) {\n-\t\tstruct mlx5_list_entry *nentry = LIST_NEXT(entry, next);\n-\n-\t\tif (list->cb_match(list, entry, ctx)) {\n-\t\t\tif (lcore_index < RTE_MAX_LCORE) {\n+\t\tif (list->cb_match(list, entry, ctx) == 0) {\n+\t\t\tif (reuse) {\n+\t\t\t\tret = __atomic_add_fetch(&entry->ref_cnt, 1,\n+\t\t\t\t\t\t\t __ATOMIC_ACQUIRE) - 1;\n+\t\t\t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p ref: %u.\",\n+\t\t\t\t\tlist->name, (void *)entry,\n+\t\t\t\t\tentry->ref_cnt);\n+\t\t\t} else if (lcore_index < RTE_MAX_LCORE) {\n \t\t\t\tret = __atomic_load_n(&entry->ref_cnt,\n \t\t\t\t\t\t      __ATOMIC_ACQUIRE);\n-\t\t\t\tif (ret == 0) {\n-\t\t\t\t\tLIST_REMOVE(entry, next);\n-\t\t\t\t\tlist->cb_clone_free(list, entry);\n-\t\t\t\t}\n-\t\t\t}\n-\t\t\tentry = nentry;\n-\t\t\tcontinue;\n-\t\t}\n-\t\tif (reuse) {\n-\t\t\tret = __atomic_add_fetch(&entry->ref_cnt, 1,\n-\t\t\t\t\t\t __ATOMIC_ACQUIRE);\n-\t\t\tif (ret == 1u) {\n-\t\t\t\t/* Entry was invalid before, free it. */\n-\t\t\t\tLIST_REMOVE(entry, next);\n-\t\t\t\tlist->cb_clone_free(list, entry);\n-\t\t\t\tentry = nentry;\n-\t\t\t\tcontinue;\n \t\t\t}\n-\t\t\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p ref++: %u.\",\n-\t\t\t\tlist->name, (void *)entry, entry->ref_cnt);\n+\t\t\tif (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))\n+\t\t\t\treturn entry;\n+\t\t\tif (reuse && ret == 0)\n+\t\t\t\tentry->ref_cnt--; /* Invalid entry. */\n \t}\n-\t\tbreak;\n+\t\tentry = LIST_NEXT(entry, next);\n \t}\n-\treturn entry;\n+\treturn NULL;\n }\n \n struct mlx5_list_entry *\n@@ -105,10 +94,31 @@ mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,\n \t\treturn NULL;\n \tlentry->ref_cnt = 1u;\n \tlentry->gentry = gentry;\n+\tlentry->lcore_idx = (uint32_t)lcore_index;\n \tLIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next);\n \treturn lentry;\n }\n \n+static void\n+__list_cache_clean(struct mlx5_list *list, int lcore_index)\n+{\n+\tstruct mlx5_list_cache *c = &list->cache[lcore_index];\n+\tstruct mlx5_list_entry *entry = LIST_FIRST(&c->h);\n+\tuint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,\n+\t\t\t\t\t       __ATOMIC_RELAXED);\n+\n+\twhile (inv_cnt != 0 && entry != NULL) {\n+\t\tstruct mlx5_list_entry *nentry = LIST_NEXT(entry, next);\n+\n+\t\tif (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {\n+\t\t\tLIST_REMOVE(entry, next);\n+\t\t\tlist->cb_clone_free(list, entry);\n+\t\t\tinv_cnt--;\n+\t\t}\n+\t\tentry = nentry;\n+\t}\n+}\n+\n struct mlx5_list_entry *\n mlx5_list_register(struct mlx5_list *list, void *ctx)\n {\n@@ -122,6 +132,8 @@ mlx5_list_register(struct mlx5_list *list, void *ctx)\n \t\trte_errno = ENOTSUP;\n \t\treturn NULL;\n \t}\n+\t/* 0. Free entries that was invalidated by other lcores. */\n+\t__list_cache_clean(list, lcore_index);\n \t/* 1. Lookup in local cache. */\n \tlocal_entry = __list_lookup(list, lcore_index, ctx, true);\n \tif (local_entry)\n@@ -147,6 +159,7 @@ mlx5_list_register(struct mlx5_list *list, void *ctx)\n \tentry->ref_cnt = 1u;\n \tlocal_entry->ref_cnt = 1u;\n \tlocal_entry->gentry = entry;\n+\tlocal_entry->lcore_idx = (uint32_t)lcore_index;\n \trte_rwlock_write_lock(&list->lock);\n \t/* 4. Make sure the same entry was not created before the write lock. */\n \tif (unlikely(prev_gen_cnt != list->gen_cnt)) {\n@@ -169,8 +182,8 @@ mlx5_list_register(struct mlx5_list *list, void *ctx)\n \trte_rwlock_write_unlock(&list->lock);\n \tLIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next);\n \t__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);\n-\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p new: %u.\",\n-\t\tlist->name, (void *)entry, entry->ref_cnt);\n+\tDRV_LOG(DEBUG, \"mlx5 list %s entry %p new: %u.\", list->name,\n+\t\t(void *)entry, entry->ref_cnt);\n \treturn local_entry;\n }\n \n@@ -179,9 +192,21 @@ mlx5_list_unregister(struct mlx5_list *list,\n \t\t      struct mlx5_list_entry *entry)\n {\n \tstruct mlx5_list_entry *gentry = entry->gentry;\n+\tint lcore_idx;\n \n \tif (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_ACQUIRE) != 0)\n \t\treturn 1;\n+\tlcore_idx = rte_lcore_index(rte_lcore_id());\n+\tMLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);\n+\tif (entry->lcore_idx == (uint32_t)lcore_idx) {\n+\t\tLIST_REMOVE(entry, next);\n+\t\tlist->cb_clone_free(list, entry);\n+\t} else if (likely(lcore_idx != -1)) {\n+\t\t__atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1,\n+\t\t\t\t   __ATOMIC_RELAXED);\n+\t} else {\n+\t\treturn 0;\n+\t}\n \tif (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_ACQUIRE) != 0)\n \t\treturn 1;\n \trte_rwlock_write_lock(&list->lock);\ndiff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h\nindex 684d1e8a2a..ffa9cd5142 100644\n--- a/drivers/net/mlx5/mlx5_utils.h\n+++ b/drivers/net/mlx5/mlx5_utils.h\n@@ -310,11 +310,13 @@ struct mlx5_list;\n struct mlx5_list_entry {\n \tLIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */\n \tuint32_t ref_cnt; /* 0 means, entry is invalid. */\n+\tuint32_t lcore_idx;\n \tstruct mlx5_list_entry *gentry;\n };\n \n struct mlx5_list_cache {\n \tLIST_HEAD(mlx5_list_head, mlx5_list_entry) h;\n+\tuint32_t inv_cnt; /* Invalid entries counter. */\n } __rte_cache_aligned;\n \n /**\n",
    "prefixes": [
        "v2",
        "09/22"
    ]
}