get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.
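
These operations can be exercised with any HTTP client. The sketch below uses Python's requests library and is illustrative only: the API token is a placeholder, the base URL and patch ID are taken from the response shown next, and the write operations (PUT/PATCH) assume token authentication is enabled on the instance and that the account has maintainer rights on the project.

    import requests

    BASE = "http://patchwork.dpdk.org/api"
    TOKEN = "REPLACE_WITH_API_TOKEN"  # placeholder; issue a token from the Patchwork web UI

    # get: fetch the patch shown below.
    resp = requests.get(f"{BASE}/patches/95074/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # patch: partial update, e.g. move the patch to another state.
    # Valid state names depend on the instance configuration.
    updated = requests.patch(
        f"{BASE}/patches/95074/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted"},
    )
    print(updated.status_code)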

GET /api/patches/95074/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95074,
    "url": "http://patchwork.dpdk.org/api/patches/95074/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210630124609.8711-21-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210630124609.8711-21-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210630124609.8711-21-suanmingm@nvidia.com",
    "date": "2021-06-30T12:46:07",
    "name": "[v2,20/22] net/mlx5: support index pool none local core operations",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "70674b5e800437a008932f960e79c91931f7d4dc",
    "submitter": {
        "id": 1887,
        "url": "http://patchwork.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210630124609.8711-21-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 17549,
            "url": "http://patchwork.dpdk.org/api/series/17549/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=17549",
            "date": "2021-06-30T12:45:47",
            "name": "net/mlx5: insertion rate optimization",
            "version": 2,
            "mbox": "http://patchwork.dpdk.org/series/17549/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/95074/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/95074/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C85B1A0A0F;\n\tWed, 30 Jun 2021 14:49:02 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id AB382412D8;\n\tWed, 30 Jun 2021 14:47:14 +0200 (CEST)",
            "from NAM10-BN7-obe.outbound.protection.outlook.com\n (mail-bn7nam10on2065.outbound.protection.outlook.com [40.107.92.65])\n by mails.dpdk.org (Postfix) with ESMTP id 1098E412D0\n for <dev@dpdk.org>; Wed, 30 Jun 2021 14:47:11 +0200 (CEST)",
            "from BN9PR03CA0334.namprd03.prod.outlook.com (2603:10b6:408:f6::9)\n by DM6PR12MB3227.namprd12.prod.outlook.com (2603:10b6:5:18d::14) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4264.20; Wed, 30 Jun\n 2021 12:47:09 +0000",
            "from BN8NAM11FT013.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:f6:cafe::6c) by BN9PR03CA0334.outlook.office365.com\n (2603:10b6:408:f6::9) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4287.22 via Frontend\n Transport; Wed, 30 Jun 2021 12:47:09 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT013.mail.protection.outlook.com (10.13.176.182) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4287.22 via Frontend Transport; Wed, 30 Jun 2021 12:47:08 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Wed, 30 Jun\n 2021 12:47:02 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=XfDjtCC5z7jBW+SBi8oC+/voxiI4Bp5vRIFZh/nQWBrwZiKGLvX487iz+u10oSfEKCEXDA/PCd9ZGBtjF+SI1B7DVYHFYK2AqiIAznQ+gY91XP9lDuQlFGnAJezZuhLwtTrv8msHPvk3RhKJz0/4O/YZrq7tfJ97cNeoEiFTlpv5X+RtG2hL3rvnbSvO7NgWHnmE97zKKZblEE90PrrEGf0XXlNoWM0FrU+BQCHiroDbp4DH71wO5Eram7eM+BmyjNVLak8R6esPgaQtiC40exR15KI0ELDRnaCYX3ulrJ/omslta0/KHS87of/fcs2IQ5zlhZyKdxEsqbVJoQ1Ppg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=XGXzZ+JG/VwiWi8hxkFosCFLnf9Eo1qxNqpqbKvOc+g=;\n b=fQfJ5kdIXFgbTv736cW3XEVo9tYl7yfGRHmYDlTTf9DJF1KccIi/JL1AR/VM2o0a0r/vheAaKBqslJzZM7INRfZpqC3zua3UTnXgHiRfR661fbmiUq9z5lWJQkXZ4WfD4NT8zURjaTlrBCqT8dKl8yAeXdRLwHYi3fCrryX4n6n2Ot0llxVJorbqRGqdifT50KAvY014cE1f/8NCHN/T9m629hZ8iaFV3PELRTsizQGmAs59xY1JkiRihSPc+At8KuVZZ4usHgIlSCgACmhePPWIaHUorOiZebxeJLeqcGXU5LLoiHglDVtxoQ5rKmTfbEEa56vvuGfKRY0gzvnTsQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=XGXzZ+JG/VwiWi8hxkFosCFLnf9Eo1qxNqpqbKvOc+g=;\n b=BVWXirP0Cf0O3K2j2c1iZ7Fb+bKLoTx0UDp696D8kxBhU6XUDxXxyR4Y+QSqiftiut3FqPUklue7Mflpxi3T6ukXu4LB5jbHB1Q1uJc8NhbvnGPJ9w5nHDNQTgNQBSdm+UO14MvP3jt3LDDeHgda59wTZ9KkLwmNLbfrVyNmGQ6o9xTNq1iNMs+1xwHLsrfqJyj0L03/4sOknPFoI3dtMalA5tiz0w5mbQccCCGJmG2x2CUh7bhqYJirhMfTeB3VFM4eV+alr+Ht41IL1V/tyAMxXXUjD+InyxYbXnsN+vfXsBOTzXnfog85t2f66ls4xmw0vTXU7P7dRIQerKkZ/g==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Date": "Wed, 30 Jun 2021 15:46:07 +0300",
        "Message-ID": "<20210630124609.8711-21-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20210630124609.8711-1-suanmingm@nvidia.com>",
        "References": "<20210527093403.1153127-1-suanmingm@nvidia.com>\n <20210630124609.8711-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "fb0b1d2c-2645-4468-dcea-08d93bc52c41",
        "X-MS-TrafficTypeDiagnostic": "DM6PR12MB3227:",
        "X-Microsoft-Antispam-PRVS": "\n <DM6PR12MB32279E7F3686D84AD1495DBAC1019@DM6PR12MB3227.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:72;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n QWbWiVq3aM7ASQlvY/6RyDIoHZzv0NVrePio72IM5lonzO9AucnLvGMCij6/j0xmTd1KW8I3unE0RBB1a/ylHVuWh9ED0EOJXiT1TTSS20YjIbOIcry1K2AZ8qNIDj+EX/rDvKYuL51rZPa5BPxTFGHQI8q1V1cJqx3P4wn1uIu+UFjn13kPSIUkoJ/Bh+pBNB7u2rcXGaH/Z97Z8MLWU/8NHXMl7FFDrPQleRH0UqM+Vdtk2utjGAqgdoaE2suZpKbT1OQ1cs3DFZ91+YXHagWZRMGxaFfO/3346E00nshiBhPbPjwdsnOABv8wukFiwtjO4zegn5rNujN27aEQKKHZ8fkl1ollCXD24t/GIMAWMI+17qjwu8Sv/qmFYYwYLBRoPuSCtSCjcMnhi5PXGddOLB/ScN+0CurK778yux5as26D2zbyUfFgEXCzj/xNmmBMzXLI300afrjausMnQMKHAgqhb3naUS3vNhQvOedkevNZBzdHQehUSxd54bIYo/ZQaPqvbsIocafQbNPERbR7ly1l9vR/NmSczPFNRIgVtIN/Jb86WCSrhQj6FnNijgerb2xaVybXDwvNZ8GOM11i9ch2ud5txCUieq3Z1dox2ySdtHQtGMuzmCHXhqD5E4OjSXE8kDNYBGvEM+ZjQsQA7T/IaWMUEz+qjP73nGy5CHLflYBW3P3AekZ+vQNeruU4Vv0HSNxPpy0bscMNsA==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(346002)(136003)(396003)(39860400002)(376002)(46966006)(36840700001)(7696005)(5660300002)(2616005)(6636002)(2906002)(478600001)(47076005)(70206006)(8936002)(36756003)(86362001)(36860700001)(1076003)(8676002)(82310400003)(70586007)(26005)(426003)(110136005)(55016002)(336012)(316002)(4326008)(83380400001)(7636003)(6286002)(356005)(54906003)(186003)(82740400003)(16526019);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "30 Jun 2021 12:47:08.4137 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n fb0b1d2c-2645-4468-dcea-08d93bc52c41",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT013.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM6PR12MB3227",
        "Subject": "[dpdk-dev] [PATCH v2 20/22] net/mlx5: support index pool none local\n core operations",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This commit supports the index pool none local core operations with\nan extra cache.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/net/mlx5/mlx5_utils.c | 75 +++++++++++++++++++++++++----------\n drivers/net/mlx5/mlx5_utils.h |  3 +-\n 2 files changed, 56 insertions(+), 22 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c\nindex 94abe79860..c34d6d62a8 100644\n--- a/drivers/net/mlx5/mlx5_utils.c\n+++ b/drivers/net/mlx5/mlx5_utils.c\n@@ -114,6 +114,7 @@ mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)\n \t\t\tmlx5_trunk_idx_offset_get(pool, TRUNK_MAX_IDX + 1);\n \tif (!cfg->per_core_cache)\n \t\tpool->free_list = TRUNK_INVALID;\n+\trte_spinlock_init(&pool->nlcore_lock);\n \treturn pool;\n }\n \n@@ -354,20 +355,14 @@ mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx)\n }\n \n static void *\n-mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)\n+_mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)\n {\n \tstruct mlx5_indexed_trunk *trunk;\n \tstruct mlx5_indexed_cache *lc;\n \tuint32_t trunk_idx;\n \tuint32_t entry_idx;\n-\tint cidx;\n \n \tMLX5_ASSERT(idx);\n-\tcidx = rte_lcore_index(rte_lcore_id());\n-\tif (unlikely(cidx == -1)) {\n-\t\trte_errno = ENOTSUP;\n-\t\treturn NULL;\n-\t}\n \tlc = mlx5_ipool_update_global_cache(pool, cidx);\n \tidx -= 1;\n \ttrunk_idx = mlx5_trunk_idx_get(pool, idx);\n@@ -378,15 +373,27 @@ mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)\n }\n \n static void *\n-mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)\n+mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)\n {\n+\tvoid *entry;\n \tint cidx;\n \n \tcidx = rte_lcore_index(rte_lcore_id());\n \tif (unlikely(cidx == -1)) {\n-\t\trte_errno = ENOTSUP;\n-\t\treturn NULL;\n+\t\tcidx = RTE_MAX_LCORE;\n+\t\trte_spinlock_lock(&pool->nlcore_lock);\n \t}\n+\tentry = _mlx5_ipool_get_cache(pool, cidx, idx);\n+\tif (unlikely(cidx == RTE_MAX_LCORE))\n+\t\trte_spinlock_unlock(&pool->nlcore_lock);\n+\treturn entry;\n+}\n+\n+\n+static void *\n+_mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, int cidx,\n+\t\t\t uint32_t *idx)\n+{\n \tif (unlikely(!pool->cache[cidx])) {\n \t\tpool->cache[cidx] = pool->cfg.malloc(MLX5_MEM_ZERO,\n \t\t\tsizeof(struct mlx5_ipool_per_lcore) +\n@@ -399,29 +406,40 @@ mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)\n \t} else if (pool->cache[cidx]->len) {\n \t\tpool->cache[cidx]->len--;\n \t\t*idx = pool->cache[cidx]->idx[pool->cache[cidx]->len];\n-\t\treturn mlx5_ipool_get_cache(pool, *idx);\n+\t\treturn _mlx5_ipool_get_cache(pool, cidx, *idx);\n \t}\n \t/* Not enough idx in global cache. Keep fetching from global. 
*/\n \t*idx = mlx5_ipool_allocate_from_global(pool, cidx);\n \tif (unlikely(!(*idx)))\n \t\treturn NULL;\n-\treturn mlx5_ipool_get_cache(pool, *idx);\n+\treturn _mlx5_ipool_get_cache(pool, cidx, *idx);\n }\n \n-static void\n-mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)\n+static void *\n+mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)\n {\n+\tvoid *entry;\n \tint cidx;\n+\n+\tcidx = rte_lcore_index(rte_lcore_id());\n+\tif (unlikely(cidx == -1)) {\n+\t\tcidx = RTE_MAX_LCORE;\n+\t\trte_spinlock_lock(&pool->nlcore_lock);\n+\t}\n+\tentry = _mlx5_ipool_malloc_cache(pool, cidx, idx);\n+\tif (unlikely(cidx == RTE_MAX_LCORE))\n+\t\trte_spinlock_unlock(&pool->nlcore_lock);\n+\treturn entry;\n+}\n+\n+static void\n+_mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)\n+{\n \tstruct mlx5_ipool_per_lcore *ilc;\n \tstruct mlx5_indexed_cache *gc, *olc = NULL;\n \tuint32_t reclaim_num = 0;\n \n \tMLX5_ASSERT(idx);\n-\tcidx = rte_lcore_index(rte_lcore_id());\n-\tif (unlikely(cidx == -1)) {\n-\t\trte_errno = ENOTSUP;\n-\t\treturn;\n-\t}\n \t/*\n \t * When index was allocated on core A but freed on core B. In this\n \t * case check if local cache on core B was allocated before.\n@@ -464,6 +482,21 @@ mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)\n \tpool->cache[cidx]->len++;\n }\n \n+static void\n+mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)\n+{\n+\tint cidx;\n+\n+\tcidx = rte_lcore_index(rte_lcore_id());\n+\tif (unlikely(cidx == -1)) {\n+\t\tcidx = RTE_MAX_LCORE;\n+\t\trte_spinlock_lock(&pool->nlcore_lock);\n+\t}\n+\t_mlx5_ipool_free_cache(pool, cidx, idx);\n+\tif (unlikely(cidx == RTE_MAX_LCORE))\n+\t\trte_spinlock_unlock(&pool->nlcore_lock);\n+}\n+\n void *\n mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)\n {\n@@ -643,7 +676,7 @@ mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)\n \tMLX5_ASSERT(pool);\n \tmlx5_ipool_lock(pool);\n \tif (pool->cfg.per_core_cache) {\n-\t\tfor (i = 0; i < RTE_MAX_LCORE; i++) {\n+\t\tfor (i = 0; i <= RTE_MAX_LCORE; i++) {\n \t\t\t/*\n \t\t\t * Free only old global cache. Pool gc will be\n \t\t\t * freed at last.\n@@ -712,7 +745,7 @@ mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)\n \tfor (i = 0; i < gc->len; i++)\n \t\trte_bitmap_clear(ibmp, gc->idx[i] - 1);\n \t/* Clear core cache. */\n-\tfor (i = 0; i < RTE_MAX_LCORE; i++) {\n+\tfor (i = 0; i < RTE_MAX_LCORE + 1; i++) {\n \t\tstruct mlx5_ipool_per_lcore *ilc = pool->cache[i];\n \n \t\tif (!ilc)\ndiff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h\nindex 7d9b64c877..060c52f022 100644\n--- a/drivers/net/mlx5/mlx5_utils.h\n+++ b/drivers/net/mlx5/mlx5_utils.h\n@@ -248,6 +248,7 @@ struct mlx5_ipool_per_lcore {\n struct mlx5_indexed_pool {\n \tstruct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */\n \trte_spinlock_t rsz_lock; /* Pool lock for multiple thread usage. */\n+\trte_spinlock_t nlcore_lock;\n \t/* Dim of trunk pointer array. */\n \tunion {\n \t\tstruct {\n@@ -259,7 +260,7 @@ struct mlx5_indexed_pool {\n \t\tstruct {\n \t\t\tstruct mlx5_indexed_cache *gc;\n \t\t\t/* Global cache. */\n-\t\t\tstruct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE];\n+\t\t\tstruct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];\n \t\t\t/* Local cache. */\n \t\t\tstruct rte_bitmap *ibmp;\n \t\t\tvoid *bmp_mem;\n",
    "prefixes": [
        "v2",
        "20/22"
    ]
}
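
The mbox and series URLs in the payload above are the useful handles for automation. A minimal sketch (again Python requests; the output filename is arbitrary) that saves this patch's mbox so it can be applied locally with git am, and prints the mbox of the whole v2 series:

    import requests

    patch = requests.get("http://patchwork.dpdk.org/api/patches/95074/").json()

    # Save the raw patch email referenced by the "mbox" field.
    mbox = requests.get(patch["mbox"])
    mbox.raise_for_status()
    with open("patch-95074.mbox", "wb") as f:
        f.write(mbox.content)
    print("apply with: git am patch-95074.mbox")

    # The series entry bundles all 22 patches of v2 in a single mbox.
    print("full series:", patch["series"][0]["mbox"])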