get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
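
A minimal sketch of driving these methods from Python, assuming the requests library and Patchwork's token-based "Authorization: Token ..." header; the patch ID and the "state" field come from the example response below, but which fields an account may actually change depends on per-project maintainer permissions, so treat the PATCH call as illustrative only.

import requests

BASE = "http://patchwork.dpdk.org/api"
PATCH_ID = 137445  # the patch shown in the example response below

# get: read the patch as JSON (no authentication needed on public projects)
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/", timeout=30)
resp.raise_for_status()
print(resp.json()["state"])  # "superseded" in the response below

# patch: partial update -- assumed to require a maintainer API token
token = "YOUR_API_TOKEN"  # placeholder; generated from a Patchwork user profile
update = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {token}"},
    json={"state": "accepted"},  # assumption: "state" is writable for maintainers
    timeout=30,
)
update.raise_for_status()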

GET /api/patches/137445/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 137445,
    "url": "http://patchwork.dpdk.org/api/patches/137445/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20240228170046.176600-5-dsosnowski@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20240228170046.176600-5-dsosnowski@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20240228170046.176600-5-dsosnowski@nvidia.com",
    "date": "2024-02-28T17:00:39",
    "name": "[04/11] net/mlx5: skip the unneeded resource index allocation",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "77e87bfbe1dbc0d0194db9c6a6d29705900ac362",
    "submitter": {
        "id": 2386,
        "url": "http://patchwork.dpdk.org/api/people/2386/?format=api",
        "name": "Dariusz Sosnowski",
        "email": "dsosnowski@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20240228170046.176600-5-dsosnowski@nvidia.com/mbox/",
    "series": [
        {
            "id": 31278,
            "url": "http://patchwork.dpdk.org/api/series/31278/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=31278",
            "date": "2024-02-28T17:00:35",
            "name": "net/mlx5: flow insertion performance improvements",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/31278/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/137445/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/137445/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CB12D43C2C;\n\tWed, 28 Feb 2024 18:02:26 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 479A542F23;\n\tWed, 28 Feb 2024 18:01:53 +0100 (CET)",
            "from NAM11-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam11on2041.outbound.protection.outlook.com [40.107.236.41])\n by mails.dpdk.org (Postfix) with ESMTP id 0570742F23\n for <dev@dpdk.org>; Wed, 28 Feb 2024 18:01:50 +0100 (CET)",
            "from MN2PR07CA0028.namprd07.prod.outlook.com (2603:10b6:208:1a0::38)\n by PH7PR12MB9256.namprd12.prod.outlook.com (2603:10b6:510:2fe::21)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7316.36; Wed, 28 Feb\n 2024 17:01:45 +0000",
            "from BL02EPF0001A106.namprd05.prod.outlook.com\n (2603:10b6:208:1a0:cafe::2c) by MN2PR07CA0028.outlook.office365.com\n (2603:10b6:208:1a0::38) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7339.27 via Frontend\n Transport; Wed, 28 Feb 2024 17:01:45 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n BL02EPF0001A106.mail.protection.outlook.com (10.167.241.139) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.7292.25 via Frontend Transport; Wed, 28 Feb 2024 17:01:45 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Wed, 28 Feb\n 2024 09:01:14 -0800",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Wed, 28 Feb\n 2024 09:01:11 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=S/Ri3HM4FNv6OYanW4gg/l7FXnYgkKTq3+kEcw0bCoGcTM/vYDx1hn6m1QGAHcpQA69lpLyvytN6pNfAjrFtcBbJF76aeuQSye3QJwoC1wZMNL+ZbEmUeLcUNYmTNg7ZImKAtdMWnDtZ1eB4HutqNWlqdPGIO4PRb+mnxstbuzRotC2m5DGy+3bXXw8A1SoLBu3RPBqxqNOA2HmETvHPspUlH5K8SUp2bzv4Dybms58A13x4NKLWUvh+xm9UFwRjyne8MK35VZBxFI1kzKTJ0nLJWMBdPX0Mc8u0dcI4y79cjuYrCDRzRn6wkfhaDcKRKamwGIl/5+nF8uLklXaN2g==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=uykZfzmBKZKNlUtgIDo1KqXYFnE12NthjW136auWHYA=;\n b=ZBX6EpE6yvMlFNnBCvnd9WDcbukokROAdB6dOZJV/5ajNaFOsM5tTgKre8IVOBksuWlc+w5VV/zEay6w3hXOiiluXbswDpvrEKV9EoBCRrpkqKPuBIaunasZ5pPYDx8Cc1nknOHaUIdeUAie5clkpUsaqkTJoXo3i2j26+ZWIF2ndLeFf1y3NqUnRqZv1BygfAEpQ7nHdLPQKczH9J9+EmvdC6k6R4/yyzR2fVrpLmLlZZjPUFcvuvbPeU4Q4otHfRyWOXkBtU/Al6bIQCzYkygCgVY+UvU1NPhm7SB0awLK9ZC5jKmBas+MznWQDCtn5fWTFbAMliSoNvwzwbprew==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=uykZfzmBKZKNlUtgIDo1KqXYFnE12NthjW136auWHYA=;\n b=ZNxmeNNh4imZh/BNoxQpBvM4F4s2R8VmES3683jIDHccrxspBg0GzT3jalvqBH8iFdTTpt/d7dFGzcPXuJfPwbdPatsRi/aBvnpTUyKS/+mQ/Bd4fCuQhaxcN4cND0tfrHb/f/x9a2dlHHmKl9et2afhXFYlj6f3Inwhbl338Q+JNouTPnyVEJa8wBkA8BWj60AgdUoZpsH8SuqAV915L/e24+CQa9dlXT76YgNPfpT2ZWUKoUT/fsRDCVh1WLD7uq9j/+BU27HDqY5VPxvF26S8q60rDkez36JxA+nZ7+KL/g8wTNJpk7/mi/s81jiSNJAGy1MagMrke5xs/fi85A==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Dariusz Sosnowski <dsosnowski@nvidia.com>",
        "To": "Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>,\n Suanming Mou <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, Raslan Darawsheh <rasland@nvidia.com>, Bing Zhao\n <bingz@nvidia.com>",
        "Subject": "[PATCH 04/11] net/mlx5: skip the unneeded resource index allocation",
        "Date": "Wed, 28 Feb 2024 18:00:39 +0100",
        "Message-ID": "<20240228170046.176600-5-dsosnowski@nvidia.com>",
        "X-Mailer": "git-send-email 2.39.2",
        "In-Reply-To": "<20240228170046.176600-1-dsosnowski@nvidia.com>",
        "References": "<20240228170046.176600-1-dsosnowski@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "BL02EPF0001A106:EE_|PH7PR12MB9256:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "90c4eb60-55fb-47d0-e117-08dc387ef1ee",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n Lvcbh6h06HS4uII2HKZnL/Cj3Wkua21CaUDCUCDPEPI1oTqct0rfINed6t7FxsKJwnsTx9pq9Ku6PoArqMCnxf5a1Y4OZ/amGg2TjQNQr3UmSJhTPKxkR+sYMJNwcvB7Mj/TQEK0x4F8/oYZb8GiFSPJloG0V13AyEN0/iEoo1jGkY9DpQUohsfLLhxo3elCIR3GDITpEVtb+KfFlDBO3JAO11SRg38m7jjDFKZqd6vtpzuo03Jp61/vsuKyZnISgIHi+SrOKVCBFEY0tA1XRQLhnAPpVtBdJ7HRG5qQ2Pce/6hB5o7YV6rdATOqq2TfdzD1H8EtbMSWlRqGfORCBEvnmKEBMjm9v3MAWlpImvCBFG2EpiP5+4ULvl9QairICYuNpzQWvaPPRBqL5i5thkAUcwQ03tDfRXGDbDvMpaGrOspBzUT0l4XnwWS5tyHMpiJgqjYkgEmpRYCCvseziGyJK9zNopXvHJXEPDGQj1bdZctk/sQIvyYXyKHXKSCxM/ZvRdo6Jpu6FTkBlqHFDtzRqqTEM3+kMZTkVHPghHTyTwCQGkKCNtUo059FVHgq4xvm1aL8WjHKrx7/I6nmfoVg4HTQZlKoDwrJh6KgsakhTVKaqGsiI1nrmSsyzq66mREBEaVt7BUnPMWnvK3qS/ZxYQ9upYRYRigCEEctFj66QWIY9UWZhH29cMytAev1pITS8IaPafN9Hf1P9738HR/kw1N7DXHMmKfgY7SFLx/7/gigFAi5n1PcQcJUbvYh",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230031)(230273577357003)(82310400014)(36860700004); DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "28 Feb 2024 17:01:45.3423 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 90c4eb60-55fb-47d0-e117-08dc387ef1ee",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BL02EPF0001A106.namprd05.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PH7PR12MB9256",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Bing Zhao <bingz@nvidia.com>\n\nThe resource index was introduced to decouple the flow rule and its\nresources used by hardware steering. This is needed only when a rule\nupdate is supported.\n\nIn some cases, the update is not supported on a table(matcher). E.g.:\n  * Table is resizable\n  * FW gets involved\n  * Root table\n  * Not index based or optimized (not applicable)\n\nOr only one STE entry is required per rule. When doing an update, the\noperation is always atomic. There is no need for the extra resource\nindex either.\n\nIf the matcher doesn't support rule update or the maximal entry is\nonly 1 for this matcher, there is no need to manage the resource\nindex allocation and free from the pool.\n\nSigned-off-by: Bing Zhao <bingz@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow_hw.c | 129 +++++++++++++++++++-------------\n 1 file changed, 76 insertions(+), 53 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex 442237f2b6..fcf493c771 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -3364,9 +3364,6 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \tflow = mlx5_ipool_zmalloc(table->flow, &flow_idx);\n \tif (!flow)\n \t\tgoto error;\n-\tmlx5_ipool_malloc(table->resource, &res_idx);\n-\tif (!res_idx)\n-\t\tgoto error;\n \trule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);\n \t/*\n \t * Set the table here in order to know the destination table\n@@ -3375,7 +3372,14 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \tflow->table = table;\n \tflow->mt_idx = pattern_template_index;\n \tflow->idx = flow_idx;\n-\tflow->res_idx = res_idx;\n+\tif (table->resource) {\n+\t\tmlx5_ipool_malloc(table->resource, &res_idx);\n+\t\tif (!res_idx)\n+\t\t\tgoto error;\n+\t\tflow->res_idx = res_idx;\n+\t} else {\n+\t\tflow->res_idx = flow_idx;\n+\t}\n \t/*\n \t * Set the job type here in order to know if the flow memory\n \t * should be freed or not when get the result from dequeue.\n@@ -3385,11 +3389,10 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \tjob->user_data = user_data;\n \trule_attr.user_data = job;\n \t/*\n-\t * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices for rule\n-\t * insertion hints.\n+\t * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices\n+\t * for rule insertion hints.\n \t */\n-\tMLX5_ASSERT(res_idx > 0);\n-\tflow->rule_idx = res_idx - 1;\n+\tflow->rule_idx = flow->res_idx - 1;\n \trule_attr.rule_idx = flow->rule_idx;\n \t/*\n \t * Construct the flow actions based on the input actions.\n@@ -3432,12 +3435,12 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \tif (likely(!ret))\n \t\treturn (struct rte_flow *)flow;\n error:\n-\tif (job)\n-\t\tflow_hw_job_put(priv, job, queue);\n+\tif (table->resource && res_idx)\n+\t\tmlx5_ipool_free(table->resource, res_idx);\n \tif (flow_idx)\n \t\tmlx5_ipool_free(table->flow, flow_idx);\n-\tif (res_idx)\n-\t\tmlx5_ipool_free(table->resource, res_idx);\n+\tif (job)\n+\t\tflow_hw_job_put(priv, job, queue);\n \trte_flow_error_set(error, rte_errno,\n \t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t   \"fail to create rte flow\");\n@@ -3508,9 +3511,6 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \tflow = mlx5_ipool_zmalloc(table->flow, &flow_idx);\n \tif (!flow)\n \t\tgoto error;\n-\tmlx5_ipool_malloc(table->resource, &res_idx);\n-\tif (!res_idx)\n-\t\tgoto error;\n \trule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);\n \t/*\n \t * Set the table here in order to know the destination table\n@@ -3519,7 +3519,14 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \tflow->table = table;\n \tflow->mt_idx = 0;\n \tflow->idx = flow_idx;\n-\tflow->res_idx = res_idx;\n+\tif (table->resource) {\n+\t\tmlx5_ipool_malloc(table->resource, &res_idx);\n+\t\tif (!res_idx)\n+\t\t\tgoto error;\n+\t\tflow->res_idx = res_idx;\n+\t} else {\n+\t\tflow->res_idx = flow_idx;\n+\t}\n \t/*\n \t * Set the job type here in order to know if the flow memory\n \t * should be freed or not when get the result from dequeue.\n@@ -3528,9 +3535,7 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \tjob->flow = flow;\n \tjob->user_data = user_data;\n \trule_attr.user_data = job;\n-\t/*\n-\t * Set the rule index.\n-\t */\n+\t/* Set the rule index. */\n \tflow->rule_idx = rule_index;\n \trule_attr.rule_idx = flow->rule_idx;\n \t/*\n@@ -3566,12 +3571,12 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \tif (likely(!ret))\n \t\treturn (struct rte_flow *)flow;\n error:\n-\tif (job)\n-\t\tflow_hw_job_put(priv, job, queue);\n-\tif (res_idx)\n+\tif (table->resource && res_idx)\n \t\tmlx5_ipool_free(table->resource, res_idx);\n \tif (flow_idx)\n \t\tmlx5_ipool_free(table->flow, flow_idx);\n+\tif (job)\n+\t\tflow_hw_job_put(priv, job, queue);\n \trte_flow_error_set(error, rte_errno,\n \t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t   \"fail to create rte flow\");\n@@ -3634,9 +3639,6 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n-\tmlx5_ipool_malloc(table->resource, &res_idx);\n-\tif (!res_idx)\n-\t\tgoto error;\n \tnf = job->upd_flow;\n \tmemset(nf, 0, sizeof(struct rte_flow_hw));\n \trule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);\n@@ -3647,7 +3649,14 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \tnf->table = table;\n \tnf->mt_idx = of->mt_idx;\n \tnf->idx = of->idx;\n-\tnf->res_idx = res_idx;\n+\tif (table->resource) {\n+\t\tmlx5_ipool_malloc(table->resource, &res_idx);\n+\t\tif (!res_idx)\n+\t\t\tgoto error;\n+\t\tnf->res_idx = res_idx;\n+\t} else {\n+\t\tnf->res_idx = of->res_idx;\n+\t}\n \t/*\n \t * Set the job type here in order to know if the flow memory\n \t * should be freed or not when get the result from dequeue.\n@@ -3657,11 +3666,11 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \tjob->user_data = user_data;\n \trule_attr.user_data = job;\n \t/*\n-\t * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices for rule\n-\t * insertion hints.\n+\t * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices\n+\t * for rule insertion hints.\n+\t * If there is only one STE, the update will be atomic by nature.\n \t */\n-\tMLX5_ASSERT(res_idx > 0);\n-\tnf->rule_idx = res_idx - 1;\n+\tnf->rule_idx = nf->res_idx - 1;\n \trule_attr.rule_idx = nf->rule_idx;\n \t/*\n \t * Construct the flow actions based on the input actions.\n@@ -3687,14 +3696,14 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \tif (likely(!ret))\n \t\treturn 0;\n error:\n+\tif (table->resource && res_idx)\n+\t\tmlx5_ipool_free(table->resource, res_idx);\n \t/* Flow created fail, return the descriptor and flow memory. */\n \tif (job)\n \t\tflow_hw_job_put(priv, job, queue);\n-\tif (res_idx)\n-\t\tmlx5_ipool_free(table->resource, res_idx);\n \treturn rte_flow_error_set(error, rte_errno,\n-\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\"fail to update rte flow\");\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t  \"fail to update rte flow\");\n }\n \n /**\n@@ -3949,13 +3958,15 @@ hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,\n \t}\n \tif (job->type != MLX5_HW_Q_JOB_TYPE_UPDATE) {\n \t\tif (table) {\n-\t\t\tmlx5_ipool_free(table->resource, res_idx);\n+\t\t\tif (table->resource)\n+\t\t\t\tmlx5_ipool_free(table->resource, res_idx);\n \t\t\tmlx5_ipool_free(table->flow, flow->idx);\n \t\t}\n \t} else {\n \t\trte_memcpy(flow, job->upd_flow,\n \t\t\t   offsetof(struct rte_flow_hw, rule));\n-\t\tmlx5_ipool_free(table->resource, res_idx);\n+\t\tif (table->resource)\n+\t\t\tmlx5_ipool_free(table->resource, res_idx);\n \t}\n }\n \n@@ -4455,6 +4466,7 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \tuint32_t i = 0, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;\n \tuint32_t nb_flows = rte_align32pow2(attr->nb_flows);\n \tbool port_started = !!dev->data->dev_started;\n+\tbool rpool_needed;\n \tsize_t tbl_mem_size;\n \tint err;\n \n@@ -4492,13 +4504,6 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \ttbl->flow = mlx5_ipool_create(&cfg);\n \tif (!tbl->flow)\n \t\tgoto error;\n-\t/* Allocate rule indexed pool. */\n-\tcfg.size = 0;\n-\tcfg.type = \"mlx5_hw_table_rule\";\n-\tcfg.max_idx += priv->hw_q[0].size;\n-\ttbl->resource = mlx5_ipool_create(&cfg);\n-\tif (!tbl->resource)\n-\t\tgoto error;\n \t/* Register the flow group. */\n \tge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);\n \tif (!ge)\n@@ -4578,12 +4583,30 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \ttbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :\n \t\t    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :\n \t\t    MLX5DR_TABLE_TYPE_NIC_RX);\n+\t/*\n+\t * Only the matcher supports update and needs more than 1 WQE, an additional\n+\t * index is needed. Or else the flow index can be reused.\n+\t */\n+\trpool_needed = mlx5dr_matcher_is_updatable(tbl->matcher_info[0].matcher) &&\n+\t\t       mlx5dr_matcher_is_dependent(tbl->matcher_info[0].matcher);\n+\tif (rpool_needed) {\n+\t\t/* Allocate rule indexed pool. */\n+\t\tcfg.size = 0;\n+\t\tcfg.type = \"mlx5_hw_table_rule\";\n+\t\tcfg.max_idx += priv->hw_q[0].size;\n+\t\ttbl->resource = mlx5_ipool_create(&cfg);\n+\t\tif (!tbl->resource)\n+\t\t\tgoto res_error;\n+\t}\n \tif (port_started)\n \t\tLIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);\n \telse\n \t\tLIST_INSERT_HEAD(&priv->flow_hw_tbl_ongo, tbl, next);\n \trte_rwlock_init(&tbl->matcher_replace_rwlk);\n \treturn tbl;\n+res_error:\n+\tif (tbl->matcher_info[0].matcher)\n+\t\t(void)mlx5dr_matcher_destroy(tbl->matcher_info[0].matcher);\n at_error:\n \tfor (i = 0; i < nb_action_templates; i++) {\n \t\t__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);\n@@ -4601,8 +4624,6 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \t\tif (tbl->grp)\n \t\t\tmlx5_hlist_unregister(priv->sh->groups,\n \t\t\t\t\t      &tbl->grp->entry);\n-\t\tif (tbl->resource)\n-\t\t\tmlx5_ipool_destroy(tbl->resource);\n \t\tif (tbl->flow)\n \t\t\tmlx5_ipool_destroy(tbl->flow);\n \t\tmlx5_free(tbl);\n@@ -4811,12 +4832,13 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,\n \tuint32_t ridx = 1;\n \n \t/* Build ipool allocated object bitmap. */\n-\tmlx5_ipool_flush_cache(table->resource);\n+\tif (table->resource)\n+\t\tmlx5_ipool_flush_cache(table->resource);\n \tmlx5_ipool_flush_cache(table->flow);\n \t/* Check if ipool has allocated objects. */\n \tif (table->refcnt ||\n \t    mlx5_ipool_get_next(table->flow, &fidx) ||\n-\t    mlx5_ipool_get_next(table->resource, &ridx)) {\n+\t    (table->resource && mlx5_ipool_get_next(table->resource, &ridx))) {\n \t\tDRV_LOG(WARNING, \"Table %p is still in use.\", (void *)table);\n \t\treturn rte_flow_error_set(error, EBUSY,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n@@ -4838,7 +4860,8 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,\n \tif (table->matcher_info[1].matcher)\n \t\tmlx5dr_matcher_destroy(table->matcher_info[1].matcher);\n \tmlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);\n-\tmlx5_ipool_destroy(table->resource);\n+\tif (table->resource)\n+\t\tmlx5_ipool_destroy(table->resource);\n \tmlx5_ipool_destroy(table->flow);\n \tmlx5_free(table);\n \treturn 0;\n@@ -12340,11 +12363,11 @@ flow_hw_table_resize(struct rte_eth_dev *dev,\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\t  table, \"cannot resize flows pool\");\n-\tret = mlx5_ipool_resize(table->resource, nb_flows);\n-\tif (ret)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t\t  table, \"cannot resize resources pool\");\n+\t/*\n+\t * A resizable matcher doesn't support rule update. In this case, the ipool\n+\t * for the resource is not created and there is no need to resize it.\n+\t */\n+\tMLX5_ASSERT(!table->resource);\n \tif (mlx5_is_multi_pattern_active(&table->mpctx)) {\n \t\tret = flow_hw_table_resize_multi_pattern_actions(dev, table, nb_flows, error);\n \t\tif (ret < 0)\n",
    "prefixes": [
        "04/11"
    ]
}
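
The mbox URL returned above can be used to pull the patch itself for local review. A small sketch, again assuming the requests library, that follows the "mbox" field of the JSON response and saves the result; the output file name is arbitrary, and the saved file is in the mbox format that git am consumes.

import requests

api_url = "http://patchwork.dpdk.org/api/patches/137445/"
patch = requests.get(api_url, timeout=30).json()

# "mbox" points at the patch in mbox form; "diff" above already holds the raw diff text
mbox = requests.get(patch["mbox"], timeout=30)
mbox.raise_for_status()
with open("net-mlx5-skip-unneeded-resource-index-allocation.mbox", "wb") as fh:
    fh.write(mbox.content)
# apply locally with: git am net-mlx5-skip-unneeded-resource-index-allocation.mbox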