get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request body are changed.

put:
Update a patch, replacing its full set of mutable fields.
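
In practice these verbs are plain REST calls: GET is anonymous and read-only, while PATCH and PUT modify patch metadata (for example state, delegate, and archived) and require an API token. Below is a minimal sketch using Python's requests library; the token value and the target state "accepted" are illustrative assumptions, not part of this page:

    import requests

    BASE_URL = "http://patchwork.dpdk.org/api"

    # Read-only fetch: no credentials needed.
    patch = requests.get(f"{BASE_URL}/patches/137446/").json()
    print(patch["name"], "->", patch["state"])

    # Partial update: only the fields present in the body are changed.
    # "REPLACE_WITH_TOKEN" is a placeholder for a maintainer API token.
    resp = requests.patch(
        f"{BASE_URL}/patches/137446/",
        headers={"Authorization": "Token REPLACE_WITH_TOKEN"},
        json={"state": "accepted"},
    )
    resp.raise_for_status()

A successful anonymous GET produces the exchange shown below.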

GET /api/patches/137446/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 137446,
    "url": "http://patchwork.dpdk.org/api/patches/137446/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20240228170046.176600-8-dsosnowski@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20240228170046.176600-8-dsosnowski@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20240228170046.176600-8-dsosnowski@nvidia.com",
    "date": "2024-02-28T17:00:42",
    "name": "[07/11] net/mlx5: remove updated flow from job",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "3fdce5b634d842efa83fafed666855f80d65095a",
    "submitter": {
        "id": 2386,
        "url": "http://patchwork.dpdk.org/api/people/2386/?format=api",
        "name": "Dariusz Sosnowski",
        "email": "dsosnowski@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20240228170046.176600-8-dsosnowski@nvidia.com/mbox/",
    "series": [
        {
            "id": 31278,
            "url": "http://patchwork.dpdk.org/api/series/31278/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=31278",
            "date": "2024-02-28T17:00:35",
            "name": "net/mlx5: flow insertion performance improvements",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/31278/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/137446/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/137446/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 36A2143C2C;\n\tWed, 28 Feb 2024 18:02:36 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 80BE042F47;\n\tWed, 28 Feb 2024 18:01:54 +0100 (CET)",
            "from NAM11-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam11on2058.outbound.protection.outlook.com [40.107.223.58])\n by mails.dpdk.org (Postfix) with ESMTP id DEB9942F23\n for <dev@dpdk.org>; Wed, 28 Feb 2024 18:01:50 +0100 (CET)",
            "from BY3PR10CA0011.namprd10.prod.outlook.com (2603:10b6:a03:255::16)\n by PH7PR12MB7820.namprd12.prod.outlook.com (2603:10b6:510:268::8)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7316.39; Wed, 28 Feb\n 2024 17:01:44 +0000",
            "from SJ1PEPF00001CE3.namprd05.prod.outlook.com\n (2603:10b6:a03:255:cafe::eb) by BY3PR10CA0011.outlook.office365.com\n (2603:10b6:a03:255::16) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7339.28 via Frontend\n Transport; Wed, 28 Feb 2024 17:01:43 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n SJ1PEPF00001CE3.mail.protection.outlook.com (10.167.242.11) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.7292.25 via Frontend Transport; Wed, 28 Feb 2024 17:01:43 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Wed, 28 Feb\n 2024 09:01:19 -0800",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Wed, 28 Feb\n 2024 09:01:17 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=GERmOGwTzcwWT8/ZX0daPDEJcibDpzJOUyUdnvpgXf3zDfkQ3Cpu8CjMSECHgEtHqJ/P2MoLZGVQmq7+Y0XaCNZBcVgOAdu+heZophPd4R1dUOCheaGE9gJwfD3hgXBqBVlfON8mHG1ha4SlgPZ3wqe8CoUqWiKaFpd7P9ZQZ15liXT0Wkv1fIicwjRrHLNdYn04+QUJaTr+ROVKSw18BJ0SwhDA9/p9tluRyXTbfNGi4MKSruiJPlWL0KmrnmECYwY2sUjem9rhRmo4JJQMHj5jZeXLjuShAg+9Han/zkEzeuu0cFzKa586yCArBa3MuxlGY+vZZD13F0NfiJCnVA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=85oW0FR/VaKiJuUnu05JCYmxaQKgpt/mHqhNNZ5fGl0=;\n b=H93bq2weimiEY/a+5jOEFZaN5Hl+eumc2pxbcJ0yWxFlI88YU7oncBNgib1pZYPzK6pMngViC00HhqN/PC6imGTkrSuullNz5+jbtbMhvm5z+x5TU7OE1BFb8uQHgNz7vrMbQWfLYuGByfL4j4YOk/vulEZNGqYho0GgdwoD+UIf/mc/whMCPgBDLbkxO4UPR2Puit2+vjZgTetXYCS9Gu3zzdqGGErzeNIv9pJgGJcNbIA3RSDR7ZcNWY40LoO/Pt7Vmi7ujInCCvyf5NWR4B1z20fPTKGYiKnKL77ElJpzsi2GETwtwLOBpbH+0vX2jeWk6gDVtlu1+alyiU1Q7g==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=85oW0FR/VaKiJuUnu05JCYmxaQKgpt/mHqhNNZ5fGl0=;\n b=PfB0yf1wUt2/kpDt50Y6aOLMNV8zavUW2YWRYC9LDBMv55bz35Z9Kv5i6Lxq2lRhJEKEHpP3KhGZGzW1qE8b43XL4Ki7nuycmK3qhmULGk6MjBWI2zLI/6HFF5/wDIZ9C8q7CgUJJ6GvZVzFbEZA30h0GM6mLpX2AHtKQUhMB9an5hVbT7h/biw95cxr7h86YDZEkL85RlturiEcgWLF2/6y2r1ZjBRPix6LvKrNBONXPwnJm7nPH8qEqD3h5kmI76N4kafhIBsl+YeIwgNhT6BNCqSwz89m0sGxu2puWMpCHSu7AgSbroOFMEXZz8msFpTq7kra38NdivU03KfEWQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Dariusz Sosnowski <dsosnowski@nvidia.com>",
        "To": "Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>,\n Suanming Mou <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, Raslan Darawsheh <rasland@nvidia.com>, Bing Zhao\n <bingz@nvidia.com>",
        "Subject": "[PATCH 07/11] net/mlx5: remove updated flow from job",
        "Date": "Wed, 28 Feb 2024 18:00:42 +0100",
        "Message-ID": "<20240228170046.176600-8-dsosnowski@nvidia.com>",
        "X-Mailer": "git-send-email 2.39.2",
        "In-Reply-To": "<20240228170046.176600-1-dsosnowski@nvidia.com>",
        "References": "<20240228170046.176600-1-dsosnowski@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "SJ1PEPF00001CE3:EE_|PH7PR12MB7820:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "69cdf7f5-dea5-47f0-0a1f-08dc387ef0fe",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n dnquq3Q8brpnaQMnbu1/CD5BVmPGu4WGc7BvtvD/sEWHuxA+6zyDBV1lbJuFtTRxOThLrb2VJahp54+WYGlWZ6L3uPBZN/5UKifc+WXtnX5vje7E58aU0y7EVLMwnIAzHdDMEmw0DFTvUvfbohxF4phWGTguQ6F5mHZk4Khy51G+JcEXL7AUlcURb2x8O3AJDcyoU3DuIvxarUVyyuCrO0XU6QGIMJoCZ7eeSnHbxiKYy1LPJMCsIX2YvJAhG4kVemQM/GzALYNyJt5Yu4ioeFE04YoRGk8f/uB+OM9D1FjQeFhozTKdYj7nv5z0P9NC5jwqJY1mwzwy2B8iQ3LAISKp6BeAEowiAz3lqdBjvfirK8azO6dPAGmkOA9E9QsBxVmiv6gWrv1VKnfsqKRImYxPhGKOVuG+4SWlhIUGL+dOx0J5eIzYUDtunq7obiIQS8eG5AlCtT8WWRCZsZe+/0bQfzGVRv9TkvqLykzU+p9b5eSaPgOU5UV61i8jnd6S7JiCIPosiJuOoRQ48r/GIO99FDWmT/dnPbtLVa26Jk4QGUtYIHUsgqzpbMRBWF3OkT3mKOjhP4EjiNutV3jI0QGiVN/pxm9RgoXZt+ePWQF9tVTOSF+ALCW7GRpyaqeClC6ziBeaKxAJDR6ndOJ8bknuJxi8nsAdvhjaf4Jdw7SHhYQN2wbePnf5tiONsYUm1Lt2OsmoVZs0C7OYO4vs/Op2D6ZTdEj3i5gqdMMgaTkO+hlAB4pzg3pIdkxXTOxv",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230031)(82310400014)(36860700004); DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "28 Feb 2024 17:01:43.9358 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 69cdf7f5-dea5-47f0-0a1f-08dc387ef0fe",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n SJ1PEPF00001CE3.namprd05.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PH7PR12MB7820",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "mlx5_hw_q_job struct held a reference to a temporary flow rule struct,\nused during flow rule update operation. It serves as a container for\nflow actions data calculated during actions construction.\nAfter flow rule update operation succeeds, data from temporary flow rule\nis copied over to original flow rule.\n\nAlthough access to this temporary flow rule struct is required\nduring both operation enqueue step and completion polling step,\nthere can be only one ongoing flow update operation for a given\nflow rule. As a result there is no need to store it per job.\n\nThis patch removes all references to temporary flow rule struct\nstored in mlx5_hw_q_job and removes relevant allocations to reduce\njob memory footprint.\nTemporary flow rule struct stored per job is replaced with:\n\n- If table is not resizable - An array of rte_flow_hw_aux structs,\n  stored in template table. This array holds one entry per each\n  flow rule, each containing a single mentioned temporary struct.\n- If table is resizable - Additional rte_flow_hw_aux struct,\n  allocated alongside rte_flow_hw in resizable ipool.\n\nSigned-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>\n---\n drivers/net/mlx5/mlx5.h         |   1 -\n drivers/net/mlx5/mlx5_flow.h    |   7 +++\n drivers/net/mlx5/mlx5_flow_hw.c | 100 ++++++++++++++++++++++++++------\n 3 files changed, 89 insertions(+), 19 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex fc3d28e6f2..0cc32bf67b 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -407,7 +407,6 @@ struct mlx5_hw_q_job {\n \t\t/* Data extracted from hardware */\n \t\tvoid *hw;\n \t} query;\n-\tstruct rte_flow_hw *upd_flow; /* Flow with updated values. */\n };\n \n /* HW steering job descriptor LIFO pool. */\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 96b43ce61e..8fd07bdce4 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1281,6 +1281,12 @@ struct rte_flow_hw {\n \tuint8_t rule[]; /* HWS layer data struct. */\n } __rte_packed;\n \n+/** Auxiliary data stored per flow which is not required to be stored in main flow structure. */\n+struct rte_flow_hw_aux {\n+\t/** Placeholder flow struct used during flow rule update operation. */\n+\tstruct rte_flow_hw upd_flow;\n+};\n+\n #ifdef PEDANTIC\n #pragma GCC diagnostic error \"-Wpedantic\"\n #endif\n@@ -1589,6 +1595,7 @@ struct rte_flow_template_table {\n \t/* Action templates bind to the table. */\n \tstruct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];\n \tstruct mlx5_indexed_pool *flow; /* The table's flow ipool. */\n+\tstruct rte_flow_hw_aux *flow_aux; /**< Auxiliary data stored per flow. */\n \tstruct mlx5_indexed_pool *resource; /* The table's resource ipool. */\n \tstruct mlx5_flow_template_table_cfg cfg;\n \tuint32_t type; /* Flow table type RX/TX/FDB. */\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex c3d9eef999..acc56819eb 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -79,6 +79,66 @@ struct mlx5_indlst_legacy {\n #define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \\\n (((const struct encap_type *)(ptr))->definition)\n \n+/**\n+ * Returns the size of a struct with a following layout:\n+ *\n+ * @code{.c}\n+ * struct rte_flow_hw {\n+ *     // rte_flow_hw fields\n+ *     uint8_t rule[mlx5dr_rule_get_handle_size()];\n+ * };\n+ * @endcode\n+ *\n+ * Such struct is used as a basic container for HW Steering flow rule.\n+ */\n+static size_t\n+mlx5_flow_hw_entry_size(void)\n+{\n+\treturn sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();\n+}\n+\n+/**\n+ * Returns the size of \"auxed\" rte_flow_hw structure which is assumed to be laid out as follows:\n+ *\n+ * @code{.c}\n+ * struct {\n+ *     struct rte_flow_hw {\n+ *         // rte_flow_hw fields\n+ *         uint8_t rule[mlx5dr_rule_get_handle_size()];\n+ *     } flow;\n+ *     struct rte_flow_hw_aux aux;\n+ * };\n+ * @endcode\n+ *\n+ * Such struct is used whenever rte_flow_hw_aux cannot be allocated separately from the rte_flow_hw\n+ * e.g., when table is resizable.\n+ */\n+static size_t\n+mlx5_flow_hw_auxed_entry_size(void)\n+{\n+\tsize_t rule_size = mlx5dr_rule_get_handle_size();\n+\n+\treturn sizeof(struct rte_flow_hw) + rule_size + sizeof(struct rte_flow_hw_aux);\n+}\n+\n+/**\n+ * Returns a valid pointer to rte_flow_hw_aux associated with given rte_flow_hw\n+ * depending on template table configuration.\n+ */\n+static __rte_always_inline struct rte_flow_hw_aux *\n+mlx5_flow_hw_aux(uint16_t port_id, struct rte_flow_hw *flow)\n+{\n+\tstruct rte_flow_template_table *table = flow->table;\n+\n+\tif (rte_flow_template_table_resizable(port_id, &table->cfg.attr)) {\n+\t\tsize_t offset = sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();\n+\n+\t\treturn RTE_PTR_ADD(flow, offset);\n+\t} else {\n+\t\treturn 
&table->flow_aux[flow->idx - 1];\n+\t}\n+}\n+\n static int\n mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,\n \t\t\t       struct rte_flow_template_table *tbl,\n@@ -3632,6 +3692,7 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \tstruct mlx5_flow_hw_action_params ap;\n \tstruct rte_flow_hw *of = (struct rte_flow_hw *)flow;\n \tstruct rte_flow_hw *nf;\n+\tstruct rte_flow_hw_aux *aux;\n \tstruct rte_flow_template_table *table = of->table;\n \tstruct mlx5_hw_q_job *job = NULL;\n \tuint32_t res_idx = 0;\n@@ -3642,7 +3703,8 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n-\tnf = job->upd_flow;\n+\taux = mlx5_flow_hw_aux(dev->data->port_id, of);\n+\tnf = &aux->upd_flow;\n \tmemset(nf, 0, sizeof(struct rte_flow_hw));\n \trule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);\n \t/*\n@@ -3689,11 +3751,8 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \t\trte_errno = EINVAL;\n \t\tgoto error;\n \t}\n-\t/*\n-\t * Switch the old flow and the new flow.\n-\t */\n+\t/* Switch to the old flow. New flow will retrieved from the table on completion. */\n \tjob->flow = of;\n-\tjob->upd_flow = nf;\n \tret = mlx5dr_rule_action_update((struct mlx5dr_rule *)of->rule,\n \t\t\t\t\taction_template_index, rule_acts, &rule_attr);\n \tif (likely(!ret))\n@@ -3966,8 +4025,10 @@ hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,\n \t\t\tmlx5_ipool_free(table->flow, flow->idx);\n \t\t}\n \t} else {\n-\t\trte_memcpy(flow, job->upd_flow,\n-\t\t\t   offsetof(struct rte_flow_hw, rule));\n+\t\tstruct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);\n+\t\tstruct rte_flow_hw *upd_flow = &aux->upd_flow;\n+\n+\t\trte_memcpy(flow, upd_flow, offsetof(struct rte_flow_hw, rule));\n \t\tif (table->resource)\n \t\t\tmlx5_ipool_free(table->resource, res_idx);\n \t}\n@@ -4456,7 +4517,6 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \t\t.data = &flow_attr,\n \t};\n \tstruct mlx5_indexed_pool_config cfg = {\n-\t\t.size = sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size(),\n \t\t.trunk_size = 1 << 12,\n \t\t.per_core_cache = 1 << 13,\n \t\t.need_lock = 1,\n@@ -4477,6 +4537,9 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \tif (!attr->flow_attr.group)\n \t\tmax_tpl = 1;\n \tcfg.max_idx = nb_flows;\n+\tcfg.size = !rte_flow_template_table_resizable(dev->data->port_id, attr) ?\n+\t\t   mlx5_flow_hw_entry_size() :\n+\t\t   mlx5_flow_hw_auxed_entry_size();\n \t/* For table has very limited flows, disable cache. */\n \tif (nb_flows < cfg.trunk_size) {\n \t\tcfg.per_core_cache = 0;\n@@ -4507,6 +4570,11 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \ttbl->flow = mlx5_ipool_create(&cfg);\n \tif (!tbl->flow)\n \t\tgoto error;\n+\t/* Allocate table of auxiliary flow rule structs. */\n+\ttbl->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux) * nb_flows,\n+\t\t\t\t    RTE_CACHE_LINE_SIZE, rte_dev_numa_node(dev->device));\n+\tif (!tbl->flow_aux)\n+\t\tgoto error;\n \t/* Register the flow group. 
*/\n \tge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);\n \tif (!ge)\n@@ -4627,6 +4695,8 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \t\tif (tbl->grp)\n \t\t\tmlx5_hlist_unregister(priv->sh->groups,\n \t\t\t\t\t      &tbl->grp->entry);\n+\t\tif (tbl->flow_aux)\n+\t\t\tmlx5_free(tbl->flow_aux);\n \t\tif (tbl->flow)\n \t\t\tmlx5_ipool_destroy(tbl->flow);\n \t\tmlx5_free(tbl);\n@@ -4865,6 +4935,7 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,\n \tmlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);\n \tif (table->resource)\n \t\tmlx5_ipool_destroy(table->resource);\n+\tmlx5_free(table->flow_aux);\n \tmlx5_ipool_destroy(table->flow);\n \tmlx5_free(table);\n \treturn 0;\n@@ -9991,8 +10062,7 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\t\tgoto err;\n \t\t}\n \t\tmem_size += (sizeof(struct mlx5_hw_q_job *) +\n-\t\t\t     sizeof(struct mlx5_hw_q_job) +\n-\t\t\t     sizeof(struct rte_flow_hw)) * _queue_attr[i]->size;\n+\t\t\t     sizeof(struct mlx5_hw_q_job)) * _queue_attr[i]->size;\n \t}\n \tpriv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,\n \t\t\t\t 64, SOCKET_ID_ANY);\n@@ -10001,23 +10071,17 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\tgoto err;\n \t}\n \tfor (i = 0; i < nb_q_updated; i++) {\n-\t\tstruct rte_flow_hw *upd_flow = NULL;\n-\n \t\tpriv->hw_q[i].job_idx = _queue_attr[i]->size;\n \t\tpriv->hw_q[i].size = _queue_attr[i]->size;\n \t\tif (i == 0)\n \t\t\tpriv->hw_q[i].job = (struct mlx5_hw_q_job **)\n \t\t\t\t\t    &priv->hw_q[nb_q_updated];\n \t\telse\n-\t\t\tpriv->hw_q[i].job = (struct mlx5_hw_q_job **)\n-\t\t\t\t&job[_queue_attr[i - 1]->size - 1].upd_flow[1];\n+\t\t\tpriv->hw_q[i].job = (struct mlx5_hw_q_job **)&job[_queue_attr[i - 1]->size];\n \t\tjob = (struct mlx5_hw_q_job *)\n \t\t      &priv->hw_q[i].job[_queue_attr[i]->size];\n-\t\tupd_flow = (struct rte_flow_hw *)&job[_queue_attr[i]->size];\n-\t\tfor (j = 0; j < _queue_attr[i]->size; j++) {\n-\t\t\tjob[j].upd_flow = &upd_flow[j];\n+\t\tfor (j = 0; j < _queue_attr[i]->size; j++)\n \t\t\tpriv->hw_q[i].job[j] = &job[j];\n-\t\t}\n \t\t/* Notice ring name length is limited. */\n \t\tpriv->hw_q[i].indir_cq = mlx5_hwq_ring_create\n \t\t\t(dev->data->port_id, i, _queue_attr[i]->size, \"indir_act_cq\");\n",
    "prefixes": [
        "07/11"
    ]
}
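
To work with the patch itself rather than its metadata, the "mbox" URL in the response above serves the raw message for use with git am. A short sketch under the same assumptions (the local filename is an arbitrary choice):

    import requests

    # Taken from the "mbox" field of the JSON response above.
    MBOX_URL = ("http://patchwork.dpdk.org/project/dpdk/patch/"
                "20240228170046.176600-8-dsosnowski@nvidia.com/mbox/")

    resp = requests.get(MBOX_URL)
    resp.raise_for_status()

    # Save locally, then apply with: git am 137446.mbox
    with open("137446.mbox", "wb") as f:
        f.write(resp.content)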