get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/137441/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 137441,
    "url": "http://patchwork.dpdk.org/api/patches/137441/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20240228170046.176600-2-dsosnowski@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20240228170046.176600-2-dsosnowski@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20240228170046.176600-2-dsosnowski@nvidia.com",
    "date": "2024-02-28T17:00:36",
    "name": "[01/11] net/mlx5: allocate local DR rule action buffers",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "0612547315c790b09b15bf15a2ab913333b23100",
    "submitter": {
        "id": 2386,
        "url": "http://patchwork.dpdk.org/api/people/2386/?format=api",
        "name": "Dariusz Sosnowski",
        "email": "dsosnowski@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20240228170046.176600-2-dsosnowski@nvidia.com/mbox/",
    "series": [
        {
            "id": 31278,
            "url": "http://patchwork.dpdk.org/api/series/31278/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=31278",
            "date": "2024-02-28T17:00:35",
            "name": "net/mlx5: flow insertion performance improvements",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/31278/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/137441/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/137441/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C0EFD43C2C;\n\tWed, 28 Feb 2024 18:01:45 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 275AD42D90;\n\tWed, 28 Feb 2024 18:01:39 +0100 (CET)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2043.outbound.protection.outlook.com [40.107.93.43])\n by mails.dpdk.org (Postfix) with ESMTP id 43B5642D90\n for <dev@dpdk.org>; Wed, 28 Feb 2024 18:01:38 +0100 (CET)",
            "from BYAPR05CA0052.namprd05.prod.outlook.com (2603:10b6:a03:74::29)\n by PH0PR12MB8007.namprd12.prod.outlook.com (2603:10b6:510:28e::5)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7316.41; Wed, 28 Feb\n 2024 17:01:35 +0000",
            "from SJ1PEPF00001CE1.namprd05.prod.outlook.com\n (2603:10b6:a03:74:cafe::94) by BYAPR05CA0052.outlook.office365.com\n (2603:10b6:a03:74::29) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.7339.27 via Frontend\n Transport; Wed, 28 Feb 2024 17:01:34 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n SJ1PEPF00001CE1.mail.protection.outlook.com (10.167.242.9) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.7292.25 via Frontend Transport; Wed, 28 Feb 2024 17:01:34 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Wed, 28 Feb\n 2024 09:01:06 -0800",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Wed, 28 Feb\n 2024 09:01:04 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=g+lB1T9rIWBeEJD4zmMQ9UdgQmm2guQMXs1QFwKEFlIzZDW8koDshw8jLQ72FInBl3O3Yqy9NpWcjzP569Mj3krmmVtc46EbhQo0qJQTvEYrkz+m3AM0u5o40z9HUfe+OBtCy4G6AjVn/CmtHBS9ZvGr+ZlTm0768t55Nx0Gy5EiRQxFnorhQs1+0y5XawiQBePB9nn/Ml9nj89sfjLOaoEdzI7MH7q0oRGncYiXdfLbK+3YwQ7GRyZkxRaHdymyNe5UI+9eHMQp3gxQ+LpJR0t1Uyx2BMQmekSIFbL0J868d9/1pSM1a3OqXWxP6mh5MXWWRmC/ZxzDW44tpoHNnw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=OOgVb/x5u6nab6VpiUDwvuMqK5JuQFgN8ILQAe/XSqE=;\n b=PW/w9H3BgCNIKzSAM6Qp1PikJDRwD+f3rrl6d74QXGaE8nko4NNv6tpnErBQckI4OAk+Vl6CtdMjGLLZQdCJbtIvYB2SUyMvxV5sEhUYUcs7GITJIfpg2bi8xQtVWVcWo7AvlPvUb/aeGBYuXGAMD+TGnpIF3bpFYmNhVshOXsGGFzST9Y2GMbbOMzb6R1dRK7xX9Ej6ovTfSZrTbQUZEleIVj7J+3tqp/hfl2Ca5c/1u65WkOCAP6CMOTDTQ47iGmcQ7Lgrv2PZh271uVm9ppVAx2PbWHCEs87AV7lTPnTCyuW1PJ+pX6MbohlSduks7gA4kHGnUzC7Vp30qG7D/g==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=OOgVb/x5u6nab6VpiUDwvuMqK5JuQFgN8ILQAe/XSqE=;\n b=nDmCf9Xdohmdiy0VAO8DFOLMSoZfvtY+hyirNPg/E6MOVRWGEpMC/HiOsNfhZDcVi+aZUR2XS5CMvMf4hX7FaLRKgyIxJSy4nv+LdHL4VMdAE7gabS7jbAo1IBSrhfMJDc0FYjocwY3G7c+M40XM+JNH+iZpxpy0AftmMPMY+Fcv68v2UxtTgVrYKfy16BlHpyIw2/X2mHRY3OWp38FCCi7UeN3rpr7HFNw9HfXnmaFHTUJlyzWNqW+OTkC4gCmSyStMDyL2p8LAMUZoC0IeA43wg8sS7gD4rXBg+3lQtQsH0BZ8Zk3hQlIpBzKKbAlOWfgZcmlJaNbAfxbFdQyJpg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Dariusz Sosnowski <dsosnowski@nvidia.com>",
        "To": "Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>,\n Suanming Mou <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, Raslan Darawsheh <rasland@nvidia.com>, Bing Zhao\n <bingz@nvidia.com>",
        "Subject": "[PATCH 01/11] net/mlx5: allocate local DR rule action buffers",
        "Date": "Wed, 28 Feb 2024 18:00:36 +0100",
        "Message-ID": "<20240228170046.176600-2-dsosnowski@nvidia.com>",
        "X-Mailer": "git-send-email 2.39.2",
        "In-Reply-To": "<20240228170046.176600-1-dsosnowski@nvidia.com>",
        "References": "<20240228170046.176600-1-dsosnowski@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "SJ1PEPF00001CE1:EE_|PH0PR12MB8007:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "b07e024f-8050-4d2b-54f6-08dc387eeb7d",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 0B4GPv+CaQvlNMRhihW6doQ3Nxl5VXNkkmHAx1oi/6bplDTyAAvwk3ILyDE65Vj8fPmI/l3Na7ozyh7t4aMe2cOY2pUhwqOZZKr9PAAJXTCdtOJJ2c7NtTezJOOhL57KGV/UZfQdGgVaCrYcQrnS6ZL/CIHk/dbr+7fca+WKnon7SYNu7K+ETbkvNoKu9SVG3xsDXwQ3jEo/cmC7FYqB+WHh11kgd46LyPzpxnmfyro75T5NbYLGSV9Ohb+MICeeAFUGq1Dl1x5sKkypwts2BuZWrJ6F8iR2p0eceMuZ836K+NPpehqIztcunRINg69ybCg7iwje1qGUnKNAWFoqaKzpBxKV56zOLO8J8jiBE0rtp96dlTEg3QgkbMajTIR/5e6nXy3a9hFal70uwZgYdJIzWx1490Kvi4O057pYYiYGK1Q2ipUg4dQyOnZUtZ2v+10UfkfKWPeCsJDa6uqp+0F31Fp3f5mXgCNB+lEZs6DBLef0kcjHGNUYSbudemc9icbABEkpMxbc9iBKBny+9uuPKkg7p7ocOAfsUXARCpYYrvWT18W5VO4nY1eHitQhWLJbjar8GHMuoi2XlSgiDSN022VyOQ/ISU/YuOmPEy40G0plsqiFBAefLzLkDB7Ll54Wo42i8Btu7OjdFXlhpYgS0qL2l5OV10mfNwU7Dph8QvpH23cMENBHos9ZsDoh501l7k+iozA47BFOwpXOiitmacIdVJR3JC/wWZwgUg+oSs0iRPtR2rfb63BMj/Oo",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230031)(36860700004)(82310400014); DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "28 Feb 2024 17:01:34.6724 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n b07e024f-8050-4d2b-54f6-08dc387eeb7d",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n SJ1PEPF00001CE1.namprd05.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PH0PR12MB8007",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Goal of this is to remove the unnecessary copying of precalculated\nmlx5dr_rule_action structures used to create HWS flow rules.\n\nBefore this patch, during template table creation an array of these\nstructures was calculated for each actions template used.\nEach of these structures contained either full action definition or\npartial (depends on mask configuration).\nDuring flow creation, this array was copied to stack and later passed to\nmlx5dr_rule_create().\n\nThis patch removes this copy by implementing the following:\n\n- Allocate an array of mlx5dr_rule_action structures for each actions\n  template and queue.\n- Populate them with precalculated data from relevant actions templates.\n- During flow creation, construction of unmasked actions works on an\n  array dedicated for the specific queue and actions template.\n- Pass this buffer to mlx5dr_rule_create directly.\n\nSigned-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.h    | 13 +++++++++\n drivers/net/mlx5/mlx5_flow_hw.c | 51 +++++++++++++++++++++++++++++----\n 2 files changed, 59 insertions(+), 5 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 1530e6962f..11135645ef 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1554,6 +1554,10 @@ struct mlx5_matcher_info {\n \tuint32_t refcnt;\n };\n \n+struct mlx5_dr_rule_action_container {\n+\tstruct mlx5dr_rule_action acts[MLX5_HW_MAX_ACTS];\n+} __rte_cache_aligned;\n+\n struct rte_flow_template_table {\n \tLIST_ENTRY(rte_flow_template_table) next;\n \tstruct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */\n@@ -1573,6 +1577,15 @@ struct rte_flow_template_table {\n \tuint32_t refcnt; /* Table reference counter. */\n \tstruct mlx5_tbl_multi_pattern_ctx mpctx;\n \tstruct mlx5dr_matcher_attr matcher_attr;\n+\t/**\n+\t * Variable length array of containers containing precalculated templates of DR actions\n+\t * arrays. This array is allocated at template table creation time and contains\n+\t * one container per each queue, per each actions template.\n+\t * Essentially rule_acts is a 2-dimensional array indexed with (AT index, queue) pair.\n+\t * Each container will provide a local \"queue buffer\" to work on for flow creation\n+\t * operations when using a given actions template.\n+\t */\n+\tstruct mlx5_dr_rule_action_container rule_acts[];\n };\n \n static __rte_always_inline struct mlx5dr_matcher *\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex f778fd0698..442237f2b6 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -2499,6 +2499,34 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\t\t  \"fail to create rte table\");\n }\n \n+static __rte_always_inline struct mlx5dr_rule_action *\n+flow_hw_get_dr_action_buffer(struct mlx5_priv *priv,\n+\t\t\t     struct rte_flow_template_table *table,\n+\t\t\t     uint8_t action_template_index,\n+\t\t\t     uint32_t queue)\n+{\n+\tuint32_t offset = action_template_index * priv->nb_queue + 
queue;\n+\n+\treturn &table->rule_acts[offset].acts[0];\n+}\n+\n+static void\n+flow_hw_populate_rule_acts_caches(struct rte_eth_dev *dev,\n+\t\t\t\t  struct rte_flow_template_table *table,\n+\t\t\t\t  uint8_t at_idx)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tuint32_t q;\n+\n+\tfor (q = 0; q < priv->nb_queue; ++q) {\n+\t\tstruct mlx5dr_rule_action *rule_acts =\n+\t\t\t\tflow_hw_get_dr_action_buffer(priv, table, at_idx, q);\n+\n+\t\trte_memcpy(rule_acts, table->ats[at_idx].acts.rule_acts,\n+\t\t\t   sizeof(table->ats[at_idx].acts.rule_acts));\n+\t}\n+}\n+\n /**\n  * Translate rte_flow actions to DR action.\n  *\n@@ -2526,6 +2554,7 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\t\t\t\ttbl->ats[i].action_template,\n \t\t\t\t\t\t&tbl->mpctx, error))\n \t\t\tgoto err;\n+\t\tflow_hw_populate_rule_acts_caches(dev, tbl, i);\n \t}\n \tret = mlx5_tbl_multi_pattern_process(dev, tbl, &tbl->mpctx.segments[0],\n \t\t\t\t\t     rte_log2_u32(tbl->cfg.attr.nb_flows),\n@@ -2914,7 +2943,6 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \tstruct mlx5_aso_mtr *aso_mtr;\n \tstruct mlx5_multi_pattern_segment *mp_segment = NULL;\n \n-\trte_memcpy(rule_acts, hw_acts->rule_acts, sizeof(*rule_acts) * at->dr_actions_num);\n \tattr.group = table->grp->group_id;\n \tft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];\n \tif (table->type == MLX5DR_TABLE_TYPE_FDB) {\n@@ -3316,7 +3344,7 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \t\t.user_data = user_data,\n \t\t.burst = attr->postpone,\n \t};\n-\tstruct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];\n+\tstruct mlx5dr_rule_action *rule_acts;\n \tstruct rte_flow_hw *flow = NULL;\n \tstruct mlx5_hw_q_job *job = NULL;\n \tconst struct rte_flow_item *rule_items;\n@@ -3339,6 +3367,7 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \tmlx5_ipool_malloc(table->resource, &res_idx);\n \tif (!res_idx)\n \t\tgoto error;\n+\trule_acts = flow_hw_get_dr_action_buffer(priv, table, 
action_template_index, queue);\n \t/*\n \t * Set the table here in order to know the destination table\n \t * when free the flow afterward.\n@@ -3460,7 +3489,7 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \t\t.user_data = user_data,\n \t\t.burst = attr->postpone,\n \t};\n-\tstruct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];\n+\tstruct mlx5dr_rule_action *rule_acts;\n \tstruct rte_flow_hw *flow = NULL;\n \tstruct mlx5_hw_q_job *job = NULL;\n \tuint32_t flow_idx = 0;\n@@ -3482,6 +3511,7 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,\n \tmlx5_ipool_malloc(table->resource, &res_idx);\n \tif (!res_idx)\n \t\tgoto error;\n+\trule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);\n \t/*\n \t * Set the table here in order to know the destination table\n \t * when free the flow afterwards.\n@@ -3591,7 +3621,7 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \t\t.user_data = user_data,\n \t\t.burst = attr->postpone,\n \t};\n-\tstruct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];\n+\tstruct mlx5dr_rule_action *rule_acts;\n \tstruct rte_flow_hw *of = (struct rte_flow_hw *)flow;\n \tstruct rte_flow_hw *nf;\n \tstruct rte_flow_template_table *table = of->table;\n@@ -3609,6 +3639,7 @@ flow_hw_async_flow_update(struct rte_eth_dev *dev,\n \t\tgoto error;\n \tnf = job->upd_flow;\n \tmemset(nf, 0, sizeof(struct rte_flow_hw));\n+\trule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);\n \t/*\n \t * Set the table here in order to know the destination table\n \t * when free the flow afterwards.\n@@ -4335,6 +4366,7 @@ mlx5_hw_build_template_table(struct rte_eth_dev *dev,\n \t\t\ti++;\n \t\t\tgoto at_error;\n \t\t}\n+\t\tflow_hw_populate_rule_acts_caches(dev, tbl, i);\n \t}\n \ttbl->nb_action_templates = nb_action_templates;\n \tif (mlx5_is_multi_pattern_active(&tbl->mpctx)) {\n@@ -4423,6 +4455,7 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \tuint32_t i = 0, max_tpl = 
MLX5_HW_TBL_MAX_ITEM_TEMPLATE;\n \tuint32_t nb_flows = rte_align32pow2(attr->nb_flows);\n \tbool port_started = !!dev->data->dev_started;\n+\tsize_t tbl_mem_size;\n \tint err;\n \n \t/* HWS layer accepts only 1 item template with root table. */\n@@ -4442,8 +4475,16 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \t\trte_errno = EINVAL;\n \t\tgoto error;\n \t}\n+\t/*\n+\t * Amount of memory required for rte_flow_template_table struct:\n+\t * - Size of the struct itself.\n+\t * - VLA of DR rule action containers at the end =\n+\t *     number of actions templates * number of queues * size of DR rule actions container.\n+\t */\n+\ttbl_mem_size = sizeof(*tbl);\n+\ttbl_mem_size += nb_action_templates * priv->nb_queue * sizeof(tbl->rule_acts[0]);\n \t/* Allocate the table memory. */\n-\ttbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());\n+\ttbl = mlx5_malloc(MLX5_MEM_ZERO, tbl_mem_size, RTE_CACHE_LINE_SIZE, rte_socket_id());\n \tif (!tbl)\n \t\tgoto error;\n \ttbl->cfg = *table_cfg;\n",
    "prefixes": [
        "01/11"
    ]
}