get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields change).

put:
Update a patch (full update; all writable fields are replaced).
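
These endpoints can be driven programmatically. A minimal sketch using the Python "requests" package (an assumption; any HTTP client works) that fetches this patch; the URL and field names come from the response shown below, and the explicit Accept header asks Django REST Framework for JSON instead of the browsable HTML page:

    import requests

    # Endpoint taken from the "url" field of the response body below.
    url = "http://patchwork.dpdk.org/api/patches/133291/"

    resp = requests.get(url, headers={"Accept": "application/json"}, timeout=30)
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])                # [v5,08/10] net/mlx5: support HWS mirror action
    print(patch["state"])               # superseded
    print(patch["series"][0]["name"])   # net/mlx5: support indirect actions list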

GET /api/patches/133291/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 133291,
    "url": "http://patchwork.dpdk.org/api/patches/133291/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20231025102727.145493-9-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231025102727.145493-9-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231025102727.145493-9-getelson@nvidia.com",
    "date": "2023-10-25T10:27:25",
    "name": "[v5,08/10] net/mlx5: support HWS mirror action",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b65bbfaab020a2d153ff3ef5312c8c6fe77aa13a",
    "submitter": {
        "id": 1882,
        "url": "http://patchwork.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20231025102727.145493-9-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 29978,
            "url": "http://patchwork.dpdk.org/api/series/29978/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=29978",
            "date": "2023-10-25T10:27:17",
            "name": "net/mlx5: support indirect actions list",
            "version": 5,
            "mbox": "http://patchwork.dpdk.org/series/29978/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/133291/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/133291/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E2593431FB;\n\tWed, 25 Oct 2023 12:28:58 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id ABAD341157;\n\tWed, 25 Oct 2023 12:28:24 +0200 (CEST)",
            "from NAM11-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam11on2040.outbound.protection.outlook.com [40.107.236.40])\n by mails.dpdk.org (Postfix) with ESMTP id C5267427DF\n for <dev@dpdk.org>; Wed, 25 Oct 2023 12:28:21 +0200 (CEST)",
            "from SN6PR08CA0016.namprd08.prod.outlook.com (2603:10b6:805:66::29)\n by CO6PR12MB5409.namprd12.prod.outlook.com (2603:10b6:5:357::7) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6907.33; Wed, 25 Oct\n 2023 10:28:19 +0000",
            "from SA2PEPF00001508.namprd04.prod.outlook.com\n (2603:10b6:805:66:cafe::5d) by SN6PR08CA0016.outlook.office365.com\n (2603:10b6:805:66::29) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6933.19 via Frontend\n Transport; Wed, 25 Oct 2023 10:28:18 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n SA2PEPF00001508.mail.protection.outlook.com (10.167.242.40) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.6933.15 via Frontend Transport; Wed, 25 Oct 2023 10:28:18 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Wed, 25 Oct\n 2023 03:28:06 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Wed, 25 Oct\n 2023 03:28:03 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=lM4rbDS5aBSjIxAlBMj75nuqxqidvCjGLNDwyZBfWrrQcp8lrhTaGdcxNbUeJkAwUDXDoMjTiVBEcdcQO6qu4q4pah6PpmV7bd7M6Tv++wn9NtxARVjMw+/MtgyZtlYxTr+Zumlb6uy+cJ15CIG4jlc7uJnsVVSC3xuabyQXlmehA6sDXQWGMotSAiLLf1vJr7NBzQIR7uObzAqIcZJSwA65svDaNgs3+66yea8K5c0hJsO/121FOqACk1gM8p74els2r/ZI/48n87zuGJ978iIG5RPlq+zjDETppihPk9i9m7z164YyQonOvbILlJFyZR6RWs2FcNn6YmhpetQ4Hg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=80U1iT/cwRLx7lNAEP6T+Mx13GMQGmHgniFmG/gUUdI=;\n b=ht2bGSKgaKNTuDiY5BAwuzrc5M7eGJwzdTR6a3dndTm+UsolQmkhZ7fnM474GYdYmLcQJctD/it9pUt9vqX5wrj7u6cfFaVkc8vjqYzckBJ1cViUFexWHRVtAdg26oLdx89nZ3YM36A9Hx25oi+EvTyebH/quliY0v8/iyFe8ZQ+3lw+hIP4KzWvtAncxrEII6kZgHbfM7jIMNH7Eyqo1Kzv5p3OXPkcU/+iEefz8/Dww/5O7/k9PBL41Yw9/uy0ywq1sUUcZ7OakLYyctRILN8VwkW3ruTVVGADWWdxs69/BJsAYAzG1cOsJdaBoWmkLjmOlVmYmwfur/bZQSeXKw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=80U1iT/cwRLx7lNAEP6T+Mx13GMQGmHgniFmG/gUUdI=;\n b=Zy0Z91jMsrmIdWqQdFbpsuOfFMELFm43cNrKs7pqlP/zRlSf2HKoAmPZKCAGctfHlhh745845cCoSoejFwsM9Lh01vQaif/jbBCdr80XAUMdu0x5Aema4BjTatPwJziURyIpF8sXZOLHmjZzMH8ZFP9dWNwJjKAhDxAkYHMG2wTFn6p6obcgFREiBaSVpZoDis12PXPOG5JhgoZXqA6iJrQTM3XocZDtGkfoYTyysYlWK4/EvTuO6JZHw/bv0U/z1evorxInpYR/8rTzu09Gly4Q6fjQuIDYJTjaNVVdjDy6hAD65/NfANJFrc64cE1z9LfbDRvmX/9NUJZgf1Uo9Q==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, =?utf-8?b?wqA=?= <mkashani@nvidia.com>,\n <rasland@nvidia.com>, Suanming Mou <suanmingm@nvidia.com>,\n Matan Azrad <matan@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>",
        "Subject": "[PATCH v5 08/10] net/mlx5: support HWS mirror action",
        "Date": "Wed, 25 Oct 2023 13:27:25 +0300",
        "Message-ID": "<20231025102727.145493-9-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.39.2",
        "In-Reply-To": "<20231025102727.145493-1-getelson@nvidia.com>",
        "References": "<20231017080928.30454-1-getelson@nvidia.com>\n <20231025102727.145493-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "SA2PEPF00001508:EE_|CO6PR12MB5409:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "0362dab6-fc82-474a-a5ed-08dbd5451b25",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n zfDrGbvWj/KJ7Cw4sEd00pCi10KKLKhxEHdszrLmUFOMiLOSkypcU6oofXQLV4cU2F+Pyq3bPLFH5NqaTYEGBTvJ9a3gNQSAC5PD8yirJS75Hoj6NYCKdzJVzfNafLSMBWu32RpEQ8BZ6F2dtBJIBeW2GpWNE3LIcKOob1Rt5riScwxlIxkfAfh8E9rSP4m/alzZNfQLqH6o7gZre5QJfpQ+V0Q9uHuOPxQdhU/xKwLLHEIojFXSBNnXcQ7iFDTTTt+GdkSsm8UaO1+nIlNaSmdCOSjS15JU0ouoJ+bkIb0PBpa5GpL1GA1j3Yu8aI0JUMRUarFcAgAzw7Bqi1dA6z98Uf0oKrgdLICWaEEj1ixCYxaUa3B8aGZpUXjpFL4QGiC16ReDIO/Q96M6lTvfoUfaOx6oXBZKE3Fl7iy2jK5jEB3iita9yGaM6ILeauqh0uEt1w+Ovn9ok7PIT2oPcK/FNkri1B89qnbBPeCFFUT8HJ8It4h0UtjnE0L33qv0XUIXi8p7m3U2YI0knHPDpfbIPlYQy+jsD5fYfDVLJ+waQ6PXM/yF75nN3p0KI+xX6tKDFaZpypCvWFq+Jj+8+xOKVkbAQ6u+F9k5gYQqgRN1fSPgS7NW1s+Hqt4pknTLJGkA0aOklkUPuXrg+9id7W8GlPFMEh/awuKB7EEMWD/JCxUpG1cNkO+a+D1jJBxO+dexdEbsv/5a8S/K1oz+zTKbCKBespuRIq+XBlWyhXD7XBL5V/AQrIBQtQLFzDRa",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230031)(4636009)(396003)(376002)(136003)(346002)(39860400002)(230922051799003)(64100799003)(1800799009)(451199024)(186009)(82310400011)(46966006)(40470700004)(36840700001)(478600001)(30864003)(2906002)(55016003)(40460700003)(5660300002)(4326008)(8676002)(8936002)(40480700001)(41300700001)(36756003)(316002)(70586007)(70206006)(86362001)(426003)(6666004)(6916009)(7696005)(2616005)(16526019)(26005)(107886003)(82740400003)(336012)(7636003)(1076003)(356005)(36860700001)(83380400001)(6286002)(54906003)(47076005);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "25 Oct 2023 10:28:18.6562 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 0362dab6-fc82-474a-a5ed-08dbd5451b25",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n SA2PEPF00001508.namprd04.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CO6PR12MB5409",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "HWS mirror clones original packet to one or two destinations and\nproceeds with the original packet path.\n\nThe mirror has no dedicated RTE flow action type.\nMirror object is referenced by INDIRECT_LIST action.\nINDIRECT_LIST for a mirror built from actions list:\n\n    SAMPLE [/ SAMPLE] / <Orig. packet destination> / END\n\nMirror SAMPLE action defines packet clone. It specifies the clone\ndestination and optional clone reformat action.\nDestination action for both clone and original packet depends on HCA\ndomain:\n- for NIC RX, destination is ether RSS or QUEUE\n- for FDB, destination is PORT\n\nHWS mirror was inplemented with the INDIRECT_LIST flow action.\n\nMLX5 PMD defines general `struct mlx5_indirect_list` type for all.\nINDIRECT_LIST handler objects:\n\n\t\tstruct mlx5_indirect_list {\n\t\t\tenum mlx5_indirect_list_type type;\n\t\t\tLIST_ENTRY(mlx5_indirect_list) chain;\n\t\t\tchar data[];\n\t\t};\n\nSpecific INDIRECT_LIST type must overload `mlx5_indirect_list::data`\nand provide unique `type` value.\nPMD returns a pointer to `mlx5_indirect_list` object.\n\nExisting non-masked actions template API cannot identify flow actions\nin INDIRECT_LIST handler because INDIRECT_LIST handler can represent\nseveral flow actions.\n\nFor example:\nA: SAMPLE / JUMP\nB: SAMPE / SAMPLE / RSS\n\nActions template command\n\n\ttemplate indirect_list / end mask indirect_list 0 / end\n\ndoes not provide any information to differentiate between flow\nactions in A and B.\n\nMLX5 PMD requires INDIRECT_LIST configuration parameter in the\ntemplate section:\n\nNon-masked INDIRECT_LIST API:\n=============================\n\n\ttemplate indirect_list X / end mask indirect_list 0 / end\n\nPMD identifies type of X handler and will use the same type in\ntemplate creation. Actual parameters for actions in the list will\nbe extracted from flow configuration\n\nMasked INDIRECT_LIST API:\n=========================\n\n\ttemplate indirect_list X / end mask indirect_list -lUL / end\n\nPMD creates action template from actions types and configurations\nreferenced by X.\n\nINDIRECT_LIST action without configuration is invalid and will be\nrejected by PMD.\n\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\nAcked-by: Suanming Mou <suanmingm@nvidia.com>\n---\n doc/guides/nics/features/mlx5.ini      |   1 +\n doc/guides/rel_notes/release_23_11.rst |   1 +\n drivers/net/mlx5/mlx5.c                |   1 +\n drivers/net/mlx5/mlx5.h                |   2 +\n drivers/net/mlx5/mlx5_flow.c           | 134 ++++++\n drivers/net/mlx5/mlx5_flow.h           |  69 ++-\n drivers/net/mlx5/mlx5_flow_hw.c        | 615 ++++++++++++++++++++++++-\n 7 files changed, 818 insertions(+), 5 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini\nindex fc67415c6c..a85d755734 100644\n--- a/doc/guides/nics/features/mlx5.ini\n+++ b/doc/guides/nics/features/mlx5.ini\n@@ -106,6 +106,7 @@ drop                 = Y\n flag                 = Y\n inc_tcp_ack          = Y\n inc_tcp_seq          = Y\n+indirect_list        = Y\n jump                 = Y\n mark                 = Y\n meter                = Y\ndiff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst\nindex 0a6fc76a9d..81d606e773 100644\n--- a/doc/guides/rel_notes/release_23_11.rst\n+++ b/doc/guides/rel_notes/release_23_11.rst\n@@ -143,6 +143,7 @@ New Features\n * **Updated NVIDIA mlx5 net driver.**\n \n   * Added support for Network Service Header (NSH) flow matching.\n+  * Added support for ``RTE_FLOW_ACTION_TYPE_INDIRECT_LIST`` flow action.\n \n * **Updated Solarflare net driver.**\n \ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 997df595d0..08b7b03365 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -2168,6 +2168,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \t/* Free the eCPRI flex parser resource. */\n \tmlx5_flex_parser_ecpri_release(dev);\n \tmlx5_flex_item_port_cleanup(dev);\n+\tmlx5_indirect_list_handles_release(dev);\n #ifdef HAVE_MLX5_HWS_SUPPORT\n \tflow_hw_destroy_vport_action(dev);\n \tflow_hw_resource_release(dev);\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 0b709a1bda..f3b872f59c 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1791,6 +1791,8 @@ struct mlx5_priv {\n \tLIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;\n \t/* Standalone indirect tables. */\n \tLIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;\n+\t/* Objects created with indirect list action */\n+\tLIST_HEAD(indirect_list, mlx5_indirect_list) indirect_list_head;\n \t/* Pointer to next element. */\n \trte_rwlock_t ind_tbls_lock;\n \tuint32_t refcnt; /**< Reference counter. 
*/\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 8ad85e6027..99b814d815 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -62,6 +62,30 @@ struct tunnel_default_miss_ctx {\n \t};\n };\n \n+void\n+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\twhile (!LIST_EMPTY(&priv->indirect_list_head)) {\n+\t\tstruct mlx5_indirect_list *e =\n+\t\t\tLIST_FIRST(&priv->indirect_list_head);\n+\n+\t\tLIST_REMOVE(e, entry);\n+\t\tswitch (e->type) {\n+#ifdef HAVE_MLX5_HWS_SUPPORT\n+\t\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n+\t\t\tmlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);\n+\t\tbreak;\n+#endif\n+\t\tdefault:\n+\t\t\tDRV_LOG(ERR, \"invalid indirect list type\");\n+\t\t\tMLX5_ASSERT(false);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+}\n+\n static int\n flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n \t\t\t     struct rte_flow *flow,\n@@ -1120,6 +1144,32 @@ mlx5_flow_async_action_handle_query_update\n \t enum rte_flow_query_update_mode qu_mode,\n \t void *user_data, struct rte_flow_error *error);\n \n+static struct rte_flow_action_list_handle *\n+mlx5_action_list_handle_create(struct rte_eth_dev *dev,\n+\t\t\t       const struct rte_flow_indir_action_conf *conf,\n+\t\t\t       const struct rte_flow_action *actions,\n+\t\t\t       struct rte_flow_error *error);\n+\n+static int\n+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,\n+\t\t\t\tstruct rte_flow_action_list_handle *handle,\n+\t\t\t\tstruct rte_flow_error *error);\n+\n+static struct rte_flow_action_list_handle *\n+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t\t\t\t\t  const struct rte_flow_op_attr *attr,\n+\t\t\t\t\t  const struct\n+\t\t\t\t\t  rte_flow_indir_action_conf *conf,\n+\t\t\t\t\t  const struct rte_flow_action *actions,\n+\t\t\t\t\t  void *user_data,\n+\t\t\t\t\t  struct rte_flow_error *error);\n+static int\n+mlx5_flow_async_action_list_handle_destroy\n+\t\t\t(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t\t\t const struct rte_flow_op_attr *op_attr,\n+\t\t\t struct rte_flow_action_list_handle *action_handle,\n+\t\t\t void *user_data, struct rte_flow_error *error);\n+\n static const struct rte_flow_ops mlx5_flow_ops = {\n \t.validate = mlx5_flow_validate,\n \t.create = mlx5_flow_create,\n@@ -1135,6 +1185,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {\n \t.action_handle_update = mlx5_action_handle_update,\n \t.action_handle_query = mlx5_action_handle_query,\n \t.action_handle_query_update = mlx5_action_handle_query_update,\n+\t.action_list_handle_create = mlx5_action_list_handle_create,\n+\t.action_list_handle_destroy = mlx5_action_list_handle_destroy,\n \t.tunnel_decap_set = mlx5_flow_tunnel_decap_set,\n \t.tunnel_match = mlx5_flow_tunnel_match,\n \t.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,\n@@ -1163,6 +1215,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {\n \t.async_action_handle_query = mlx5_flow_async_action_handle_query,\n \t.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,\n \t.async_actions_update = mlx5_flow_async_flow_update,\n+\t.async_action_list_handle_create =\n+\t\tmlx5_flow_async_action_list_handle_create,\n+\t.async_action_list_handle_destroy =\n+\t\tmlx5_flow_async_action_list_handle_destroy,\n };\n \n /* Tunnel information. 
*/\n@@ -10869,6 +10925,84 @@ mlx5_action_handle_query_update(struct rte_eth_dev *dev,\n \t\t\t\t\t query, qu_mode, error);\n }\n \n+\n+#define MLX5_DRV_FOPS_OR_ERR(dev, fops, drv_cb, ret)                           \\\n+{                                                                              \\\n+\tstruct rte_flow_attr attr = { .transfer = 0 };                         \\\n+\tenum mlx5_flow_drv_type drv_type = flow_get_drv_type((dev), &attr);    \\\n+\tif (drv_type == MLX5_FLOW_TYPE_MIN ||                                  \\\n+\t    drv_type == MLX5_FLOW_TYPE_MAX) {                                  \\\n+\t\trte_flow_error_set(error, ENOTSUP,                             \\\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,                 \\\n+\t\t\t\t   NULL, \"invalid driver type\");               \\\n+\t\treturn ret;                                                    \\\n+\t}                                                                      \\\n+\t(fops) = flow_get_drv_ops(drv_type);                                   \\\n+\tif (!(fops) || !(fops)->drv_cb) {                                      \\\n+\t\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, \\\n+\t\t\t\t   NULL, \"no action_list handler\");            \\\n+\t\treturn ret;                                                    \\\n+\t}                                                                      \\\n+}\n+\n+static struct rte_flow_action_list_handle *\n+mlx5_action_list_handle_create(struct rte_eth_dev *dev,\n+\t\t\t       const struct rte_flow_indir_action_conf *conf,\n+\t\t\t       const struct rte_flow_action *actions,\n+\t\t\t       struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\n+\tMLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_create, NULL);\n+\treturn fops->action_list_handle_create(dev, conf, actions, error);\n+}\n+\n+static int\n+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,\n+\t\t\t\tstruct rte_flow_action_list_handle *handle,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\n+\tMLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_destroy, ENOTSUP);\n+\treturn fops->action_list_handle_destroy(dev, handle, error);\n+}\n+\n+static struct rte_flow_action_list_handle *\n+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev,\n+\t\t\t\t\t  uint32_t queue_id,\n+\t\t\t\t\t  const struct\n+\t\t\t\t\t  rte_flow_op_attr *op_attr,\n+\t\t\t\t\t  const struct\n+\t\t\t\t\t  rte_flow_indir_action_conf *conf,\n+\t\t\t\t\t  const struct rte_flow_action *actions,\n+\t\t\t\t\t  void *user_data,\n+\t\t\t\t\t  struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\n+\tMLX5_DRV_FOPS_OR_ERR(dev, fops, async_action_list_handle_create, NULL);\n+\treturn fops->async_action_list_handle_create(dev, queue_id, op_attr,\n+\t\t\t\t\t\t     conf, actions, user_data,\n+\t\t\t\t\t\t     error);\n+}\n+\n+static int\n+mlx5_flow_async_action_list_handle_destroy\n+\t(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t const struct rte_flow_op_attr *op_attr,\n+\t struct rte_flow_action_list_handle *action_handle,\n+\t void *user_data, struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\n+\tMLX5_DRV_FOPS_OR_ERR(dev, fops,\n+\t\t\t     async_action_list_handle_destroy, ENOTSUP);\n+\treturn fops->async_action_list_handle_destroy(dev, queue_id, op_attr,\n+\t\t\t\t\t\t      action_handle, user_data,\n+\t\t\t\t\t\t      error);\n+}\n+\n /**\n  * Destroy all indirect actions (shared RSS).\n  
*\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 903ff66d72..580db80fd4 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -65,7 +65,7 @@ enum mlx5_rte_flow_field_id {\n \t(((uint32_t)(uintptr_t)(handle)) & \\\n \t ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))\n \n-enum {\n+enum mlx5_indirect_type {\n \tMLX5_INDIRECT_ACTION_TYPE_RSS,\n \tMLX5_INDIRECT_ACTION_TYPE_AGE,\n \tMLX5_INDIRECT_ACTION_TYPE_COUNT,\n@@ -97,6 +97,28 @@ enum {\n #define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER\n #define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX\n \n+enum mlx5_indirect_list_type {\n+\tMLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,\n+};\n+\n+/*\n+ * Base type for indirect list type.\n+ * Actual indirect list type MUST override that type and put type spec data\n+ * after the `chain`.\n+ */\n+struct mlx5_indirect_list {\n+\t/* type field MUST be the first */\n+\tenum mlx5_indirect_list_type type;\n+\tLIST_ENTRY(mlx5_indirect_list) entry;\n+\t/* put type specific data after chain */\n+};\n+\n+static __rte_always_inline enum mlx5_indirect_list_type\n+mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)\n+{\n+\treturn obj->type;\n+}\n+\n /* Matches on selected register. */\n struct mlx5_rte_flow_item_tag {\n \tenum modify_reg id;\n@@ -1218,6 +1240,10 @@ struct rte_flow_hw {\n #pragma GCC diagnostic error \"-Wpedantic\"\n #endif\n \n+struct mlx5dr_action;\n+typedef struct mlx5dr_action *\n+(*indirect_list_callback_t)(const struct rte_flow_action *);\n+\n /* rte flow action translate to DR action struct. */\n struct mlx5_action_construct_data {\n \tLIST_ENTRY(mlx5_action_construct_data) next;\n@@ -1266,6 +1292,9 @@ struct mlx5_action_construct_data {\n \t\tstruct {\n \t\t\tuint32_t id;\n \t\t} shared_meter;\n+\t\tstruct {\n+\t\t\tindirect_list_callback_t cb;\n+\t\t} indirect_list;\n \t};\n };\n \n@@ -1776,6 +1805,17 @@ typedef int (*mlx5_flow_action_query_update_t)\n \t\t\t const void *update, void *data,\n \t\t\t enum rte_flow_query_update_mode qu_mode,\n \t\t\t struct rte_flow_error *error);\n+typedef struct rte_flow_action_list_handle *\n+(*mlx5_flow_action_list_handle_create_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\t const struct rte_flow_indir_action_conf *conf,\n+\t\t\t const struct rte_flow_action *actions,\n+\t\t\t struct rte_flow_error *error);\n+typedef int\n+(*mlx5_flow_action_list_handle_destroy_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\t struct rte_flow_action_list_handle *handle,\n+\t\t\t struct rte_flow_error *error);\n typedef int (*mlx5_flow_sync_domain_t)\n \t\t\t(struct rte_eth_dev *dev,\n \t\t\t uint32_t domains,\n@@ -1964,6 +2004,20 @@ typedef int (*mlx5_flow_async_action_handle_destroy_t)\n \t\t\t struct rte_flow_action_handle *handle,\n \t\t\t void *user_data,\n \t\t\t struct rte_flow_error *error);\n+typedef struct rte_flow_action_list_handle *\n+(*mlx5_flow_async_action_list_handle_create_t)\n+\t\t\t(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t const struct rte_flow_indir_action_conf *conf,\n+\t\t\t const struct rte_flow_action *actions,\n+\t\t\t void *user_data, struct rte_flow_error *error);\n+typedef int\n+(*mlx5_flow_async_action_list_handle_destroy_t)\n+\t\t\t(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t\t\t const struct rte_flow_op_attr *op_attr,\n+\t\t\t struct rte_flow_action_list_handle *action_handle,\n+\t\t\t void *user_data, struct rte_flow_error *error);\n+\n \n struct mlx5_flow_driver_ops {\n 
\tmlx5_flow_validate_t validate;\n@@ -1999,6 +2053,8 @@ struct mlx5_flow_driver_ops {\n \tmlx5_flow_action_update_t action_update;\n \tmlx5_flow_action_query_t action_query;\n \tmlx5_flow_action_query_update_t action_query_update;\n+\tmlx5_flow_action_list_handle_create_t action_list_handle_create;\n+\tmlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;\n \tmlx5_flow_sync_domain_t sync_domain;\n \tmlx5_flow_discover_priorities_t discover_priorities;\n \tmlx5_flow_item_create_t item_create;\n@@ -2025,6 +2081,10 @@ struct mlx5_flow_driver_ops {\n \tmlx5_flow_async_action_handle_query_update_t async_action_query_update;\n \tmlx5_flow_async_action_handle_query_t async_action_query;\n \tmlx5_flow_async_action_handle_destroy_t async_action_destroy;\n+\tmlx5_flow_async_action_list_handle_create_t\n+\t\tasync_action_list_handle_create;\n+\tmlx5_flow_async_action_list_handle_destroy_t\n+\t\tasync_action_list_handle_destroy;\n };\n \n /* mlx5_flow.c */\n@@ -2755,4 +2815,11 @@ flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)\n #endif\n \treturn UINT32_MAX;\n }\n+void\n+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);\n+#ifdef HAVE_MLX5_HWS_SUPPORT\n+struct mlx5_mirror;\n+void\n+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);\n+#endif\n #endif /* RTE_PMD_MLX5_FLOW_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex b2215fb5cf..1c3d915be1 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -58,6 +58,24 @@\n #define MLX5_HW_VLAN_PUSH_VID_IDX 1\n #define MLX5_HW_VLAN_PUSH_PCP_IDX 2\n \n+#define MLX5_MIRROR_MAX_CLONES_NUM 3\n+#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4\n+\n+struct mlx5_mirror_clone {\n+\tenum rte_flow_action_type type;\n+\tvoid *action_ctx;\n+};\n+\n+struct mlx5_mirror {\n+\t/* type field MUST be the first */\n+\tenum mlx5_indirect_list_type type;\n+\tLIST_ENTRY(mlx5_indirect_list) entry;\n+\n+\tuint32_t clones_num;\n+\tstruct mlx5dr_action *mirror_action;\n+\tstruct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];\n+};\n+\n static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);\n static int flow_hw_translate_group(struct rte_eth_dev *dev,\n \t\t\t\t   const struct mlx5_flow_template_table_cfg *cfg,\n@@ -568,6 +586,22 @@ __flow_hw_act_data_general_append(struct mlx5_priv *priv,\n \treturn 0;\n }\n \n+static __rte_always_inline int\n+flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,\n+\t\t\t\t      struct mlx5_hw_actions *acts,\n+\t\t\t\t      enum rte_flow_action_type type,\n+\t\t\t\t      uint16_t action_src, uint16_t action_dst,\n+\t\t\t\t      indirect_list_callback_t cb)\n+{\n+\tstruct mlx5_action_construct_data *act_data;\n+\n+\tact_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);\n+\tif (!act_data)\n+\t\treturn -1;\n+\tact_data->indirect_list.cb = cb;\n+\tLIST_INSERT_HEAD(&acts->act_list, act_data, next);\n+\treturn 0;\n+}\n /**\n  * Append dynamic encap action to the dynamic action list.\n  *\n@@ -1383,6 +1417,48 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n+static struct mlx5dr_action *\n+flow_hw_mirror_action(const struct rte_flow_action *action)\n+{\n+\tstruct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;\n+\n+\treturn mirror->mirror_action;\n+}\n+\n+static int\n+table_template_translate_indirect_list(struct rte_eth_dev *dev,\n+\t\t\t\t       const struct rte_flow_action *action,\n+\t\t\t\t       const struct 
rte_flow_action *mask,\n+\t\t\t\t       struct mlx5_hw_actions *acts,\n+\t\t\t\t       uint16_t action_src,\n+\t\t\t\t       uint16_t action_dst)\n+{\n+\tint ret;\n+\tbool is_masked = action->conf && mask->conf;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tenum mlx5_indirect_list_type type;\n+\n+\tif (!action->conf)\n+\t\treturn -EINVAL;\n+\ttype = mlx5_get_indirect_list_type(action->conf);\n+\tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n+\t\tif (is_masked) {\n+\t\t\tacts->rule_acts[action_dst].action = flow_hw_mirror_action(action);\n+\t\t} else {\n+\t\t\tret = flow_hw_act_data_indirect_list_append\n+\t\t\t\t(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,\n+\t\t\t\t action_src, action_dst, flow_hw_mirror_action);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t}\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -EINVAL;\n+\t}\n+\treturn 0;\n+}\n+\n /**\n  * Translate rte_flow actions to DR action.\n  *\n@@ -1419,7 +1495,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,\n \tstruct rte_flow_action *actions = at->actions;\n \tstruct rte_flow_action *action_start = actions;\n \tstruct rte_flow_action *masks = at->masks;\n-\tenum mlx5dr_action_type refmt_type = 0;\n+\tenum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;\n \tconst struct rte_flow_action_raw_encap *raw_encap_data;\n \tconst struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;\n \tuint16_t reformat_src = 0;\n@@ -1433,7 +1509,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,\n \tuint16_t action_pos;\n \tuint16_t jump_pos;\n \tuint32_t ct_idx;\n-\tint err;\n+\tint ret, err;\n \tuint32_t target_grp = 0;\n \tint table_type;\n \n@@ -1445,7 +1521,20 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,\n \telse\n \t\ttype = MLX5DR_TABLE_TYPE_NIC_RX;\n \tfor (; !actions_end; actions++, masks++) {\n-\t\tswitch (actions->type) {\n+\t\tswitch ((int)actions->type) {\n+\t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:\n+\t\t\taction_pos = at->actions_off[actions - at->actions];\n+\t\t\tif (!attr->group) {\n+\t\t\t\tDRV_LOG(ERR, \"Indirect action is not supported in root table.\");\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t\tret = table_template_translate_indirect_list\n+\t\t\t\t(dev, actions, masks, acts,\n+\t\t\t\t actions - action_start,\n+\t\t\t\t action_pos);\n+\t\t\tif (ret)\n+\t\t\t\tgoto err;\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n \t\t\taction_pos = at->actions_off[actions - at->actions];\n \t\t\tif (!attr->group) {\n@@ -2301,7 +2390,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\tMLX5_ASSERT(action->type ==\n \t\t\t\t    RTE_FLOW_ACTION_TYPE_INDIRECT ||\n \t\t\t\t    (int)action->type == act_data->type);\n-\t\tswitch (act_data->type) {\n+\t\tswitch ((int)act_data->type) {\n+\t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:\n+\t\t\trule_acts[act_data->action_dst].action =\n+\t\t\t\tact_data->indirect_list.cb(action);\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n \t\t\tif (flow_hw_shared_action_construct\n \t\t\t\t\t(dev, queue, action, table, it_idx,\n@@ -4366,6 +4459,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,\n \t\tswitch (action->type) {\n \t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n \t\t\tret = flow_hw_validate_action_indirect(dev, action,\n \t\t\t\t\t\t\t       mask,\n@@ -4607,6 +4702,28 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,\n \treturn 0;\n }\n \n+\n+static 
int\n+flow_hw_template_actions_list(struct rte_flow_actions_template *at,\n+\t\t\t      unsigned int action_src,\n+\t\t\t      enum mlx5dr_action_type *action_types,\n+\t\t\t      uint16_t *curr_off)\n+{\n+\tenum mlx5_indirect_list_type list_type;\n+\n+\tlist_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);\n+\tswitch (list_type) {\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n+\t\taction_template_set_type(at, action_types, action_src, curr_off,\n+\t\t\t\t\t MLX5DR_ACTION_TYP_DEST_ARRAY);\n+\t\tbreak;\n+\tdefault:\n+\t\tDRV_LOG(ERR, \"Unsupported indirect list type\");\n+\t\treturn -EINVAL;\n+\t}\n+\treturn 0;\n+}\n+\n /**\n  * Create DR action template based on a provided sequence of flow actions.\n  *\n@@ -4639,6 +4756,12 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)\n \t\tswitch (at->actions[i].type) {\n \t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:\n+\t\t\tret = flow_hw_template_actions_list(at, i, action_types,\n+\t\t\t\t\t\t\t    &curr_off);\n+\t\t\tif (ret)\n+\t\t\t\treturn NULL;\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n \t\t\tret = flow_hw_dr_actions_template_handle_shared\n \t\t\t\t\t\t\t\t (&at->masks[i],\n@@ -5119,6 +5242,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,\n \t\t * Need to restore the indirect action index from action conf here.\n \t\t */\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n+\t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:\n \t\t\tat->actions[i].conf = actions->conf;\n \t\t\tat->masks[i].conf = masks->conf;\n \t\t\tbreak;\n@@ -9354,6 +9478,483 @@ flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,\n \treturn flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);\n }\n \n+static void\n+mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,\n+\t\t\t  struct mlx5_mirror_clone *clone)\n+{\n+\tswitch (clone->type) {\n+\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\tmlx5_hrxq_release(dev,\n+\t\t\t\t  ((struct mlx5_hrxq *)(clone->action_ctx))->idx);\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_JUMP:\n+\t\tflow_hw_jump_release(dev, clone->action_ctx);\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n+\tcase RTE_FLOW_ACTION_TYPE_RAW_ENCAP:\n+\tcase RTE_FLOW_ACTION_TYPE_RAW_DECAP:\n+\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\n+\tcase RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:\n+\tdefault:\n+\t\tbreak;\n+\t}\n+}\n+\n+void\n+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)\n+{\n+\tuint32_t i;\n+\n+\tif (mirror->entry.le_prev)\n+\t\tLIST_REMOVE(mirror, entry);\n+\tfor (i = 0; i < mirror->clones_num; i++)\n+\t\tmlx5_mirror_destroy_clone(dev, &mirror->clone[i]);\n+\tif (mirror->mirror_action)\n+\t\tmlx5dr_action_destroy(mirror->mirror_action);\n+\tif (release)\n+\t\tmlx5_free(mirror);\n+}\n+\n+static inline enum mlx5dr_table_type\n+get_mlx5dr_table_type(const struct rte_flow_attr *attr)\n+{\n+\tenum mlx5dr_table_type type;\n+\n+\tif (attr->transfer)\n+\t\ttype = MLX5DR_TABLE_TYPE_FDB;\n+\telse if (attr->egress)\n+\t\ttype = MLX5DR_TABLE_TYPE_NIC_TX;\n+\telse\n+\t\ttype = MLX5DR_TABLE_TYPE_NIC_RX;\n+\treturn type;\n+}\n+\n+static __rte_always_inline bool\n+mlx5_mirror_terminal_action(const struct rte_flow_action *action)\n+{\n+\tswitch (action->type) {\n+\tcase RTE_FLOW_ACTION_TYPE_JUMP:\n+\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n+\t\treturn 
true;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\treturn false;\n+}\n+\n+static bool\n+mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,\n+\tconst struct rte_flow_action *action)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tswitch (action->type) {\n+\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\tif (priv->sh->esw_mode)\n+\t\t\treturn false;\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n+\tcase RTE_FLOW_ACTION_TYPE_RAW_ENCAP:\n+\tcase RTE_FLOW_ACTION_TYPE_RAW_DECAP:\n+\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\n+\tcase RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:\n+\t\tif (!priv->sh->esw_mode)\n+\t\t\treturn false;\n+\t\tif (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&\n+\t\t    action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)\n+\t\t\treturn false;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn false;\n+\t}\n+\treturn true;\n+}\n+\n+/**\n+ * Valid mirror actions list includes one or two SAMPLE actions\n+ * followed by JUMP.\n+ *\n+ * @return\n+ * Number of mirrors *action* list was valid.\n+ * -EINVAL otherwise.\n+ */\n+static int\n+mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,\n+\t\t\t\t     const struct rte_flow_action *actions)\n+{\n+\tif (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {\n+\t\tint i = 1;\n+\t\tbool valid;\n+\t\tconst struct rte_flow_action_sample *sample = actions[0].conf;\n+\t\tvalid = mlx5_mirror_validate_sample_action(dev, sample->actions);\n+\t\tif (!valid)\n+\t\t\treturn -EINVAL;\n+\t\tif (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {\n+\t\t\ti = 2;\n+\t\t\tsample = actions[1].conf;\n+\t\t\tvalid = mlx5_mirror_validate_sample_action(dev, sample->actions);\n+\t\t\tif (!valid)\n+\t\t\t\treturn -EINVAL;\n+\t\t}\n+\t\treturn mlx5_mirror_terminal_action(actions + i) ? 
i + 1 : -EINVAL;\n+\t}\n+\treturn -EINVAL;\n+}\n+\n+static int\n+mirror_format_tir(struct rte_eth_dev *dev,\n+\t\t  struct mlx5_mirror_clone *clone,\n+\t\t  const struct mlx5_flow_template_table_cfg *table_cfg,\n+\t\t  const struct rte_flow_action *action,\n+\t\t  struct mlx5dr_action_dest_attr *dest_attr,\n+\t\t  struct rte_flow_error *error)\n+{\n+\tuint32_t hws_flags;\n+\tenum mlx5dr_table_type table_type;\n+\tstruct mlx5_hrxq *tir_ctx;\n+\n+\ttable_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);\n+\thws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];\n+\ttir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);\n+\tif (!tir_ctx)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t  action, \"failed to create QUEUE action for mirror clone\");\n+\tdest_attr->dest = tir_ctx->action;\n+\tclone->action_ctx = tir_ctx;\n+\treturn 0;\n+}\n+\n+static int\n+mirror_format_jump(struct rte_eth_dev *dev,\n+\t\t   struct mlx5_mirror_clone *clone,\n+\t\t   const struct mlx5_flow_template_table_cfg *table_cfg,\n+\t\t   const struct rte_flow_action *action,\n+\t\t   struct mlx5dr_action_dest_attr *dest_attr,\n+\t\t   struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_action_jump *jump_conf = action->conf;\n+\tstruct mlx5_hw_jump_action *jump = flow_hw_jump_action_register\n+\t\t\t\t\t\t(dev, table_cfg,\n+\t\t\t\t\t\t jump_conf->group, error);\n+\n+\tif (!jump)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t  action, \"failed to create JUMP action for mirror clone\");\n+\tdest_attr->dest = jump->hws_action;\n+\tclone->action_ctx = jump;\n+\treturn 0;\n+}\n+\n+static int\n+mirror_format_port(struct rte_eth_dev *dev,\n+\t\t   const struct rte_flow_action *action,\n+\t\t   struct mlx5dr_action_dest_attr *dest_attr,\n+\t\t   struct rte_flow_error __rte_unused *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_action_ethdev *port_action = action->conf;\n+\n+\tdest_attr->dest = priv->hw_vport[port_action->port_id];\n+\treturn 0;\n+}\n+\n+#define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \\\n+(((const struct encap_type *)(ptr))->definition)\n+\n+static int\n+hw_mirror_clone_reformat(const struct rte_flow_action *actions,\n+\t\t\t struct mlx5dr_action_dest_attr *dest_attr,\n+\t\t\t enum mlx5dr_action_type *action_type, bool decap)\n+{\n+\tint ret;\n+\tuint8_t encap_buf[MLX5_ENCAP_MAX_LEN];\n+\tconst struct rte_flow_item *encap_item = NULL;\n+\tconst struct rte_flow_action_raw_encap *encap_conf = NULL;\n+\ttypeof(dest_attr->reformat) *reformat = &dest_attr->reformat;\n+\n+\tswitch (actions[0].type) {\n+\tcase RTE_FLOW_ACTION_TYPE_RAW_ENCAP:\n+\t\tencap_conf = actions[0].conf;\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\n+\t\tencap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,\n+\t\t\t\t\t\t   actions);\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:\n+\t\tencap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,\n+\t\t\t\t\t\t   actions);\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -EINVAL;\n+\t}\n+\t*action_type = decap ?\n+\t\t       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :\n+\t\t       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;\n+\tif (encap_item) {\n+\t\tret = flow_dv_convert_encap_data(encap_item, encap_buf,\n+\t\t\t\t\t\t &reformat->reformat_data_sz, NULL);\n+\t\tif (ret)\n+\t\t\treturn -EINVAL;\n+\t\treformat->reformat_data = (void *)(uintptr_t)encap_buf;\n+\t} else 
{\n+\t\treformat->reformat_data = (void *)(uintptr_t)encap_conf->data;\n+\t\treformat->reformat_data_sz = encap_conf->size;\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+hw_mirror_format_clone(struct rte_eth_dev *dev,\n+\t\t\tstruct mlx5_mirror_clone *clone,\n+\t\t\tconst struct mlx5_flow_template_table_cfg *table_cfg,\n+\t\t\tconst struct rte_flow_action *actions,\n+\t\t\tstruct mlx5dr_action_dest_attr *dest_attr,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tint ret;\n+\tuint32_t i;\n+\tbool decap_seen = false;\n+\n+\tfor (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {\n+\t\tdest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];\n+\t\tswitch (actions[i].type) {\n+\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\t\tret = mirror_format_tir(dev, clone, table_cfg,\n+\t\t\t\t\t\t&actions[i], dest_attr, error);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n+\t\t\tret = mirror_format_port(dev, &actions[i],\n+\t\t\t\t\t\t dest_attr, error);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_JUMP:\n+\t\t\tret = mirror_format_jump(dev, clone, table_cfg,\n+\t\t\t\t\t\t &actions[i], dest_attr, error);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RAW_DECAP:\n+\t\t\tdecap_seen = true;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RAW_ENCAP:\n+\t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\n+\t\tcase RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:\n+\t\t\tret = hw_mirror_clone_reformat(&actions[i], dest_attr,\n+\t\t\t\t\t\t       &dest_attr->action_type[i],\n+\t\t\t\t\t\t       decap_seen);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t\t  &actions[i],\n+\t\t\t\t\t\t\t  \"failed to create reformat action\");\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  &actions[i], \"unsupported sample action\");\n+\t\t}\n+\t\tclone->type = actions->type;\n+\t}\n+\tdest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;\n+\treturn 0;\n+}\n+\n+static struct rte_flow_action_list_handle *\n+mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,\n+\t\t\t     const struct mlx5_flow_template_table_cfg *table_cfg,\n+\t\t\t     const struct rte_flow_action *actions,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tuint32_t hws_flags;\n+\tint ret = 0, i, clones_num;\n+\tstruct mlx5_mirror *mirror;\n+\tenum mlx5dr_table_type table_type;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];\n+\tenum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]\n+\t\t\t\t\t\t  [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];\n+\n+\tmemset(mirror_attr, 0, sizeof(mirror_attr));\n+\tmemset(array_action_types, 0, sizeof(array_action_types));\n+\ttable_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);\n+\thws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];\n+\tclones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);\n+\tif (clones_num < 0) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   actions, \"Invalid mirror list format\");\n+\t\treturn NULL;\n+\t}\n+\tmirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),\n+\t\t\t     0, SOCKET_ID_ANY);\n+\tif (!mirror) {\n+\t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   
actions, \"Failed to allocate mirror context\");\n+\t\treturn NULL;\n+\t}\n+\tmirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;\n+\tmirror->clones_num = clones_num;\n+\tfor (i = 0; i < clones_num; i++) {\n+\t\tconst struct rte_flow_action *clone_actions;\n+\n+\t\tmirror_attr[i].action_type = array_action_types[i];\n+\t\tif (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {\n+\t\t\tconst struct rte_flow_action_sample *sample = actions[i].conf;\n+\n+\t\t\tclone_actions = sample->actions;\n+\t\t} else {\n+\t\t\tclone_actions = &actions[i];\n+\t\t}\n+\t\tret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,\n+\t\t\t\t\t     clone_actions, &mirror_attr[i],\n+\t\t\t\t\t     error);\n+\n+\t\tif (ret)\n+\t\t\tgoto error;\n+\t}\n+\thws_flags |= MLX5DR_ACTION_FLAG_SHARED;\n+\tmirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,\n+\t\t\t\t\t\t\t\tclones_num,\n+\t\t\t\t\t\t\t\tmirror_attr,\n+\t\t\t\t\t\t\t\thws_flags);\n+\tif (!mirror->mirror_action) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   actions, \"Failed to create HWS mirror action\");\n+\t\tgoto error;\n+\t}\n+\n+\tLIST_INSERT_HEAD(&priv->indirect_list_head,\n+\t\t\t (struct mlx5_indirect_list *)mirror, entry);\n+\treturn (struct rte_flow_action_list_handle *)mirror;\n+\n+error:\n+\tmlx5_hw_mirror_destroy(dev, mirror, true);\n+\treturn NULL;\n+}\n+\n+static struct rte_flow_action_list_handle *\n+flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t\t\tconst struct rte_flow_op_attr *attr,\n+\t\t\t\t\tconst struct rte_flow_indir_action_conf *conf,\n+\t\t\t\t\tconst struct rte_flow_action *actions,\n+\t\t\t\t\tvoid *user_data,\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct mlx5_hw_q_job *job = NULL;\n+\tbool push = flow_hw_action_push(attr);\n+\tstruct rte_flow_action_list_handle *handle;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct mlx5_flow_template_table_cfg table_cfg = {\n+\t\t.external = true,\n+\t\t.attr = {\n+\t\t\t.flow_attr = {\n+\t\t\t\t.ingress = conf->ingress,\n+\t\t\t\t.egress = conf->egress,\n+\t\t\t\t.transfer = conf->transfer\n+\t\t\t}\n+\t\t}\n+\t};\n+\n+\tif (!actions) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   NULL, \"No action list\");\n+\t\treturn NULL;\n+\t}\n+\tif (attr) {\n+\t\tjob = flow_hw_action_job_init(priv, queue, NULL, user_data,\n+\t\t\t\t\t      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,\n+\t\t\t\t\t      error);\n+\t\tif (!job)\n+\t\t\treturn NULL;\n+\t}\n+\tswitch (actions[0].type) {\n+\tcase RTE_FLOW_ACTION_TYPE_SAMPLE:\n+\t\thandle = mlx5_hw_mirror_handle_create(dev, &table_cfg,\n+\t\t\t\t\t\t      actions, error);\n+\t\tbreak;\n+\tdefault:\n+\t\thandle = NULL;\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   actions, \"Invalid list\");\n+\t}\n+\tif (job) {\n+\t\tjob->action = handle;\n+\t\tflow_hw_action_finalize(dev, queue, job, push, false,\n+\t\t\t\t\thandle != NULL);\n+\t}\n+\treturn handle;\n+}\n+\n+static struct rte_flow_action_list_handle *\n+flow_hw_action_list_handle_create(struct rte_eth_dev *dev,\n+\t\t\t\t  const struct rte_flow_indir_action_conf *conf,\n+\t\t\t\t  const struct rte_flow_action *actions,\n+\t\t\t\t  struct rte_flow_error *error)\n+{\n+\treturn flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,\n+\t\t\t\t\t\t       NULL, conf, actions,\n+\t\t\t\t\t\t       NULL, error);\n+}\n+\n+static int\n+flow_hw_async_action_list_handle_destroy\n+\t\t\t(struct rte_eth_dev *dev, 
uint32_t queue,\n+\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t struct rte_flow_action_list_handle *handle,\n+\t\t\t void *user_data, struct rte_flow_error *error)\n+{\n+\tint ret = 0;\n+\tstruct mlx5_hw_q_job *job = NULL;\n+\tbool push = flow_hw_action_push(attr);\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tenum mlx5_indirect_list_type type =\n+\t\tmlx5_get_indirect_list_type((void *)handle);\n+\n+\tif (attr) {\n+\t\tjob = flow_hw_action_job_init(priv, queue, NULL, user_data,\n+\t\t\t\t\t      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,\n+\t\t\t\t\t      error);\n+\t\tif (!job)\n+\t\t\treturn rte_errno;\n+\t}\n+\tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n+\t\tmlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);\n+\t\tbreak;\n+\tdefault:\n+\t\thandle = NULL;\n+\t\tret = rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t  NULL, \"Invalid indirect list handle\");\n+\t}\n+\tif (job) {\n+\t\tjob->action = handle;\n+\t\tflow_hw_action_finalize(dev, queue, job, push, false,\n+\t\t\t\t       handle != NULL);\n+\t}\n+\tmlx5_free(handle);\n+\treturn ret;\n+}\n+\n+static int\n+flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,\n+\t\t\t\t   struct rte_flow_action_list_handle *handle,\n+\t\t\t\t   struct rte_flow_error *error)\n+{\n+\treturn flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,\n+\t\t\t\t\t\t\tNULL, handle, NULL,\n+\t\t\t\t\t\t\terror);\n+}\n+\n const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.info_get = flow_hw_info_get,\n \t.configure = flow_hw_configure,\n@@ -9382,6 +9983,12 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.action_update = flow_hw_action_update,\n \t.action_query = flow_hw_action_query,\n \t.action_query_update = flow_hw_action_query_update,\n+\t.action_list_handle_create = flow_hw_action_list_handle_create,\n+\t.action_list_handle_destroy = flow_hw_action_list_handle_destroy,\n+\t.async_action_list_handle_create =\n+\t\tflow_hw_async_action_list_handle_create,\n+\t.async_action_list_handle_destroy =\n+\t\tflow_hw_async_action_list_handle_destroy,\n \t.query = flow_hw_query,\n \t.get_aged_flows = flow_hw_get_aged_flows,\n \t.get_q_aged_flows = flow_hw_get_q_aged_flows,\n",
    "prefixes": [
        "v5",
        "08/10"
    ]
}
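
Writing back through the patch:/put: operations above requires authentication. A hedged sketch of a partial update, assuming a valid Patchwork API token (the placeholder value is not a real credential) whose owner has maintainer rights on the project; the state slug matches the "state" field of the GET body above:

    import requests

    url = "http://patchwork.dpdk.org/api/patches/133291/"
    token = "REPLACE-WITH-REAL-TOKEN"  # hypothetical placeholder

    resp = requests.patch(
        url,
        headers={"Authorization": f"Token {token}", "Accept": "application/json"},
        json={"state": "superseded"},  # PATCH: only the supplied field changes
        timeout=30,
    )
    resp.raise_for_status()
    print(resp.json()["state"])

An unauthenticated request to the same endpoint is rejected with an HTTP 401/403 error, and PUT expects the full set of writable fields rather than a single one.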