get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).

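For reference, a minimal sketch of driving this endpoint with Python's requests library. The token value is a placeholder and write access is an assumption (changing a patch requires maintainer permissions on the project); the field names follow the JSON shown below.

import requests

BASE = "http://patchwork.dpdk.org/api"
TOKEN = "your-api-token"  # placeholder; issue a real token from your Patchwork profile

# GET: show the patch (read access needs no authentication).
patch = requests.get(f"{BASE}/patches/129197/").json()
print(patch["name"], patch["state"])

# PATCH: partial update of writable fields such as "state" or "archived".
# PUT behaves the same way but replaces the full writable representation.
resp = requests.patch(
    f"{BASE}/patches/129197/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
)
resp.raise_for_status()
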
GET /api/patches/129197/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 129197,
    "url": "http://patchwork.dpdk.org/api/patches/129197/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20230702045758.23244-3-igozlan@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230702045758.23244-3-igozlan@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230702045758.23244-3-igozlan@nvidia.com",
    "date": "2023-07-02T04:57:56",
    "name": "[v2,3/5] net/mlx5: add indirect encap decap support",
    "commit_ref": null,
    "pull_url": null,
    "state": "not-applicable",
    "archived": true,
    "hash": "f36f4e12bc5020c32b25cb01ddb611a1aa17474b",
    "submitter": {
        "id": 3118,
        "url": "http://patchwork.dpdk.org/api/people/3118/?format=api",
        "name": "Itamar Gozlan",
        "email": "igozlan@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20230702045758.23244-3-igozlan@nvidia.com/mbox/",
    "series": [
        {
            "id": 28775,
            "url": "http://patchwork.dpdk.org/api/series/28775/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=28775",
            "date": "2023-07-02T04:57:54",
            "name": "[v2,1/5] net/mlx5: support indirect list METER_MARK action",
            "version": 2,
            "mbox": "http://patchwork.dpdk.org/series/28775/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/129197/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/129197/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 7AFF242DA8;\n\tMon,  3 Jul 2023 11:21:25 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id AE15642D13;\n\tMon,  3 Jul 2023 11:21:16 +0200 (CEST)",
            "from NAM04-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam04on2079.outbound.protection.outlook.com [40.107.100.79])\n by mails.dpdk.org (Postfix) with ESMTP id 5D40D40689\n for <dev@dpdk.org>; Sun,  2 Jul 2023 06:58:30 +0200 (CEST)",
            "from DM6PR04CA0013.namprd04.prod.outlook.com (2603:10b6:5:334::18)\n by PH8PR12MB7205.namprd12.prod.outlook.com (2603:10b6:510:227::18) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6544.24; Sun, 2 Jul\n 2023 04:58:26 +0000",
            "from DM6NAM11FT084.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:334:cafe::d0) by DM6PR04CA0013.outlook.office365.com\n (2603:10b6:5:334::18) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6544.26 via Frontend\n Transport; Sun, 2 Jul 2023 04:58:26 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n DM6NAM11FT084.mail.protection.outlook.com (10.13.172.132) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.6521.43 via Frontend Transport; Sun, 2 Jul 2023 04:58:25 +0000",
            "from rnnvmail204.nvidia.com (10.129.68.6) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.5; Sat, 1 Jul 2023\n 21:58:13 -0700",
            "from rnnvmail205.nvidia.com (10.129.68.10) by rnnvmail204.nvidia.com\n (10.129.68.6) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.37; Sat, 1 Jul 2023\n 21:58:12 -0700",
            "from nvidia.com (10.127.8.12) by mail.nvidia.com (10.129.68.10) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.37 via Frontend\n Transport; Sat, 1 Jul 2023 21:58:10 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=AXIknOzX4crhCmtR3gOvQmSv7dhcNCxhn6oNa3BssAz3uVzYkGTKFyq1vClpzLnO7+dfYkIMF8FbXMVL2EOuUF397QpQFu66gHngoW7qrmWX9xw0W07DpOpdkhXgoTeMWZ1LlmPmOaQKttUll4raUtytu8dedqPAClN7q8FOPTMogVkGli6rTsZTH5MxvnkDpAsEiEDyfzRD6tKOuTaceHEKJxcNE9jtH8CKL5m01ZicjEmZCPltcmxs1EeEzOGYPugU2ycdbVbLQcN8FOChlgVIvbWa3g4aZlkgViaQDZPQoe4mBwR6ssyfSiKTuQAyI35RQBLr7DAp3jk3WQc3sw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=opmdIgxMN2ETTXP26wL4Hy7CHMKjIjHhdpDSRjGn/IE=;\n b=ADIlHDtGunEMBLExWEsfZUeBAmlQqiim5+Ye9/zf4Zv4KLGmZlGNbFOUKgSD3pEaI66FUiYU4skO9QeqJF9rep6DiF2Wtr8TVhAQ9BUMVblYOChKInilpyyGuiqQvmecAvR831yP5ihJd11XrG90Xi0hZrDHepiOl5uWJNZ0QNxDBaAaORe9b3AUcNj2Xj0VjjvAJE/bgZ+WODTaUPhvnD8s1eeuRKUcImZnrI3Kbn+E6OdNGXcjLmueDqEaFOIGgBICPhlreDbGkLL7OsWt6LBgQuxXpWb/1Mnec1U5IfZ6/mfM7DftBXva/IuN/S888KAu/4sMC80OtE4pYImg2A==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=opmdIgxMN2ETTXP26wL4Hy7CHMKjIjHhdpDSRjGn/IE=;\n b=naxqTCtyEHLADob46S83M+u9sWuCyQZch+SeEC0fbU3634bU1yZNU4nFqM6jC3qryHoCuu4ceseU+Ipr0csGwAlBjGdsUN1UFC2dXCSrMG8f3L0Hd1SYI9BqC6AhcIHveUbQWwUwxdZQSIKoAkPK7alR32jirSfbkLqN+y4vQ0IoHNmb/8AHpLCsIb0KrRvnhvgly4MZjK+gC/PEvlraNAc0JiqbzuelUv0K9rAPklNIIe+Vh3sBPBMZLxLF7+hDfrbOsulpGbIgbLT4+KM9sus9X4f94qvHBc5Mt6eLxJYNH7YnSLn+P5idelFcKPxBADKGY1g/8Z2vptBmnXiUPw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Itamar Gozlan <igozlan@nvidia.com>",
        "To": "<valex@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <thomas@monjalon.net>, <suanmingm@nvidia.com>",
        "CC": "<dev@dpdk.org>, <orika@nvidia.com>, Rongwei Liu <rongweil@nvidia.com>",
        "Subject": "[v2 3/5] net/mlx5: add indirect encap decap support",
        "Date": "Sun, 2 Jul 2023 07:57:56 +0300",
        "Message-ID": "<20230702045758.23244-3-igozlan@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20230702045758.23244-1-igozlan@nvidia.com>",
        "References": "<20230629072125.20369-5-igozlan@nvidia.com>\n <20230702045758.23244-1-igozlan@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"UTF-8\"",
        "Content-Transfer-Encoding": "8bit",
        "X-NV-OnPremToCloud": "ExternallySecured",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "DM6NAM11FT084:EE_|PH8PR12MB7205:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "aab2afd5-2983-4590-90a2-08db7ab8f836",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n Ww4ldrNoh/EVPng1wgzaJ1FRFv5L6AmrAF1h7JtBeqyat23mO/H4bd8Gm/l8b/JbuJ6I7PhvqWNViR1pw7y8G2AVPfVpFxd8WhYNiW5ka7UsrUwZ2ER664gUHrfZj7KZfqMnJmafE6h3ScVLLK2rwitW247EhU/woWjChU0PGhvMeORk0+6IIAZAUrwZtC+C/SVaYoYQ05DjERXeKsjT8jmHCH/D5vBhVWSIhYtJ8Lm1UOz/nJfSfXN6MhT5J9XAJw/YzJAJV5h7af9OpbQT6lRjbzmsLRlCLaFhBAvI3aTZhNwoG7AirzVg163c7qf6x2bA+U6S/G+ActE1xwJ3X4MuQKjej2slBEytp0/6KABeBylItilJwr8Cp8A+xGhYHNcw+KmS9pDy0h+v2T80nBZjWpCd4G6AUITwgIIXTYdm7XD0UjRJ/NX3uph/Vt0dQRbgacgUvCEwwzGrI5uQIUG5ePgH4OqEIKxW+4oKkI70HvzGHV8GK8jCms888hJqLWJdv+OaXAjnqqkXHgqM+K6HHjvfsdjDJOjEPDozMOZk1XfC/KgCjA2FdA+js9+w+7rJ88twHxsWZfmOcMknrQSwhXM7P35+0MeDSKJQDxhJHcwP97IshFp6XsHj8ed2vymlBw3bAn2nkGPnODXdHTORVcm0jmmSTSeA2ZE4Axcpksa1NLddOgaCMLx0MOMWv3s1aOxTOS4oWZ1VOJUQsuHtfpTdZaPvxOomIfZAto9zPwSH0o4oSPhASrShg0bU",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230028)(4636009)(136003)(39860400002)(376002)(346002)(396003)(451199021)(46966006)(40470700004)(36840700001)(1076003)(82740400003)(40480700001)(356005)(7636003)(40460700003)(316002)(55016003)(70586007)(70206006)(6636002)(4326008)(107886003)(36860700001)(47076005)(2616005)(83380400001)(82310400005)(426003)(336012)(186003)(6286002)(26005)(478600001)(110136005)(54906003)(2906002)(30864003)(8676002)(8936002)(36756003)(5660300002)(7696005)(86362001)(6666004)(41300700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "02 Jul 2023 04:58:25.8391 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n aab2afd5-2983-4590-90a2-08db7ab8f836",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT084.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PH8PR12MB7205",
        "X-Mailman-Approved-At": "Mon, 03 Jul 2023 11:21:13 +0200",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Rongwei Liu <rongweil@nvidia.com>\n\nSupport the raw_encap/decap combinations in the indirect action\nlist, and translates to 4 types of underlayer tunnel operations:\n1. Layer 2 encapsulation like VxLAN.\n2. Layer 2 decapsulation like VxLAN.\n3. Layer 3 encapsulation like GRE.\n4. Layer 3 decapsulation like GRE.\n\nEach indirect action list has a unique handle ID and stands for\ndifferent tunnel operations. The operation is shared globally with\nfixed patterns. It means there is no configuration associated with\neach handle ID and conf pointer should be NULL always no matter in\nthe action template or flow rules.\n\nIf the handle ID mask in the action template is NULL, each flow rule\ncan take its own indirect handle, otherwise, the ID in action template\nis used for all rules.\nThe handle ID used in the flow rules must be the same type as the one\nin the action template.\n\nTestpmd cli example:\n\nflow indirect_action 0 create action_id 10 transfer list actions\nraw_decap index 1 / raw_encap index 2 / end \n\nflow pattern_template 0 create transfer pattern_template_id 1 template\neth / ipv4 / udp / end\n\nflow actions_template 0 create transfer actions_template_id 1 template\nindirect_list handle 10 / jump / end mask indirect_list / jump / end\n\nflow template_table 0 create table_id 1 group 1 priority 0 transfer\nrules_number 64 pattern_template 1 actions_template 1\n\nflow queue 0 create 0 template_table 1 pattern_template 0\nactions_template 0 postpone no pattern eth / ipv4 / udp / end actions\nindirect_list handle 11 / jump group 10 / end \n\nSigned-off-by: Rongwei Liu <rongweil@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.c    |   5 +\n drivers/net/mlx5/mlx5_flow.h    |  16 ++\n drivers/net/mlx5/mlx5_flow_hw.c | 323 ++++++++++++++++++++++++++++++++\n 3 files changed, 344 insertions(+)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex fb7b82fa26..45f2210ae7 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -54,6 +54,7 @@ void\n mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct rte_flow_error error;\n \n \twhile (!LIST_EMPTY(&priv->indirect_list_head)) {\n \t\tstruct mlx5_indirect_list *e =\n@@ -68,6 +69,10 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)\n \t\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:\n \t\t\tmlx5_destroy_legacy_indirect(dev, e);\n \t\t\tbreak;\n+\t\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:\n+\t\t\tmlx5_reformat_action_destroy(dev,\n+\t\t\t\t(struct rte_flow_action_list_handle *)e, &error);\n+\t\t\tbreak;\n #endif\n \t\tdefault:\n \t\t\tDRV_LOG(ERR, \"invalid indirect list type\");\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 46bfd4d8a7..e273bd958d 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -116,6 +116,7 @@ enum mlx5_indirect_list_type {\n \tMLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,\n \tMLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,\n \tMLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,\n+\tMLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT = 3,\n };\n \n /**\n@@ -1433,6 +1434,8 @@ struct mlx5_hw_jump_action {\n \n /* Encap decap action struct. */\n struct mlx5_hw_encap_decap_action {\n+\tstruct mlx5_indirect_list indirect;\n+\tenum mlx5dr_action_type action_type;\n \tstruct mlx5dr_action *action; /* Action object. */\n \t/* Is header_reformat action shared across flows in table. */\n \tbool shared;\n@@ -2596,6 +2599,16 @@ flow_hw_validate_action_ipsec(struct rte_eth_dev *dev,\n \t\t\t      uint64_t action_flags,\n \t\t\t      struct rte_flow_error *error);\n \n+struct mlx5_hw_encap_decap_action*\n+mlx5_reformat_action_create(struct rte_eth_dev *dev,\n+\t\t\t    const struct rte_flow_indir_action_conf *conf,\n+\t\t\t    const struct rte_flow_action *encap_action,\n+\t\t\t    const struct rte_flow_action *decap_action,\n+\t\t\t    struct rte_flow_error *error);\n+int mlx5_reformat_action_destroy(struct rte_eth_dev *dev,\n+\t\t\t\t struct rte_flow_action_list_handle *handle,\n+\t\t\t\t struct rte_flow_error *error);\n+\n int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,\n \t\t\t\t    const struct rte_flow_attr *attr,\n \t\t\t\t    struct rte_flow_error *error);\n@@ -3041,5 +3054,8 @@ mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);\n void\n mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,\n \t\t\t     struct mlx5_indirect_list *ptr);\n+void\n+mlx5_hw_decap_encap_destroy(struct rte_eth_dev *dev,\n+\t\t\t    struct mlx5_indirect_list *reformat);\n #endif\n #endif /* RTE_PMD_MLX5_FLOW_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex 7b4661ad4f..5e5ebbe620 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -1472,6 +1472,49 @@ hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,\n \treturn ret;\n }\n \n+static int\n+flow_hw_reformat_action(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t__rte_unused const struct mlx5_action_construct_data *data,\n+\t\t\tconst struct rte_flow_action *action,\n+\t\t\tstruct mlx5dr_rule_action *dr_rule)\n+{\n+\tconst struct rte_flow_action_indirect_list *indlst_conf = action->conf;\n+\n+\tdr_rule->action = ((struct mlx5_hw_encap_decap_action *)\n+\t\t\t   
(indlst_conf->handle))->action;\n+\tif (!dr_rule->action)\n+\t\treturn -EINVAL;\n+\treturn 0;\n+}\n+\n+/**\n+ * Template conf must not be masked. If handle is masked, use the one in template,\n+ * otherwise update per flow rule.\n+ */\n+static int\n+hws_table_tmpl_translate_indirect_reformat(struct rte_eth_dev *dev,\n+\t\t\t\t\t   const struct rte_flow_action *action,\n+\t\t\t\t\t   const struct rte_flow_action *mask,\n+\t\t\t\t\t   struct mlx5_hw_actions *acts,\n+\t\t\t\t\t   uint16_t action_src, uint16_t action_dst)\n+{\n+\tint ret = -1;\n+\tconst struct rte_flow_action_indirect_list *mask_conf = mask->conf;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tif (mask_conf && mask_conf->handle && !mask_conf->conf)\n+\t\t/**\n+\t\t * If handle was masked, assign fixed DR action.\n+\t\t */\n+\t\tret = flow_hw_reformat_action(dev, NULL, action,\n+\t\t\t\t\t      &acts->rule_acts[action_dst]);\n+\telse if (mask_conf && !mask_conf->handle && !mask_conf->conf)\n+\t\tret = flow_hw_act_data_indirect_list_append\n+\t\t\t(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,\n+\t\t\t action_src, action_dst, flow_hw_reformat_action);\n+\treturn ret;\n+}\n+\n static int\n flow_dr_set_meter(struct mlx5_priv *priv,\n \t\t  struct mlx5dr_rule_action *dr_rule,\n@@ -1628,6 +1671,13 @@ table_template_translate_indirect_list(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\t       acts, action_src,\n \t\t\t\t\t\t\t       action_dst);\n \t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:\n+\t\tif (list_conf->conf)\n+\t\t\treturn -EINVAL;\n+\t\tret = hws_table_tmpl_translate_indirect_reformat(dev, action, mask,\n+\t\t\t\t\t\t\t\t acts, action_src,\n+\t\t\t\t\t\t\t\t action_dst);\n+\t\tbreak;\n \tdefault:\n \t\treturn -EINVAL;\n \t}\n@@ -4966,6 +5016,7 @@ flow_hw_template_actions_list(struct rte_flow_actions_template *at,\n \t\tstruct mlx5_indlst_legacy *legacy;\n \t\tstruct rte_flow_action_list_handle *handle;\n \t} indlst_obj = { .handle = indlst_conf->handle };\n+\tenum mlx5dr_action_type type;\n \n \tswitch (list_type) {\n \tcase MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:\n@@ -4979,6 +5030,11 @@ flow_hw_template_actions_list(struct rte_flow_actions_template *at,\n \t\taction_template_set_type(at, action_types, action_src, curr_off,\n \t\t\t\t\t MLX5DR_ACTION_TYP_DEST_ARRAY);\n \t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:\n+\t\ttype = ((struct mlx5_hw_encap_decap_action *)\n+\t\t\t(indlst_conf->handle))->action_type;\n+\t\taction_template_set_type(at, action_types, action_src, curr_off, type);\n+\t\tbreak;\n \tdefault:\n \t\tDRV_LOG(ERR, \"Unsupported indirect list type\");\n \t\treturn -EINVAL;\n@@ -10055,12 +10111,79 @@ flow_hw_inlist_type_get(const struct rte_flow_action *actions)\n \t\treturn actions[1].type == RTE_FLOW_ACTION_TYPE_END ?\n \t\t       MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :\n \t\t       MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;\n+\tcase RTE_FLOW_ACTION_TYPE_RAW_DECAP:\n+\tcase RTE_FLOW_ACTION_TYPE_RAW_ENCAP:\n+\t\treturn MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;\n \tdefault:\n \t\tbreak;\n \t}\n \treturn MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;\n }\n \n+static struct rte_flow_action_list_handle*\n+mlx5_hw_decap_encap_handle_create(struct rte_eth_dev *dev,\n+\t\t\t\t  const struct mlx5_flow_template_table_cfg *table_cfg,\n+\t\t\t\t  const struct rte_flow_action *actions,\n+\t\t\t\t  struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;\n+\tconst struct rte_flow_action *encap = 
NULL;\n+\tconst struct rte_flow_action *decap = NULL;\n+\tstruct rte_flow_indir_action_conf indirect_conf = {\n+\t\t.ingress = flow_attr->ingress,\n+\t\t.egress = flow_attr->egress,\n+\t\t.transfer = flow_attr->transfer,\n+\t};\n+\tstruct mlx5_hw_encap_decap_action *handle;\n+\tuint64_t action_flags = 0;\n+\n+\t/*\n+\t * Allow\n+\t * 1. raw_decap / raw_encap / end\n+\t * 2. raw_encap / end\n+\t * 3. raw_decap / end\n+\t */\n+\twhile (actions->type != RTE_FLOW_ACTION_TYPE_END) {\n+\t\tif (actions->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {\n+\t\t\tif (action_flags) {\n+\t\t\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t   actions, \"Invalid indirect action list sequence\");\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_DECAP;\n+\t\t\tdecap = actions;\n+\t\t} else if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {\n+\t\t\tif (action_flags & MLX5_FLOW_ACTION_ENCAP) {\n+\t\t\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t   actions, \"Invalid indirect action list sequence\");\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_ENCAP;\n+\t\t\tencap = actions;\n+\t\t} else {\n+\t\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t   actions, \"Invalid indirect action type in list\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t\tactions++;\n+\t}\n+\tif (!decap && !encap) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   actions, \"Invalid indirect action combinations\");\n+\t\treturn NULL;\n+\t}\n+\thandle = mlx5_reformat_action_create(dev, &indirect_conf, encap, decap, error);\n+\tif (!handle) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   actions, \"Failed to create HWS decap_encap action\");\n+\t\treturn NULL;\n+\t}\n+\thandle->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;\n+\tLIST_INSERT_HEAD(&priv->indirect_list_head, &handle->indirect, entry);\n+\treturn (struct rte_flow_action_list_handle *)handle;\n+}\n+\n static struct rte_flow_action_list_handle *\n flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t\tconst struct rte_flow_op_attr *attr,\n@@ -10112,6 +10235,10 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\thandle = mlx5_hw_mirror_handle_create(dev, &table_cfg,\n \t\t\t\t\t\t      actions, error);\n \t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:\n+\t\thandle = mlx5_hw_decap_encap_handle_create(dev, &table_cfg,\n+\t\t\t\t\t\t\t   actions, error);\n+\t\tbreak;\n \tdefault:\n \t\thandle = NULL;\n \t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n@@ -10171,6 +10298,11 @@ flow_hw_async_action_list_handle_destroy\n \tcase MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:\n \t\tmlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);\n \t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:\n+\t\tLIST_REMOVE(&((struct mlx5_hw_encap_decap_action *)handle)->indirect,\n+\t\t\t    entry);\n+\t\tmlx5_reformat_action_destroy(dev, handle, error);\n+\t\tbreak;\n \tdefault:\n \t\tret = rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n@@ -11468,4 +11600,195 @@ mlx5_flow_hw_put_dr_action(struct rte_eth_dev *dev,\n \t}\n }\n \n+static __rte_always_inline uint32_t\n+mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)\n+{\n+\tuint32_t tbl_type;\n+\n+\tif (domain->transfer)\n+\t\ttbl_type = MLX5DR_ACTION_FLAG_HWS_FDB;\n+\telse if 
(domain->egress)\n+\t\ttbl_type = MLX5DR_ACTION_FLAG_HWS_TX;\n+\telse if (domain->ingress)\n+\t\ttbl_type = MLX5DR_ACTION_FLAG_HWS_RX;\n+\telse\n+\t\ttbl_type = UINT32_MAX;\n+\treturn tbl_type;\n+}\n+\n+static struct mlx5_hw_encap_decap_action *\n+__mlx5_reformat_create(struct rte_eth_dev *dev,\n+\t\t       const struct rte_flow_action_raw_encap *encap_conf,\n+\t\t       const struct rte_flow_indir_action_conf *domain,\n+\t\t       enum mlx5dr_action_type type)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hw_encap_decap_action *handle;\n+\tstruct mlx5dr_action_reformat_header hdr;\n+\tuint32_t flags;\n+\n+\tflags = mlx5_reformat_domain_to_tbl_type(domain);\n+\tflags |= (uint32_t)MLX5DR_ACTION_FLAG_SHARED;\n+\tif (flags == UINT32_MAX) {\n+\t\tDRV_LOG(ERR, \"Reformat: invalid indirect action configuration\");\n+\t\treturn NULL;\n+\t}\n+\t/* Allocate new list entry. */\n+\thandle = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*handle), 0, SOCKET_ID_ANY);\n+\tif (!handle) {\n+\t\tDRV_LOG(ERR, \"Reformat: failed to allocate reformat entry\");\n+\t\treturn NULL;\n+\t}\n+\thandle->action_type = type;\n+\thdr.sz = encap_conf ? encap_conf->size : 0;\n+\thdr.data = encap_conf ? encap_conf->data : NULL;\n+\thandle->action = mlx5dr_action_create_reformat(priv->dr_ctx,\n+\t\t\t\t\ttype, 1, &hdr, 0, flags);\n+\tif (!handle->action) {\n+\t\tDRV_LOG(ERR, \"Reformat: failed to create reformat action\");\n+\t\tmlx5_free(handle);\n+\t\treturn NULL;\n+\t}\n+\treturn handle;\n+}\n+\n+/**\n+ * Create mlx5 reformat action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to rte_eth_dev structure.\n+ * @param[in] conf\n+ *   Pointer to the indirect action parameters.\n+ * @param[in] encap_action\n+ *   Pointer to the raw_encap action configuration.\n+ * @param[in] decap_action\n+ *   Pointer to the raw_decap action configuration.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   A valid shared action handle in case of success, NULL otherwise and\n+ *   rte_errno is set.\n+ */\n+struct mlx5_hw_encap_decap_action*\n+mlx5_reformat_action_create(struct rte_eth_dev *dev,\n+\t\t\t    const struct rte_flow_indir_action_conf *conf,\n+\t\t\t    const struct rte_flow_action *encap_action,\n+\t\t\t    const struct rte_flow_action *decap_action,\n+\t\t\t    struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hw_encap_decap_action *handle;\n+\tconst struct rte_flow_action_raw_encap *encap = NULL;\n+\tconst struct rte_flow_action_raw_decap *decap = NULL;\n+\tenum mlx5dr_action_type type = MLX5DR_ACTION_TYP_LAST;\n+\n+\tMLX5_ASSERT(!encap_action || encap_action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP);\n+\tMLX5_ASSERT(!decap_action || decap_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP);\n+\tif (priv->sh->config.dv_flow_en != 2) {\n+\t\trte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,\n+\t\t\t\t   \"Reformat: hardware does not support\");\n+\t\treturn NULL;\n+\t}\n+\tif (!conf || (conf->transfer + conf->egress + conf->ingress != 1)) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,\n+\t\t\t\t   \"Reformat: domain should be specified\");\n+\t\treturn NULL;\n+\t}\n+\tif ((encap_action && !encap_action->conf) || (decap_action && !decap_action->conf)) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,\n+\t\t\t\t   \"Reformat: missed action configuration\");\n+\t\treturn NULL;\n+\t}\n+\tif (encap_action && 
!decap_action) {\n+\t\tencap = (const struct rte_flow_action_raw_encap *)encap_action->conf;\n+\t\tif (!encap->size || encap->size > MLX5_ENCAP_MAX_LEN ||\n+\t\t    encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,\n+\t\t\t\t\t   \"Reformat: Invalid encap length\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t\ttype = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;\n+\t} else if (decap_action && !encap_action) {\n+\t\tdecap = (const struct rte_flow_action_raw_decap *)decap_action->conf;\n+\t\tif (!decap->size || decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,\n+\t\t\t\t\t   \"Reformat: Invalid decap length\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t\ttype = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;\n+\t} else if (encap_action && decap_action) {\n+\t\tdecap = (const struct rte_flow_action_raw_decap *)decap_action->conf;\n+\t\tencap = (const struct rte_flow_action_raw_encap *)encap_action->conf;\n+\t\tif (decap->size < MLX5_ENCAPSULATION_DECISION_SIZE &&\n+\t\t    encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&\n+\t\t    encap->size <= MLX5_ENCAP_MAX_LEN) {\n+\t\t\ttype = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;\n+\t\t} else if (decap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&\n+\t\t\t   encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {\n+\t\t\ttype = MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;\n+\t\t} else {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,\n+\t\t\t\t\t   \"Reformat: Invalid decap & encap length\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t} else if (!encap_action && !decap_action) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,\n+\t\t\t\t   \"Reformat: Invalid decap & encap configurations\");\n+\t\treturn NULL;\n+\t}\n+\tif (!priv->dr_ctx) {\n+\t\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   encap_action, \"Reformat: HWS not supported\");\n+\t\treturn NULL;\n+\t}\n+\thandle = __mlx5_reformat_create(dev, encap, conf, type);\n+\tif (!handle) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, encap_action,\n+\t\t\t\t   \"Reformat: failed to create indirect action\");\n+\t\treturn NULL;\n+\t}\n+\treturn handle;\n+}\n+\n+/**\n+ * Destroy the indirect reformat action.\n+ * Release action related resources on the NIC and the memory.\n+ * Lock free, (mutex should be acquired by caller).\n+ *\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[in] handle\n+ *   The indirect action list handle to be removed.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. Initialized in case of\n+ *   error only.\n+ *\n+ * @return\n+ *   0 on success, otherwise negative errno value.\n+ */\n+int\n+mlx5_reformat_action_destroy(struct rte_eth_dev *dev,\n+\t\t\t     struct rte_flow_action_list_handle *handle,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hw_encap_decap_action *action;\n+\n+\taction = (struct mlx5_hw_encap_decap_action *)handle;\n+\tif (!priv->dr_ctx || !action)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, handle,\n+\t\t\t\t\t  \"Reformat: invalid action handle\");\n+\tmlx5dr_action_destroy(action->action);\n+\tmlx5_free(handle);\n+\treturn 0;\n+}\n #endif\n",
    "prefixes": [
        "v2",
        "3/5"
    ]
}
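
A short sketch of consuming this response; it assumes network access to patchwork.dpdk.org and uses only fields present in the JSON above (the "mbox" URL points at the raw message, suitable for git am).

import requests

api_url = "http://patchwork.dpdk.org/api/patches/129197/"
patch = requests.get(api_url, params={"format": "json"}).json()

# Fields mirror the document above: state, CI check result, parent series.
print(patch["state"], patch["check"], patch["series"][0]["name"])

# Download the raw message via the "mbox" field for use with `git am`.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("patch-129197.mbox", "wb") as f:
    f.write(mbox.content)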