get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied are changed).

put:
Update a patch (full update).
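
Reads of this endpoint need no authentication. As a minimal sketch of fetching the patch programmatically (assuming the Python `requests` library and that the instance serves JSON to API clients by default, as Django REST Framework normally does):

import requests

# Fetch the patch shown in the example response below.
resp = requests.get("http://patchwork.dpdk.org/api/patches/131971/")
resp.raise_for_status()
patch = resp.json()

# A few of the fields visible in the response body.
print(patch["name"])                  # patch subject
print(patch["state"])                 # e.g. "accepted"
print(patch["submitter"]["email"])    # e.g. "tshmilovich@nvidia.com"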

GET /api/patches/131971/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 131971,
    "url": "http://patchwork.dpdk.org/api/patches/131971/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20230926155836.3290061-1-tshmilovich@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230926155836.3290061-1-tshmilovich@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230926155836.3290061-1-tshmilovich@nvidia.com",
    "date": "2023-09-26T15:58:35",
    "name": "net/mlx5: supporting group set miss actions API",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "218adf0940ece36b6665e9dc88e19cddb062023d",
    "submitter": {
        "id": 3144,
        "url": "http://patchwork.dpdk.org/api/people/3144/?format=api",
        "name": "Tomer Shmilovich",
        "email": "tshmilovich@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20230926155836.3290061-1-tshmilovich@nvidia.com/mbox/",
    "series": [
        {
            "id": 29642,
            "url": "http://patchwork.dpdk.org/api/series/29642/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=29642",
            "date": "2023-09-26T15:58:35",
            "name": "net/mlx5: supporting group set miss actions API",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/29642/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/131971/comments/",
    "check": "fail",
    "checks": "http://patchwork.dpdk.org/api/patches/131971/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E7B8A42644;\n\tTue, 26 Sep 2023 17:59:33 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id BABDD402D6;\n\tTue, 26 Sep 2023 17:59:33 +0200 (CEST)",
            "from NAM11-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam11on2040.outbound.protection.outlook.com [40.107.236.40])\n by mails.dpdk.org (Postfix) with ESMTP id 8E2B540269\n for <dev@dpdk.org>; Tue, 26 Sep 2023 17:59:32 +0200 (CEST)",
            "from CH2PR17CA0003.namprd17.prod.outlook.com (2603:10b6:610:53::13)\n by MW4PR12MB6897.namprd12.prod.outlook.com (2603:10b6:303:208::15)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6792.28; Tue, 26 Sep\n 2023 15:59:30 +0000",
            "from DS2PEPF00003443.namprd04.prod.outlook.com\n (2603:10b6:610:53:cafe::1b) by CH2PR17CA0003.outlook.office365.com\n (2603:10b6:610:53::13) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6792.35 via Frontend\n Transport; Tue, 26 Sep 2023 15:59:30 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n DS2PEPF00003443.mail.protection.outlook.com (10.167.17.70) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.6838.14 via Frontend Transport; Tue, 26 Sep 2023 15:59:30 +0000",
            "from rnnvmail202.nvidia.com (10.129.68.7) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Tue, 26 Sep\n 2023 08:59:21 -0700",
            "from nvidia.com (10.126.231.35) by rnnvmail202.nvidia.com\n (10.129.68.7) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.41; Tue, 26 Sep\n 2023 08:59:18 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=KH7Ree+i2WkHdQwPOR7FrwyxbhuwssYUo1vhK0mUAyTVAeWhJxB1LpWltLe4w8khXqs4lRYh954G/942uppj1cIrCWg6b3sITb1aeXgmfv1JEjJR7HZgaYSyrbfRQHeQlCAcWRvfj5gMXO3/bzN729h8GrQqFTZKAbSjB5HYSmitq6+gpUV6VRvYuzPYCUsl9rTaQQvhSsdTjTQvZX4q5sXFXxoyvnp06Q2JaqVP/SUPHmeW3IkY5SMo3Iuyb+dgqRq9k8qInStajcrfl3uSfZWqeoSl/XX4CLSxqBskpQsvhRHYKrMY4nHjcoNe84FcjM4bhRc0Id33VGoXwcoajA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=ffad8hF4X2iQtlwGWztnCWErLqn4h8+zDkFOVb2FSqI=;\n b=FsHXjqNwoRkxZhVs2x9dnDA75NbC+cFXu/hU+BLx3SL/Ynnta855he/u4yzTik0tpTlizOk55bDs6Xm9y1j8Bzpq6OhqS5izJl0/3ebbiJM16F7i9fVofKyuA5VuMtF5oYk2ChqEWv100IylafECE7xPrDtYk3X71Rfk/D2oRemnGlo+BHcPb+sZrj+/ibAX6+dTOcL0gzFuYC1mOWdXxpDR3ee8kmrr0HcUIqSt7dlV8Yd4RooyMc3v8oYqmgkOtxsflH33K6YjQQ5kZ3qLsdV/LQtc3EnRntxEY+A26TokNJ/oswyUozhCoOPOtxZ/TN0PLLpKTYLpIpSa39Y3OA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=ffad8hF4X2iQtlwGWztnCWErLqn4h8+zDkFOVb2FSqI=;\n b=JPTN57fFW9zhWg545lSio4HwXUbgyI1DuFbz6R5gIKBu2JCMiZH614ZXgoSGU+Bgv3T2H1AIuxx7PLEw4n3mfKyhOhzRBcBGAzZ2KqQBovxACjHscXObCxJ5w9HN+6QpzYzVpf1QF9opSk/OtGJfLcS+5jMqlfm+sX9IXjkjPvbbqXU4BOjrEH2alDpaAlp+Z39vDcymBd8rtBCboXeHDYiEbP+lDa+9/4fbJHz7X3y3Qg/IZxAwWz9wiDuN0YmLCNwj/Ch5+aCfLRe/sb9gnfYvb5jAVAEj0XwpQ5Ex+184t3YgUoaXkfpcss1UJ34plhh1umpSLbAyt409RIZCzg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Tomer Shmilovich <tshmilovich@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>, Suanming Mou\n <suanmingm@nvidia.com>",
        "CC": "<dev@dpdk.org>",
        "Subject": "[PATCH] net/mlx5: supporting group set miss actions API",
        "Date": "Tue, 26 Sep 2023 15:58:35 +0000",
        "Message-ID": "<20230926155836.3290061-1-tshmilovich@nvidia.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail203.nvidia.com (10.129.68.9) To\n rnnvmail202.nvidia.com (10.129.68.7)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "DS2PEPF00003443:EE_|MW4PR12MB6897:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "0cf1c953-b8fc-42fa-0428-08dbbea99173",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n OdhXFIYBTPnvTxb3i2qtTSKO4Hhz9PxS4hXNA+5t32jg64oyExS+7a/iqjQMBlx7VZT2eJ7w6B8BC1H9XujpIau1PwuxE39ioBIrcuDaW0hxtM5TloeOa9eYzcB0sm7XWmKDGwREctmf82GBEVApjeDCkUEbr6/L+FXaExNDV+EDzBJXWLsnPh5xFlXxE9hwYzs/cX3BigX8Fq6P8Cy4LrtxfScXL0ewnpaG6H1ggWRNMRjMfzIizDuXo+ydITSCRDZsRTbUspWmrkmrj1lNTEpXIlsUq+x/Fj8sWxgODVvMvQBxmYrIzPMnXEoToUF5lz3jZmRPuyefnwo2qR1ty+dYThZunPkbxSronB765sjFNWBFFsyOGV/m8RJpFyEg6smz9mAVxi2NS+O180fY0pGdThiPr+EpVShQdzgLYBergFSvG5PVI/kRRwORTcenBLY0A26X/PzK+p2oRulWSLgbw4NaEL+j5hT7H6UHlTybJkNwjU73Zwk3TFi18O5sMpn93W7AKWIfAOoZrCST9nMfQVnYaDDenmw9kcZbkFKxab3JyNAqt0EdBvVOj8rDeAR4f4xuzyMwRS2CCh3d+yRfn68Rf6YpT+cWup2HatKzF79qBQdzoQXGeEtaD7A+dtHwGbngeb9RoXlhsVK+ovxMq8/unq65sy+J2OqPFjEJ05hBgxNxPbuW1AdgND8BRxXIcZbJKzXe+NRVMTD1daHHB/oSMXWqGHnfURx9yh4=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230031)(4636009)(136003)(376002)(396003)(39860400002)(346002)(230922051799003)(82310400011)(451199024)(186009)(1800799009)(46966006)(40470700004)(36840700001)(6666004)(36756003)(7696005)(40460700003)(86362001)(7636003)(83380400001)(356005)(82740400003)(47076005)(36860700001)(2616005)(16526019)(40480700001)(6636002)(55016003)(70206006)(70586007)(6286002)(110136005)(26005)(8676002)(41300700001)(316002)(1076003)(2906002)(4326008)(5660300002)(30864003)(426003)(336012)(8936002)(478600001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "26 Sep 2023 15:59:30.0479 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 0cf1c953-b8fc-42fa-0428-08dbbea99173",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DS2PEPF00003443.namprd04.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MW4PR12MB6897",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add implementation for rte_flow_group_set_miss_actions() API.\n\nSigned-off-by: Tomer Shmilovich <tshmilovich@nvidia.com>\n---\nDepends-on: series-29572 (\"ethdev: add group set miss actions API\")\nDepends-on: patch-130772 (\"net/mlx5: fix jump ipool entry size\")\nDepends-on: patch-131567 (\"net/mlx5/hws: supporting default miss table in HWS\")\n\n drivers/net/mlx5/mlx5.h         |   2 +\n drivers/net/mlx5/mlx5_flow.c    |  41 +++++\n drivers/net/mlx5/mlx5_flow.h    |   9 +\n drivers/net/mlx5/mlx5_flow_hw.c | 301 ++++++++++++++++++++++++++++++++\n 4 files changed, 353 insertions(+)\n\n--\n2.34.1",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex c587e13c63..1323bb4165 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1848,6 +1848,8 @@ struct mlx5_priv {\n \tstruct mlx5_hw_q *hw_q;\n \t/* HW steering rte flow table list header. */\n \tLIST_HEAD(flow_hw_tbl, rte_flow_template_table) flow_hw_tbl;\n+\t/* HW steering rte flow group list header */\n+\tLIST_HEAD(flow_hw_grp, mlx5_flow_group) flow_hw_grp;\n \tstruct mlx5dr_action *hw_push_vlan[MLX5DR_TABLE_TYPE_MAX];\n \tstruct mlx5dr_action *hw_pop_vlan[MLX5DR_TABLE_TYPE_MAX];\n \tstruct mlx5dr_action **hw_vport;\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex f7f8f54eb4..2204fa05d2 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -1027,6 +1027,12 @@ static int\n mlx5_flow_table_destroy(struct rte_eth_dev *dev,\n \t\t\tstruct rte_flow_template_table *table,\n \t\t\tstruct rte_flow_error *error);\n+static int\n+mlx5_flow_group_set_miss_actions(struct rte_eth_dev *dev,\n+\t\t\t\t uint32_t group_id,\n+\t\t\t\t const struct rte_flow_group_attr *attr,\n+\t\t\t\t const struct rte_flow_action actions[],\n+\t\t\t\t struct rte_flow_error *error);\n static struct rte_flow *\n mlx5_flow_async_flow_create(struct rte_eth_dev *dev,\n \t\t\t    uint32_t queue,\n@@ -1151,6 +1157,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {\n \t.actions_template_destroy = mlx5_flow_actions_template_destroy,\n \t.template_table_create = mlx5_flow_table_create,\n \t.template_table_destroy = mlx5_flow_table_destroy,\n+\t.group_set_miss_actions = mlx5_flow_group_set_miss_actions,\n \t.async_create = mlx5_flow_async_flow_create,\n \t.async_create_by_index = mlx5_flow_async_flow_create_by_index,\n \t.async_destroy = mlx5_flow_async_flow_destroy,\n@@ -9286,6 +9293,40 @@ mlx5_flow_table_destroy(struct rte_eth_dev *dev,\n \treturn fops->template_table_destroy(dev, table, error);\n }\n\n+/**\n+ * PMD group set miss actions.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] attr\n+ *   Pointer to group attributes\n+ * @param[in] actions\n+ *   Array of actions\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_group_set_miss_actions(struct rte_eth_dev *dev,\n+\t\t\t\t uint32_t group_id,\n+\t\t\t\t const struct rte_flow_group_attr *attr,\n+\t\t\t\t const struct rte_flow_action actions[],\n+\t\t\t\t struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\tstruct rte_flow_attr fattr = {0};\n+\n+\tif (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"group set miss actions with incorrect steering mode\");\n+\tfops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);\n+\treturn fops->group_set_miss_actions(dev, group_id, attr, actions, error);\n+}\n+\n /**\n  * Enqueue flow creation.\n  *\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 3a97975d69..5963474e10 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1369,9 +1369,11 @@ struct mlx5_hw_action_template {\n /* mlx5 flow group struct. */\n struct mlx5_flow_group {\n \tstruct mlx5_list_entry entry;\n+\tLIST_ENTRY(mlx5_flow_group) next;\n \tstruct rte_eth_dev *dev; /* Reference to corresponding device. 
*/\n \tstruct mlx5dr_table *tbl; /* HWS table object. */\n \tstruct mlx5_hw_jump_action jump; /* Jump action. */\n+\tstruct mlx5_flow_group *miss_group; /* Group pointed to by miss action. */\n \tenum mlx5dr_table_type type; /* Table type. */\n \tuint32_t group_id; /* Group id. */\n \tuint32_t idx; /* Group memory index. */\n@@ -1872,6 +1874,12 @@ typedef int (*mlx5_flow_table_destroy_t)\n \t\t\t(struct rte_eth_dev *dev,\n \t\t\t struct rte_flow_template_table *table,\n \t\t\t struct rte_flow_error *error);\n+typedef int (*mlx5_flow_group_set_miss_actions_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\t uint32_t group_id,\n+\t\t\t const struct rte_flow_group_attr *attr,\n+\t\t\t const struct rte_flow_action actions[],\n+\t\t\t struct rte_flow_error *error);\n typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)\n \t\t\t(struct rte_eth_dev *dev,\n \t\t\t uint32_t queue,\n@@ -2010,6 +2018,7 @@ struct mlx5_flow_driver_ops {\n \tmlx5_flow_actions_template_destroy_t actions_template_destroy;\n \tmlx5_flow_table_create_t template_table_create;\n \tmlx5_flow_table_destroy_t template_table_destroy;\n+\tmlx5_flow_group_set_miss_actions_t group_set_miss_actions;\n \tmlx5_flow_async_flow_create_t async_flow_create;\n \tmlx5_flow_async_flow_create_by_index_t async_flow_create_by_index;\n \tmlx5_flow_async_flow_update_t async_flow_update;\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex cbd741605b..91c6c749a2 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -3800,6 +3800,301 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,\n \treturn 0;\n }\n\n+/**\n+ * Parse group's miss actions.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] cfg\n+ *   Pointer to the table_cfg structure.\n+ * @param[in] actions\n+ *   Array of actions to perform on group miss. Supported types:\n+ *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.\n+ * @param[out] dst_group_id\n+ *   Pointer to destination group id output. 
will be set to 0 if actions is END,\n+ *   otherwise will be set to destination group id.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+\n+static int\n+flow_hw_group_parse_miss_actions(struct rte_eth_dev *dev,\n+\t\t\t\t struct mlx5_flow_template_table_cfg *cfg,\n+\t\t\t\t const struct rte_flow_action actions[],\n+\t\t\t\t uint32_t *dst_group_id,\n+\t\t\t\t struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_action_jump *jump_conf;\n+\tuint32_t temp = 0;\n+\tuint32_t i;\n+\n+\tfor (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {\n+\t\tswitch (actions[i].type) {\n+\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n+\t\t\tcontinue;\n+\t\tcase RTE_FLOW_ACTION_TYPE_JUMP:\n+\t\t\tif (temp)\n+\t\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, actions,\n+\t\t\t\t\t\t\t  \"Miss actions can contain only a single JUMP\");\n+\n+\t\t\tjump_conf = (const struct rte_flow_action_jump *)actions[i].conf;\n+\t\t\tif (!jump_conf)\n+\t\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t\t\t  jump_conf, \"Jump conf must not be NULL\");\n+\n+\t\t\tif (flow_hw_translate_group(dev, cfg, jump_conf->group, &temp, error))\n+\t\t\t\treturn -rte_errno;\n+\n+\t\t\tif (!temp)\n+\t\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t\t\t  \"Failed to set group miss actions - Invalid target group\");\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  &actions[i], \"Unsupported default miss action type\");\n+\t\t}\n+\t}\n+\n+\t*dst_group_id = temp;\n+\treturn 0;\n+}\n+\n+/**\n+ * Set group's miss group.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] cfg\n+ *   Pointer to the table_cfg structure.\n+ * @param[in] src_grp\n+ *   Pointer to source group structure.\n+ *   if NULL, a new group will be created based on group id from cfg->attr.flow_attr.group.\n+ * @param[in] dst_grp\n+ *   Pointer to destination group structure.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+\n+static int\n+flow_hw_group_set_miss_group(struct rte_eth_dev *dev,\n+\t\t\t     struct mlx5_flow_template_table_cfg *cfg,\n+\t\t\t     struct mlx5_flow_group *src_grp,\n+\t\t\t     struct mlx5_flow_group *dst_grp,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tstruct rte_flow_error sub_error = {\n+\t\t.type = RTE_FLOW_ERROR_TYPE_NONE,\n+\t\t.cause = NULL,\n+\t\t.message = NULL,\n+\t};\n+\tstruct mlx5_flow_cb_ctx ctx = {\n+\t\t.dev = dev,\n+\t\t.error = &sub_error,\n+\t\t.data = &cfg->attr.flow_attr,\n+\t};\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_list_entry *ge;\n+\tbool ref = false;\n+\tint ret;\n+\n+\tif (!dst_grp)\n+\t\treturn -EINVAL;\n+\n+\t/* If group doesn't exist - needs to be created. */\n+\tif (!src_grp) {\n+\t\tge = mlx5_hlist_register(priv->sh->groups, cfg->attr.flow_attr.group, &ctx);\n+\t\tif (!ge)\n+\t\t\treturn -rte_errno;\n+\n+\t\tsrc_grp = container_of(ge, struct mlx5_flow_group, entry);\n+\t\tLIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);\n+\t\tref = true;\n+\t} else if (!src_grp->miss_group) {\n+\t\t/* If group exists, but has no miss actions - need to increase ref_cnt. 
*/\n+\t\tLIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);\n+\t\tsrc_grp->entry.ref_cnt++;\n+\t\tref = true;\n+\t}\n+\n+\tret = mlx5dr_table_set_default_miss(src_grp->tbl, dst_grp->tbl);\n+\tif (ret)\n+\t\tgoto mlx5dr_error;\n+\n+\t/* If group existed and had old miss actions - ref_cnt is already correct.\n+\t * However, need to reduce ref counter for old miss group.\n+\t */\n+\tif (src_grp->miss_group)\n+\t\tmlx5_hlist_unregister(priv->sh->groups, &src_grp->miss_group->entry);\n+\n+\tsrc_grp->miss_group = dst_grp;\n+\treturn 0;\n+\n+mlx5dr_error:\n+\t/* Reduce src_grp ref_cnt back & remove from grp list in case of mlx5dr error */\n+\tif (ref) {\n+\t\tmlx5_hlist_unregister(priv->sh->groups, &src_grp->entry);\n+\t\tLIST_REMOVE(src_grp, next);\n+\t}\n+\n+\treturn rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t  \"Failed to set group miss actions\");\n+}\n+\n+/**\n+ * Unset group's miss group.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] grp\n+ *   Pointer to group structure.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+\n+static int\n+flow_hw_group_unset_miss_group(struct rte_eth_dev *dev,\n+\t\t\t       struct mlx5_flow_group *grp,\n+\t\t\t       struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tint ret;\n+\n+\t/* If group doesn't exist - no need to change anything. */\n+\tif (!grp)\n+\t\treturn 0;\n+\n+\t/* If group exists, but miss actions is already default behavior -\n+\t * no need to change anything.\n+\t */\n+\tif (!grp->miss_group)\n+\t\treturn 0;\n+\n+\tret = mlx5dr_table_set_default_miss(grp->tbl, NULL);\n+\tif (ret)\n+\t\treturn rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"Failed to unset group miss actions\");\n+\n+\tmlx5_hlist_unregister(priv->sh->groups, &grp->miss_group->entry);\n+\tgrp->miss_group = NULL;\n+\n+\tLIST_REMOVE(grp, next);\n+\tmlx5_hlist_unregister(priv->sh->groups, &grp->entry);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Set group miss actions.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] group_id\n+ *   Group id.\n+ * @param[in] attr\n+ *   Pointer to group attributes structure.\n+ * @param[in] actions\n+ *   Array of actions to perform on group miss. 
Supported types:\n+ *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+\n+static int\n+flow_hw_group_set_miss_actions(struct rte_eth_dev *dev,\n+\t\t\t       uint32_t group_id,\n+\t\t\t       const struct rte_flow_group_attr *attr,\n+\t\t\t       const struct rte_flow_action actions[],\n+\t\t\t       struct rte_flow_error *error)\n+{\n+\tstruct rte_flow_error sub_error = {\n+\t\t.type = RTE_FLOW_ERROR_TYPE_NONE,\n+\t\t.cause = NULL,\n+\t\t.message = NULL,\n+\t};\n+\tstruct mlx5_flow_template_table_cfg cfg = {\n+\t\t.external = true,\n+\t\t.attr = {\n+\t\t\t.flow_attr = {\n+\t\t\t\t.group = group_id,\n+\t\t\t\t.ingress = attr->ingress,\n+\t\t\t\t.egress = attr->egress,\n+\t\t\t\t.transfer = attr->transfer,\n+\t\t\t},\n+\t\t},\n+\t};\n+\tstruct mlx5_flow_cb_ctx ctx = {\n+\t\t.dev = dev,\n+\t\t.error = &sub_error,\n+\t\t.data = &cfg.attr.flow_attr,\n+\t};\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_group *src_grp = NULL;\n+\tstruct mlx5_flow_group *dst_grp = NULL;\n+\tstruct mlx5_list_entry *ge;\n+\tuint32_t dst_group_id = 0;\n+\tint ret;\n+\n+\tif (flow_hw_translate_group(dev, &cfg, group_id, &group_id, error))\n+\t\treturn -rte_errno;\n+\n+\tif (!group_id)\n+\t\treturn rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL, \"Failed to set group miss actions - invalid group id\");\n+\n+\tret = flow_hw_group_parse_miss_actions(dev, &cfg, actions, &dst_group_id, error);\n+\tif (ret)\n+\t\treturn -rte_errno;\n+\n+\tif (dst_group_id == group_id) {\n+\t\treturn rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL, \"Failed to set group miss actions - target group id must differ from group_id\");\n+\t}\n+\n+\tcfg.attr.flow_attr.group = group_id;\n+\tge = mlx5_hlist_lookup(priv->sh->groups, group_id, &ctx);\n+\tif (ge)\n+\t\tsrc_grp = container_of(ge, struct mlx5_flow_group, entry);\n+\n+\tif (dst_group_id) {\n+\t\t/* Increase ref_cnt for new miss group. 
*/\n+\t\tcfg.attr.flow_attr.group = dst_group_id;\n+\t\tge = mlx5_hlist_register(priv->sh->groups, dst_group_id, &ctx);\n+\t\tif (!ge)\n+\t\t\treturn -rte_errno;\n+\n+\t\tdst_grp = container_of(ge, struct mlx5_flow_group, entry);\n+\n+\t\tcfg.attr.flow_attr.group = group_id;\n+\t\tret = flow_hw_group_set_miss_group(dev, &cfg, src_grp, dst_grp, error);\n+\t\tif (ret)\n+\t\t\tgoto error;\n+\t} else {\n+\t\treturn flow_hw_group_unset_miss_group(dev, src_grp, error);\n+\t}\n+\n+\treturn 0;\n+\n+error:\n+\tif (dst_grp)\n+\t\tmlx5_hlist_unregister(priv->sh->groups, &dst_grp->entry);\n+\treturn -rte_errno;\n+}\n+\n static bool\n flow_hw_modify_field_is_used(const struct rte_flow_action_modify_field *action,\n \t\t\t     enum rte_flow_field_id field)\n@@ -8009,6 +8304,7 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \tstruct rte_flow_template_table *tbl;\n \tstruct rte_flow_pattern_template *it;\n \tstruct rte_flow_actions_template *at;\n+\tstruct mlx5_flow_group *grp;\n \tuint32_t i;\n\n \tif (!priv->dr_ctx)\n@@ -8017,6 +8313,10 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \tflow_hw_flush_all_ctrl_flows(dev);\n \tflow_hw_cleanup_tx_repr_tagging(dev);\n \tflow_hw_cleanup_ctrl_rx_tables(dev);\n+\twhile (!LIST_EMPTY(&priv->flow_hw_grp)) {\n+\t\tgrp = LIST_FIRST(&priv->flow_hw_grp);\n+\t\tflow_hw_group_unset_miss_group(dev, grp, NULL);\n+\t}\n \twhile (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) {\n \t\ttbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);\n \t\tflow_hw_table_destroy(dev, tbl, NULL);\n@@ -9344,6 +9644,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.actions_template_destroy = flow_hw_actions_template_destroy,\n \t.template_table_create = flow_hw_template_table_create,\n \t.template_table_destroy = flow_hw_table_destroy,\n+\t.group_set_miss_actions = flow_hw_group_set_miss_actions,\n \t.async_flow_create = flow_hw_async_flow_create,\n \t.async_flow_create_by_index = flow_hw_async_flow_create_by_index,\n \t.async_flow_update = flow_hw_async_flow_update,\n",
    "prefixes": []
}
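
Updates go through the same URL with PUT or PATCH (see the Allow header above) and require authentication. A minimal sketch of a partial update, assuming a Patchwork API token with maintainer rights on the project and that fields such as "state" and "archived" are writable for maintainers (neither the token nor the writable-field list is shown in this dump):

import requests

# Hypothetical token value; real updates need a maintainer's API token.
headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}

# PATCH sends only the fields to change; PUT would submit a full update.
resp = requests.patch(
    "http://patchwork.dpdk.org/api/patches/131971/",
    json={"state": "accepted", "archived": True},
    headers=headers,
)
resp.raise_for_status()
print(resp.json()["state"], resp.json()["archived"])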