get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/96315/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 96315,
    "url": "http://patchwork.dpdk.org/api/patches/96315/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210727073121.895620-4-dkozlyuk@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210727073121.895620-4-dkozlyuk@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210727073121.895620-4-dkozlyuk@nvidia.com",
    "date": "2021-07-27T07:31:19",
    "name": "[3/4] net/mlx5: preserve indirect actions across port restart",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "330ea309a54a7e8acf116af0ed6d3cb66defe0e9",
    "submitter": {
        "id": 2248,
        "url": "http://patchwork.dpdk.org/api/people/2248/?format=api",
        "name": "Dmitry Kozlyuk",
        "email": "dkozlyuk@nvidia.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210727073121.895620-4-dkozlyuk@nvidia.com/mbox/",
    "series": [
        {
            "id": 18009,
            "url": "http://patchwork.dpdk.org/api/series/18009/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=18009",
            "date": "2021-07-27T07:31:16",
            "name": "net/mlx5: keep indirect actions across port restart",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/18009/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/96315/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/96315/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3E94FA0C4D;\n\tTue, 27 Jul 2021 09:32:04 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 243EF41123;\n\tTue, 27 Jul 2021 09:31:47 +0200 (CEST)",
            "from NAM04-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam08on2085.outbound.protection.outlook.com [40.107.102.85])\n by mails.dpdk.org (Postfix) with ESMTP id 7CE7C4111F;\n Tue, 27 Jul 2021 09:31:45 +0200 (CEST)",
            "from BN9PR03CA0060.namprd03.prod.outlook.com (2603:10b6:408:fb::35)\n by MN2PR12MB3214.namprd12.prod.outlook.com (2603:10b6:208:106::17)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4352.26; Tue, 27 Jul\n 2021 07:31:43 +0000",
            "from BN8NAM11FT017.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:fb:cafe::ac) by BN9PR03CA0060.outlook.office365.com\n (2603:10b6:408:fb::35) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4352.25 via Frontend\n Transport; Tue, 27 Jul 2021 07:31:43 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT017.mail.protection.outlook.com (10.13.177.93) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4352.24 via Frontend Transport; Tue, 27 Jul 2021 07:31:43 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 27 Jul\n 2021 07:31:41 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=DIdpmpn6ZYm/lCiGC9BdbRFr3AyJvDLaqrZ41Xrl3MUI5PZCS+8oC2R2hq/u8LLcNkn/A7HqWeEg6K98BV7W7Gq9TUWbi8SYv6VUaXCXh0JB5meHt5DNHe8s1+u6hARzaK5gjem5I/tKc5iGq0ji0jBax7N3/vRdvnYQvIST5kmsCANVWow/f2KT0R1zbOb4Z5cndfSU4z3K4w2YP5qm/xBa8GOe2LFToUdrjbyvb93aBXPhSCBTKmFfVpm1JwKMrdWXUhChHOpX3qaqC4m+NL5FpqJUsr4bjzYgfvjvfPgqxbjyw4CtelivR5eoFL2qV4bM008EbIeuUUjeRDNdQQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=aUxdE+A928SKBEhKLP4RA4R9tbRf0r5Io6+6kpHNS3k=;\n b=krtJJhuggMcxt7QNSlBNWIGGz5Y8A4q+WKS65xF31d8WO/wJ8w3gsn72UOmOBWludnlsmTrB31CYQvUjhjNujX8Lxunx0kq28QttFF23CaxQVfmg/4Hr3tqHQHMZaqX4W00dvw6aqws51mUb6rzzRAwi7m3ItZkI3WpKmVvEqkbLUyoHnGUznU96LAoPK2UWYuMHlkZAl3CKdAbEPztHE+FJPVHBjwS51r2JGDlnu6MIQH6GeqglRxFZRBbtw9hZF/KiX9VT3lwILDfMRsB1y1rjgKTBbEOAXbF01E0QI8GcaLCXuqz+o+OoeUBffpwtRJibRgPbWCH8ZRF83K+HHA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=aUxdE+A928SKBEhKLP4RA4R9tbRf0r5Io6+6kpHNS3k=;\n b=LhZVEo0QhHzemz3BhfEqOZmBf43EEAXLz+tbnl8NBA2REGmK4j4b/ZPDeS+iT4dRfroHU/ry1V15DEaKP/LREGRwdjWX5o14rTekIpK3N+Go3Rhn20ar6o0vcov2iS8wGWvBf75QJYAS+Xsfnp0xhYK5c5hGL1eIyuwY5tQSI+hh5cYoIVvLD561pH6OSNZK9aC36XSdJCg+Yx3er+gApGgAlypy2CevWxtnyj4/WGRhh4fiRZsJHTJ/ub6G9WUhpqJhw6kA51Zx2AQ97LqoXU2Ir41VrW09g81M9bKgtzfmkKu0ytwuXaRRnzXQ4Eaesc07IPK2EOZf6lZGAYjd3w==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Dmitry Kozlyuk <dkozlyuk@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "David Marchand <david.marchand@redhat.com>, <bingz@nvidia.com>,\n <stable@dpdk.org>, Matan Azrad <matan@nvidia.com>, Shahaf Shuler\n <shahafs@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Date": "Tue, 27 Jul 2021 10:31:19 +0300",
        "Message-ID": "<20210727073121.895620-4-dkozlyuk@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20210727073121.895620-1-dkozlyuk@nvidia.com>",
        "References": "<20210727073121.895620-1-dkozlyuk@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL105.nvidia.com (172.20.187.12) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "8fe492b8-e398-4f27-1a8d-08d950d09539",
        "X-MS-TrafficTypeDiagnostic": "MN2PR12MB3214:",
        "X-Microsoft-Antispam-PRVS": "\n <MN2PR12MB3214901D0AAF2BC3BDDD2D31B9E99@MN2PR12MB3214.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:261;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n yTJ2uQdPeQ3djFZopWRrsV2S28BMjSp06F4el9M5mxQHzCZyWuveH5MRd9V92KkbLI3isrXzcFnvjsDbwI/1lXI7LFHZT7SE57LBDFuHiVcaT3Ga3n2nI+gXtvUrl4xkKxx/DHv7zutGHvz24C/WIcP9pomMHh5VHexfN1LlUEUN6s+sMFOHBfkK9I53QewPAAzDmFlSBP6Vk0BpI6dEA1t6wFtcbLkKIoBu0XXFewZr0cFiMb4Kb2q34GV41V/pE4qAtce32NRKOv29HShVDPbcKXcG+4dbJFDz7qqpTr3YO0cxV08dnKMIWTZ7PkMw+rh65gV2D6yInueV9YfyJci+2SreYQBeZRhXVMyx6p5hBEKIGPkcvgK5TqnDgPieDakrFWAIYYIeA9B7FlAytlEcw68AkTFYizmSBm2ESA+laEp61Fs/9RIbMTVOQ8mlXHLy2SneC5+oNP6MBlQvXFFqvlf5NuD14GF5XP9w5c5ttH14qyVji7t05NgN89YS9d5ihRHfU5FGLaRrk0BsP7/glvEF5/noKM9QMlxA2me7PvMFuHAsDzUD9PhSzygRb1vf48qf8XW2ARQBbg1tJGqtvLKHOpFQibnpnIrFX3eQelleTAoroR/mINTJsJqq3+Cjf6C+Ozbu1XbAfsj8/zccp1V+qt0IaFlZjk9twUjraZYsnLGLNrSHwVmtxxbQC95i/ebQ6cBlHvoJTyEyew==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(376002)(396003)(39860400002)(346002)(136003)(36840700001)(46966006)(2906002)(426003)(478600001)(7636003)(8676002)(356005)(70586007)(55016002)(107886003)(86362001)(7696005)(26005)(5660300002)(36906005)(82310400003)(6666004)(4326008)(54906003)(6286002)(6916009)(70206006)(8936002)(186003)(16526019)(316002)(36860700001)(1076003)(83380400001)(336012)(30864003)(2616005)(47076005)(36756003)(82740400003);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "27 Jul 2021 07:31:43.4354 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 8fe492b8-e398-4f27-1a8d-08d950d09539",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT017.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MN2PR12MB3214",
        "Subject": "[dpdk-dev] [PATCH 3/4] net/mlx5: preserve indirect actions across\n port restart",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "MLX5 PMD uses reference counting to manage RX queue resources.\nAfter port stop shared RSS actions kept references to RX queues,\npreventing resource release. As a result, internal PMD mempool\nfor such queues had been exhausted after a number of port restarts.\nDiagnostic message from rte_eth_dev_start():\n\n    Rx queue allocation failed: Cannot allocate memory\n\nDereference RX queues used by indirect actions on port stop (detach)\nand restore references on port start (attach) in order to allow RX queue\nresource release, but keep indirect actions across the port restart.\nReplace queue IDs in HW by drop queue ID on detach and restore actual\nqueue IDs on attach.\n\nFixes: 4b61b8774be9 (\"ethdev: introduce indirect flow action\")\nCc: bingz@nvidia.com\nCc: stable@dpdk.org\n\nSigned-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.c    | 194 ++++++++++++++++++++++++++++----\n drivers/net/mlx5/mlx5_flow.h    |   2 +\n drivers/net/mlx5/mlx5_rx.h      |   4 +\n drivers/net/mlx5/mlx5_rxq.c     |  99 ++++++++++++++--\n drivers/net/mlx5/mlx5_trigger.c |  10 ++\n 5 files changed, 275 insertions(+), 34 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex e8d2678877..5343720ec9 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -1524,6 +1524,58 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,\n \treturn 0;\n }\n \n+/**\n+ * Validate queue numbers for device RSS.\n+ *\n+ * @param[in] dev\n+ *   Configured device.\n+ * @param[in] queues\n+ *   Array of queue numbers.\n+ * @param[in] queues_n\n+ *   Size of the @p queues array.\n+ * @param[out] error\n+ *   On error, filled with a textual error description.\n+ * @param[out] queue\n+ *   On error, filled with an offending queue index in @p queues array.\n+ *\n+ * @return\n+ *   0 on success, a negative errno code on error.\n+ */\n+static int\n+mlx5_validate_rss_queues(const struct rte_eth_dev *dev,\n+\t\t\t const uint16_t *queues, uint32_t queues_n,\n+\t\t\t const char **error, uint32_t *queue_idx)\n+{\n+\tconst struct mlx5_priv *priv = dev->data->dev_private;\n+\tenum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;\n+\tuint32_t i;\n+\n+\tfor (i = 0; i != queues_n; ++i) {\n+\t\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\n+\t\tif (queues[i] >= priv->rxqs_n) {\n+\t\t\t*error = \"queue index out of range\";\n+\t\t\t*queue_idx = i;\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\tif (!(*priv->rxqs)[queues[i]]) {\n+\t\t\t*error =  \"queue is not configured\";\n+\t\t\t*queue_idx = i;\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\trxq_ctrl = container_of((*priv->rxqs)[queues[i]],\n+\t\t\t\t\tstruct mlx5_rxq_ctrl, rxq);\n+\t\tif (i == 0)\n+\t\t\trxq_type = rxq_ctrl->type;\n+\t\tif (rxq_type != rxq_ctrl->type) {\n+\t\t\t*error = \"combining hairpin and regular RSS queues is not supported\";\n+\t\t\t*queue_idx = i;\n+\t\t\treturn -ENOTSUP;\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n /*\n  * Validate the rss action.\n  *\n@@ -1544,8 +1596,9 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tconst struct 
rte_flow_action_rss *rss = action->conf;\n-\tenum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;\n-\tunsigned int i;\n+\tint ret;\n+\tconst char *message;\n+\tuint32_t queue_idx;\n \n \tif (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&\n \t    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)\n@@ -1609,27 +1662,12 @@ mlx5_validate_action_rss(struct rte_eth_dev *dev,\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n \t\t\t\t\t  NULL, \"No queues configured\");\n-\tfor (i = 0; i != rss->queue_num; ++i) {\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n-\n-\t\tif (rss->queue[i] >= priv->rxqs_n)\n-\t\t\treturn rte_flow_error_set\n-\t\t\t\t(error, EINVAL,\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t &rss->queue[i], \"queue index out of range\");\n-\t\tif (!(*priv->rxqs)[rss->queue[i]])\n-\t\t\treturn rte_flow_error_set\n-\t\t\t\t(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t &rss->queue[i], \"queue is not configured\");\n-\t\trxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],\n-\t\t\t\t\tstruct mlx5_rxq_ctrl, rxq);\n-\t\tif (i == 0)\n-\t\t\trxq_type = rxq_ctrl->type;\n-\t\tif (rxq_type != rxq_ctrl->type)\n-\t\t\treturn rte_flow_error_set\n-\t\t\t\t(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t &rss->queue[i],\n-\t\t\t\t \"combining hairpin and regular RSS queues is not supported\");\n+\tret = mlx5_validate_rss_queues(dev, rss->queue, rss->queue_num,\n+\t\t\t\t       &message, &queue_idx);\n+\tif (ret != 0) {\n+\t\treturn rte_flow_error_set(error, -ret,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  &rss->queue[queue_idx], message);\n \t}\n \treturn 0;\n }\n@@ -8493,6 +8531,116 @@ mlx5_action_handle_flush(struct rte_eth_dev *dev)\n \treturn ret;\n }\n \n+/**\n+ * Validate existing indirect actions against current device configuration\n+ * and attach them to device resources.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   0 on success, a negative 
errno value otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_action_handle_attach(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_indexed_pool *ipool =\n+\t\t\tpriv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];\n+\tstruct mlx5_shared_action_rss *shared_rss, *shared_rss_last;\n+\tint ret = 0;\n+\tuint32_t idx;\n+\n+\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n+\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n+\t\tconst char *message;\n+\t\tuint32_t queue_idx;\n+\n+\t\tret = mlx5_validate_rss_queues(dev, ind_tbl->queues,\n+\t\t\t\t\t       ind_tbl->queues_n,\n+\t\t\t\t\t       &message, &queue_idx);\n+\t\tif (ret != 0) {\n+\t\t\tDRV_LOG(ERR, \"Port %u cannot use queue %u in RSS: %s\",\n+\t\t\t\tdev->data->port_id, ind_tbl->queues[queue_idx],\n+\t\t\t\tmessage);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\tif (ret != 0)\n+\t\treturn ret;\n+\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n+\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n+\n+\t\tret = mlx5_ind_table_obj_attach(dev, ind_tbl);\n+\t\tif (ret != 0) {\n+\t\t\tDRV_LOG(ERR, \"Port %u could not attach \"\n+\t\t\t\t\"indirection table obj %p\",\n+\t\t\t\tdev->data->port_id, (void *)ind_tbl);\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n+\treturn 0;\n+error:\n+\tshared_rss_last = shared_rss;\n+\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n+\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n+\n+\t\tif (shared_rss == shared_rss_last)\n+\t\t\tbreak;\n+\t\tif (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)\n+\t\t\tDRV_LOG(CRIT, \"Port %u could not detach \"\n+\t\t\t\t\"indirection table obj %p on rollback\",\n+\t\t\t\tdev->data->port_id, (void *)ind_tbl);\n+\t}\n+\treturn ret;\n+}\n+\n+/**\n+ * Detach indirect actions of the device from its resources.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value 
otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_action_handle_detach(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_indexed_pool *ipool =\n+\t\t\tpriv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];\n+\tstruct mlx5_shared_action_rss *shared_rss, *shared_rss_last;\n+\tint ret = 0;\n+\tuint32_t idx;\n+\n+\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n+\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n+\n+\t\tret = mlx5_ind_table_obj_detach(dev, ind_tbl);\n+\t\tif (ret != 0) {\n+\t\t\tDRV_LOG(ERR, \"Port %u could not detach \"\n+\t\t\t\t\"indirection table obj %p\",\n+\t\t\t\tdev->data->port_id, (void *)ind_tbl);\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n+\treturn 0;\n+error:\n+\tshared_rss_last = shared_rss;\n+\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n+\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n+\n+\t\tif (shared_rss == shared_rss_last)\n+\t\t\tbreak;\n+\t\tif (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)\n+\t\t\tDRV_LOG(CRIT, \"Port %u could not attach \"\n+\t\t\t\t\"indirection table obj %p on rollback\",\n+\t\t\t\tdev->data->port_id, (void *)ind_tbl);\n+\t}\n+\treturn ret;\n+}\n+\n #ifndef HAVE_MLX5DV_DR\n #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))\n #else\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex da39eeb596..251d643f8c 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1575,6 +1575,8 @@ struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare\n void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,\n \t\tstruct mlx5_flow_meter_policy *mtr_policy);\n int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);\n+int mlx5_action_handle_attach(struct rte_eth_dev *dev);\n+int mlx5_action_handle_detach(struct rte_eth_dev *dev);\n int mlx5_action_handle_flush(struct rte_eth_dev *dev);\n void 
mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);\n int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);\ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex 3f2b99fb65..7319ad0264 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -222,6 +222,10 @@ int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,\n \t\t\t      struct mlx5_ind_table_obj *ind_tbl,\n \t\t\t      uint16_t *queues, const uint32_t queues_n,\n \t\t\t      bool standalone);\n+int mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,\n+\t\t\t      struct mlx5_ind_table_obj *ind_tbl);\n+int mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,\n+\t\t\t      struct mlx5_ind_table_obj *ind_tbl);\n struct mlx5_list_entry *mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx);\n int mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,\n \t\t       void *cb_ctx);\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 49165f482e..1140f6067e 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -2024,6 +2024,26 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,\n \treturn ind_tbl;\n }\n \n+static int\n+mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused,\n+\t\t\t\t    struct mlx5_ind_table_obj *ind_tbl)\n+{\n+\tuint32_t refcnt;\n+\n+\trefcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);\n+\tif (refcnt <= 1)\n+\t\treturn 0;\n+\t/*\n+\t * Modification of indirection tables having more than 1\n+\t * reference is unsupported.\n+\t */\n+\tDRV_LOG(DEBUG,\n+\t\t\"Port %u cannot modify indirection table %p (refcnt %u > 1).\",\n+\t\tdev->data->port_id, (void *)ind_tbl, refcnt);\n+\trte_errno = EINVAL;\n+\treturn -rte_errno;\n+}\n+\n /**\n  * Modify an indirection table.\n  *\n@@ -2056,18 +2076,8 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,\n \n \tMLX5_ASSERT(standalone);\n \tRTE_SET_USED(standalone);\n-\tif 
(__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) > 1) {\n-\t\t/*\n-\t\t * Modification of indirection ntables having more than 1\n-\t\t * reference unsupported. Intended for standalone indirection\n-\t\t * tables only.\n-\t\t */\n-\t\tDRV_LOG(DEBUG,\n-\t\t\t\"Port %u cannot modify indirection table (refcnt> 1).\",\n-\t\t\tdev->data->port_id);\n-\t\trte_errno = EINVAL;\n+\tif (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)\n \t\treturn -rte_errno;\n-\t}\n \tfor (i = 0; i != queues_n; ++i) {\n \t\tif (!mlx5_rxq_get(dev, queues[i])) {\n \t\t\tret = -rte_errno;\n@@ -2093,6 +2103,73 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,\n \treturn ret;\n }\n \n+/**\n+ * Attach an indirection table to its queues.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param ind_table\n+ *   Indirection table to attach.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,\n+\t\t\t  struct mlx5_ind_table_obj *ind_tbl)\n+{\n+\tunsigned int i;\n+\tint ret;\n+\n+\tret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,\n+\t\t\t\t\tind_tbl->queues_n, true);\n+\tif (ret != 0) {\n+\t\tDRV_LOG(ERR, \"Port %u could not modify indirect table obj %p\",\n+\t\t\tdev->data->port_id, (void *)ind_tbl);\n+\t\treturn ret;\n+\t}\n+\tfor (i = 0; i < ind_tbl->queues_n; i++)\n+\t\tmlx5_rxq_get(dev, ind_tbl->queues[i]);\n+\treturn 0;\n+}\n+\n+/**\n+ * Detach an indirection table from its queues.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param ind_table\n+ *   Indirection table to detach.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,\n+\t\t\t  struct mlx5_ind_table_obj *ind_tbl)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ?\n+\t\t\t       
log2above(ind_tbl->queues_n) :\n+\t\t\t       log2above(priv->config.ind_table_max_size);\n+\tunsigned int i;\n+\tint ret;\n+\n+\tret = mlx5_ind_table_obj_check_standalone(dev, ind_tbl);\n+\tif (ret != 0)\n+\t\treturn ret;\n+\tMLX5_ASSERT(priv->obj_ops.ind_table_modify);\n+\tret = priv->obj_ops.ind_table_modify(dev, n, NULL, 0, ind_tbl);\n+\tif (ret != 0) {\n+\t\tDRV_LOG(ERR, \"Port %u could not modify indirect table obj %p\",\n+\t\t\tdev->data->port_id, (void *)ind_tbl);\n+\t\treturn ret;\n+\t}\n+\tfor (i = 0; i < ind_tbl->queues_n; i++)\n+\t\tmlx5_rxq_release(dev, ind_tbl->queues[i]);\n+\treturn ret;\n+}\n+\n int\n mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,\n \t\t   void *cb_ctx)\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex a9d5d58fd9..6761a84a68 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -14,6 +14,7 @@\n #include <mlx5_malloc.h>\n \n #include \"mlx5.h\"\n+#include \"mlx5_flow.h\"\n #include \"mlx5_mr.h\"\n #include \"mlx5_rx.h\"\n #include \"mlx5_tx.h\"\n@@ -1115,6 +1116,14 @@ mlx5_dev_start(struct rte_eth_dev *dev)\n \tmlx5_rxq_timestamp_set(dev);\n \t/* Set a mask and offset of scheduling on timestamp into Tx queues. */\n \tmlx5_txq_dynf_timestamp_set(dev);\n+\t/* Attach indirection table objects detached on port stop. */\n+\tret = mlx5_action_handle_attach(dev);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR,\n+\t\t\t\"port %u failed to attach indirect actions: %s\",\n+\t\t\tdev->data->port_id, rte_strerror(rte_errno));\n+\t\tgoto error;\n+\t}\n \t/*\n \t * In non-cached mode, it only needs to start the default mreg copy\n \t * action and no flow created by application exists anymore.\n@@ -1187,6 +1196,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)\n \t/* All RX queue flags will be cleared in the flush interface. 
*/\n \tmlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);\n \tmlx5_flow_meter_rxq_flush(dev);\n+\tmlx5_action_handle_detach(dev);\n \tmlx5_rx_intr_vec_disable(dev);\n \tpriv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;\n \tpriv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;\n",
    "prefixes": [
        "3/4"
    ]
}