get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

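As an illustration, the read-only GET shown below can be reproduced with a small script. This is a minimal sketch assuming the Python requests package and anonymous read access; updating a patch via PUT or PATCH would additionally require an API token, which is not shown here.

    import requests

    # Fetch a single patch from the Patchwork REST API (anonymous GET;
    # PUT/PATCH on the same URL need an authenticated API token).
    url = "http://patchwork.dpdk.org/api/patches/97003/"
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])    # "[RFC,10/21] net/mlx5: use context device structure"
    print(patch["state"])   # "rfc"
    print(patch["mbox"])    # mbox URL, suitable for feeding to "git am"
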
GET /api/patches/97003/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 97003,
    "url": "http://patchwork.dpdk.org/api/patches/97003/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210817134441.1966618-11-michaelba@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210817134441.1966618-11-michaelba@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210817134441.1966618-11-michaelba@nvidia.com",
    "date": "2021-08-17T13:44:30",
    "name": "[RFC,10/21] net/mlx5: use context device structure",
    "commit_ref": null,
    "pull_url": null,
    "state": "rfc",
    "archived": true,
    "hash": "b032261c6dd983672eec0d31be5e0ce2631bf800",
    "submitter": {
        "id": 1949,
        "url": "http://patchwork.dpdk.org/api/people/1949/?format=api",
        "name": "Michael Baum",
        "email": "michaelba@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210817134441.1966618-11-michaelba@nvidia.com/mbox/",
    "series": [
        {
            "id": 18314,
            "url": "http://patchwork.dpdk.org/api/series/18314/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=18314",
            "date": "2021-08-17T13:44:20",
            "name": "mlx5: sharing global MR cache between drivers",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/18314/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/97003/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/97003/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9DE41A0548;\n\tTue, 17 Aug 2021 15:47:05 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 87A3741214;\n\tTue, 17 Aug 2021 15:45:40 +0200 (CEST)",
            "from NAM02-DM3-obe.outbound.protection.outlook.com\n (mail-dm3nam07on2077.outbound.protection.outlook.com [40.107.95.77])\n by mails.dpdk.org (Postfix) with ESMTP id 9791F411FB\n for <dev@dpdk.org>; Tue, 17 Aug 2021 15:45:36 +0200 (CEST)",
            "from BN9PR03CA0472.namprd03.prod.outlook.com (2603:10b6:408:139::27)\n by MW3PR12MB4380.namprd12.prod.outlook.com (2603:10b6:303:5a::19)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4415.17; Tue, 17 Aug\n 2021 13:45:33 +0000",
            "from BN8NAM11FT030.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:139:cafe::c9) by BN9PR03CA0472.outlook.office365.com\n (2603:10b6:408:139::27) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4415.16 via Frontend\n Transport; Tue, 17 Aug 2021 13:45:33 +0000",
            "from mail.nvidia.com (216.228.112.36) by\n BN8NAM11FT030.mail.protection.outlook.com (10.13.177.146) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4415.14 via Frontend Transport; Tue, 17 Aug 2021 13:45:32 +0000",
            "from DRHQMAIL107.nvidia.com (10.27.9.16) by HQMAIL101.nvidia.com\n (172.20.187.10) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 17 Aug\n 2021 13:45:31 +0000",
            "from nvidia.com (172.20.187.6) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 17 Aug\n 2021 13:45:29 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=PLCZIlUybfpSQBcDzKzeW/fYIX1akbVLQqu2lkBxj6U0hc4MGXez9WYou4YkdzRrzeuYZebheTrYPkdOyX0kCZEHbOCJbqp/YCEbBn1dq7O01VFrqK0KzahcZlUpChJYD8Pw5nNQd2bGwWAdyr9mnFcUARB/enA01uDjUuEXFYgj2AyhBKXlC5XB1ONkBPfNuFLZUAq0r8fskC38PN2sPdyeBYbgebT8t80q2wa0jQ1M0xtH9jE0gln8kuGquj5SqGc0MiEoxmCW4jtDC+IGhvXVfA785NlEH5R9i8fHgmpeG4+e5hYnaLQQiG2G/Wo+dGOWUW2iCzfER72wQNItRQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=eSq61LMnycilc+5wrnNjt1Rg9/VztTjOpDczQYYXa94=;\n b=CjUHh8/PkezsnIbWf4ANwNTTjJzSKaU7/d/UPx8nIAC8UiJqlHYu6eUYheXSoDdWZlR+YIUbxMygzUSh7cW8H6h5JxPsVFthESS+1XOBQZHUCx49CoxlCBt155ypqXQWA1/MtunwPFa5yjfgUYSRkL32wqY0dQ3eieUsdE6CBH+TNlPuHKLQDKUyxMhUMcKZoFFWlnV0wzobcAOA8VlUlPBXMuFuaxqTSDX4kVicxtVFCoYFJmFabe+OtwaD9hK10ESkYxxLssrExIF/RbmINfbI8flS40sUw4m3o6wd3zXuENu23Ph0WnYA+4FqYOKH9pAVVa1hz3w9FPlT6d44eQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.36) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=eSq61LMnycilc+5wrnNjt1Rg9/VztTjOpDczQYYXa94=;\n b=N9Q4qUC7Ds9gY8p35eQsifwyFaEqpxE8Na2iehwhzPQrMcDmpRmlEp2Cu124zBx1Ux37ngvkJRnF0fZBi4seCRvOJLpdzCqNAHXLWo4tvTeHHMd3CGwuCCQp7CqHGs40cpOjPl3NRlOg+hC/Z5TKswDrwT9SYLtr5hyi9U7/Ffdg8vp828Okp9V45IRbA2i0ssyq8aT1I7xFIQpdMhweNuXl7tmoJEful1SjHaueJxMlXwNLR6aHlPWtzp6JIc3tQYS0DXNHuWAynEZQ50nhHYea7IuaMuJVHG7//50it5hHEn1rrviLmHYYDSpuGsHR7kWgZDVjEb0FAp6gh1OLMg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.36)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.36 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.36; helo=mail.nvidia.com;",
        "From": "Michael Baum <michaelba@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Matan Azrad <matan@nvidia.com>, Raslan Darawsheh <rasland@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Date": "Tue, 17 Aug 2021 16:44:30 +0300",
        "Message-ID": "<20210817134441.1966618-11-michaelba@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20210817134441.1966618-1-michaelba@nvidia.com>",
        "References": "<20210817134441.1966618-1-michaelba@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n DRHQMAIL107.nvidia.com (10.27.9.16)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "b4d787b6-dcc6-4c0b-ec62-08d9618548ee",
        "X-MS-TrafficTypeDiagnostic": "MW3PR12MB4380:",
        "X-Microsoft-Antispam-PRVS": "\n <MW3PR12MB4380BB6C641604AB008D7606CCFE9@MW3PR12MB4380.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:2;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n hjF3O7/CSKWgUOim2VcNRVpvY/Oobm5VlZ9YCLjeICC+qbz18h4SjMGPYBz9iIErmYl7MlF6JiinzYBFUvYNDlhqsb+wRJ1RXZZpX1AHCP0YMmStWL4uusC5cUZB7wkvwZcPrdEpn7+Iq3LwJMnTXB9iU0Qv6S4cCeW547yq9eiLeB0sN2v8onAeFVCN1XDxiXEXfVMd+6zdM/+848hTHXVL6izoOoKhlcEYgizbMTlxWhilb8iPX+EeZ38J4wC3iznvbhAcIE8ZKHRbkppnNMqQ4AXCL/UGA1FB2F2hf4tFry/VYHvdzFanYqACGXkJAibHeuFyKglhr8tyHsujEstSv5Y9ZIIE1Dfa7ROUk3j1BhFy9S749VChTebApprwt1oxB68JzTHWVnG6qQjfGEBj8QVbfp1aj8FBmJuOs8nm4W2UFHc/xoEeY0/jcVVbD1PEM31W3IeDYqbR4CRwXtSiiL1Pgyc1sgypTz0XLMkdyuOf2JwXcE335+e8bWcbNtDr9P0rJtJ+oB+RgbcU4Rl7zFOsgLLJyLkARpP9KjNN7SoJF0U32Vo1Ts0BVJNDXHT8u/V/gCqSzSk3Zy1afYRa/B2ZrqjqgDofvwBCHUnFf3I6rAeNCcMZ+w5c29eO+7fFKMH3XHsCAzI94mIWbjppZM+s3OX9JU+iPC0o4ZyzLQttf+J/b762yd8gFQWxiKTbq1Z+d4xTaGqnuyft18pTJliMqs38Q9gbVh/CKyU=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.36; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid05.nvidia.com; CAT:NONE;\n SFS:(4636009)(396003)(39860400002)(346002)(376002)(136003)(36840700001)(46966006)(70206006)(7636003)(6286002)(70586007)(86362001)(356005)(186003)(7696005)(82740400003)(107886003)(26005)(6916009)(8676002)(2906002)(8936002)(16526019)(82310400003)(47076005)(36860700001)(36756003)(30864003)(336012)(1076003)(54906003)(426003)(5660300002)(55016002)(6666004)(316002)(478600001)(2616005)(4326008)(83380400001)(579004)(559001)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "17 Aug 2021 13:45:32.9509 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n b4d787b6-dcc6-4c0b-ec62-08d9618548ee",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.36];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT030.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MW3PR12MB4380",
        "Subject": "[dpdk-dev] [RFC 10/21] net/mlx5: use context device structure",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Use common context device structure as a sh field.\n\nSigned-off-by: Michael Baum <michaelba@nvidia.com>\n---\n drivers/common/mlx5/mlx5_common.c            |   2 +-\n drivers/common/mlx5/mlx5_common.h            |   6 +-\n drivers/common/mlx5/version.map              |   2 +-\n drivers/common/mlx5/windows/mlx5_common_os.c |   2 +-\n drivers/net/mlx5/linux/mlx5_ethdev_os.c      |   8 +-\n drivers/net/mlx5/linux/mlx5_mp_os.c          |   9 +-\n drivers/net/mlx5/linux/mlx5_os.c             | 432 ++++++++++---------\n drivers/net/mlx5/linux/mlx5_verbs.c          |  55 +--\n drivers/net/mlx5/mlx5.c                      | 103 +++--\n drivers/net/mlx5/mlx5.h                      |  12 +-\n drivers/net/mlx5/mlx5_devx.c                 |  34 +-\n drivers/net/mlx5/mlx5_flow.c                 |   6 +-\n drivers/net/mlx5/mlx5_flow_aso.c             |  24 +-\n drivers/net/mlx5/mlx5_flow_dv.c              |  51 +--\n drivers/net/mlx5/mlx5_flow_verbs.c           |   4 +-\n drivers/net/mlx5/mlx5_mr.c                   |  14 +-\n drivers/net/mlx5/mlx5_txpp.c                 |  17 +-\n drivers/net/mlx5/windows/mlx5_ethdev_os.c    |  14 +-\n drivers/net/mlx5/windows/mlx5_os.c           | 113 ++---\n 19 files changed, 453 insertions(+), 455 deletions(-)",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c\nindex be3d0f2627..ffd2c2c129 100644\n--- a/drivers/common/mlx5/mlx5_common.c\n+++ b/drivers/common/mlx5/mlx5_common.c\n@@ -152,7 +152,7 @@ mlx5_common_args_check(const char *key, const char *val, void *opaque)\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n-static int\n+int\n mlx5_parse_db_map_arg(struct rte_devargs *devargs, int *dbnc)\n {\n \tstruct rte_kvargs *kvlist;\ndiff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h\nindex 10061f364f..c4e86c3175 100644\n--- a/drivers/common/mlx5/mlx5_common.h\n+++ b/drivers/common/mlx5/mlx5_common.h\n@@ -459,14 +459,16 @@ __rte_internal\n bool\n mlx5_dev_is_pci(const struct rte_device *dev);\n \n+__rte_internal\n+int\n+mlx5_parse_db_map_arg(struct rte_devargs *devargs, int *dbnc);\n+\n /* mlx5_common_os.c */\n \n int mlx5_os_devx_open_device(struct mlx5_dev_ctx *dev_ctx,\n \t\t\t     struct rte_device *dev, int dbnc,\n \t\t\t     uint32_t classes);\n int mlx5_os_pd_create(struct mlx5_dev_ctx *dev_ctx);\n-__rte_internal\n-struct devx_device_bdf *mlx5_os_get_devx_device(struct rte_device *dev);\n \n \n #endif /* RTE_PMD_MLX5_COMMON_H_ */\ndiff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map\nindex 18856c198e..a1a8bae5bd 100644\n--- a/drivers/common/mlx5/version.map\n+++ b/drivers/common/mlx5/version.map\n@@ -9,6 +9,7 @@ INTERNAL {\n \n \tmlx5_common_init;\n \n+\tmlx5_parse_db_map_arg; # WINDOWS_NO_EXPORT\n \tmlx5_dev_ctx_release;\n \tmlx5_dev_ctx_prepare;\n \n@@ -145,7 +146,6 @@ INTERNAL {\n \tmlx5_os_dealloc_pd;\n \tmlx5_os_dereg_mr;\n \tmlx5_os_get_ibv_dev; # WINDOWS_NO_EXPORT\n-\tmlx5_os_get_devx_device;\n \tmlx5_os_reg_mr;\n \tmlx5_os_umem_dereg;\n \tmlx5_os_umem_reg;\ndiff --git a/drivers/common/mlx5/windows/mlx5_common_os.c b/drivers/common/mlx5/windows/mlx5_common_os.c\nindex 12819383c1..5d178b0452 100644\n--- a/drivers/common/mlx5/windows/mlx5_common_os.c\n+++ b/drivers/common/mlx5/windows/mlx5_common_os.c\n@@ -144,7 +144,7 @@ mlx5_match_devx_devices_to_addr(struct devx_device_bdf *devx_bdf,\n  * @return\n  *   A device match on success, NULL otherwise and rte_errno is set.\n  */\n-struct devx_device_bdf *\n+static struct devx_device_bdf *\n mlx5_os_get_devx_device(struct rte_device *dev)\n {\n \tint n;\ndiff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c\nindex f34133e2c6..b4bbf841cc 100644\n--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c\n@@ -324,7 +324,7 @@ int\n mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct ibv_context *ctx = priv->sh->ctx;\n+\tstruct ibv_context *ctx = priv->sh->dev_ctx->ctx;\n \tstruct ibv_values_ex values;\n \tint err = 0;\n \n@@ -778,7 +778,7 @@ mlx5_dev_interrupt_handler(void *cb_arg)\n \t\tstruct rte_eth_dev *dev;\n \t\tuint32_t tmp;\n \n-\t\tif (mlx5_glue->get_async_event(sh->ctx, &event))\n+\t\tif (mlx5_glue->get_async_event(sh->dev_ctx->ctx, &event))\n \t\t\tbreak;\n \t\t/* Retrieve and check IB port index. 
*/\n \t\ttmp = (uint32_t)event.element.port_num;\n@@ -987,10 +987,10 @@ mlx5_set_link_up(struct rte_eth_dev *dev)\n int\n mlx5_is_removed(struct rte_eth_dev *dev)\n {\n-\tstruct ibv_device_attr device_attr;\n+\tstruct ibv_device_attr dev_attr;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \n-\tif (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)\n+\tif (mlx5_glue->query_device(priv->sh->dev_ctx->ctx, &dev_attr) == EIO)\n \t\treturn 1;\n \treturn 0;\n }\ndiff --git a/drivers/net/mlx5/linux/mlx5_mp_os.c b/drivers/net/mlx5/linux/mlx5_mp_os.c\nindex 3a4aa766f8..53e372694c 100644\n--- a/drivers/net/mlx5/linux/mlx5_mp_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_mp_os.c\n@@ -29,6 +29,7 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)\n \t\t(const struct mlx5_mp_param *)mp_msg->param;\n \tstruct rte_eth_dev *dev;\n \tstruct mlx5_priv *priv;\n+\tstruct mlx5_dev_ctx *dev_ctx;\n \tstruct mr_cache_entry entry;\n \tuint32_t lkey;\n \tint ret;\n@@ -41,10 +42,11 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)\n \t}\n \tdev = &rte_eth_devices[param->port_id];\n \tpriv = dev->data->dev_private;\n+\tdev_ctx = priv->sh->dev_ctx;\n \tswitch (param->type) {\n \tcase MLX5_MP_REQ_CREATE_MR:\n \t\tmp_init_msg(&priv->mp_id, &mp_res, param->type);\n-\t\tlkey = mlx5_mr_create_primary(priv->sh->pd,\n+\t\tlkey = mlx5_mr_create_primary(dev_ctx->pd,\n \t\t\t\t\t      &priv->sh->share_cache,\n \t\t\t\t\t      &entry, param->args.addr,\n \t\t\t\t\t      priv->config.mr_ext_memseg_en);\n@@ -55,7 +57,7 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)\n \tcase MLX5_MP_REQ_VERBS_CMD_FD:\n \t\tmp_init_msg(&priv->mp_id, &mp_res, param->type);\n \t\tmp_res.num_fds = 1;\n-\t\tmp_res.fds[0] = ((struct ibv_context *)priv->sh->ctx)->cmd_fd;\n+\t\tmp_res.fds[0] = ((struct ibv_context *)dev_ctx->ctx)->cmd_fd;\n \t\tres->result = 0;\n \t\tret = rte_mp_reply(&mp_res, peer);\n \t\tbreak;\n@@ -202,7 +204,8 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx5_mp_req_type type)\n \tmp_init_msg(&priv->mp_id, &mp_req, type);\n \tif (type == MLX5_MP_REQ_START_RXTX) {\n \t\tmp_req.num_fds = 1;\n-\t\tmp_req.fds[0] = ((struct ibv_context *)priv->sh->ctx)->cmd_fd;\n+\t\tmp_req.fds[0] =\n+\t\t\t((struct ibv_context *)priv->sh->dev_ctx->ctx)->cmd_fd;\n \t}\n \tret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);\n \tif (ret) {\ndiff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex b4670fad6e..e2a7c3d09c 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -214,7 +214,7 @@ mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)\n static void *\n mlx5_alloc_verbs_buf(size_t size, void *data)\n {\n-\tstruct mlx5_dev_ctx_shared *sh = data;\n+\tstruct mlx5_dev_ctx *dev_ctx = data;\n \tvoid *ret;\n \tsize_t alignment = rte_mem_page_size();\n \tif (alignment == (size_t)-1) {\n@@ -224,7 +224,7 @@ mlx5_alloc_verbs_buf(size_t size, void *data)\n \t}\n \n \tMLX5_ASSERT(data != NULL);\n-\tret = mlx5_malloc(0, size, alignment, sh->numa_node);\n+\tret = mlx5_malloc(0, size, alignment, dev_ctx->numa_node);\n \tif (!ret && size)\n \t\trte_errno = ENOMEM;\n \treturn ret;\n@@ -290,7 +290,7 @@ __mlx5_discovery_misc5_cap(struct mlx5_priv *priv)\n \t\t\t metadata_reg_c_0, 0xffff);\n \t}\n #endif\n-\tmatcher = mlx5_glue->dv_create_flow_matcher(priv->sh->ctx,\n+\tmatcher = mlx5_glue->dv_create_flow_matcher(priv->sh->dev_ctx->ctx,\n \t\t\t\t\t\t    &dv_attr, tbl);\n \tif (matcher) {\n 
\t\tpriv->sh->misc5_cap = 1;\n@@ -389,7 +389,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)\n \tvoid *domain;\n \n \t/* Reference counter is zero, we should initialize structures. */\n-\tdomain = mlx5_glue->dr_create_domain(sh->ctx,\n+\tdomain = mlx5_glue->dr_create_domain(sh->dev_ctx->ctx,\n \t\t\t\t\t     MLX5DV_DR_DOMAIN_TYPE_NIC_RX);\n \tif (!domain) {\n \t\tDRV_LOG(ERR, \"ingress mlx5dv_dr_create_domain failed\");\n@@ -397,7 +397,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)\n \t\tgoto error;\n \t}\n \tsh->rx_domain = domain;\n-\tdomain = mlx5_glue->dr_create_domain(sh->ctx,\n+\tdomain = mlx5_glue->dr_create_domain(sh->dev_ctx->ctx,\n \t\t\t\t\t     MLX5DV_DR_DOMAIN_TYPE_NIC_TX);\n \tif (!domain) {\n \t\tDRV_LOG(ERR, \"egress mlx5dv_dr_create_domain failed\");\n@@ -407,8 +407,8 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)\n \tsh->tx_domain = domain;\n #ifdef HAVE_MLX5DV_DR_ESWITCH\n \tif (priv->config.dv_esw_en) {\n-\t\tdomain  = mlx5_glue->dr_create_domain\n-\t\t\t(sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);\n+\t\tdomain = mlx5_glue->dr_create_domain(sh->dev_ctx->ctx,\n+\t\t\t\t\t\t     MLX5DV_DR_DOMAIN_TYPE_FDB);\n \t\tif (!domain) {\n \t\t\tDRV_LOG(ERR, \"FDB mlx5dv_dr_create_domain failed\");\n \t\t\terr = errno;\n@@ -816,7 +816,7 @@ static void\n mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tvoid *ctx = priv->sh->ctx;\n+\tvoid *ctx = priv->sh->dev_ctx->ctx;\n \n \tpriv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);\n \tif (!priv->q_counters) {\n@@ -833,7 +833,7 @@ mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)\n \t\t\t\t\t\t    .wq_type = IBV_WQT_RQ,\n \t\t\t\t\t\t    .max_wr = 1,\n \t\t\t\t\t\t    .max_sge = 1,\n-\t\t\t\t\t\t    .pd = priv->sh->pd,\n+\t\t\t\t\t\t    .pd = priv->sh->dev_ctx->pd,\n \t\t\t\t\t\t    .cq = cq,\n \t\t\t\t\t\t});\n \t\t\tif (wq) {\n@@ -934,6 +934,8 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,\n  *\n  * @param dpdk_dev\n  *   Backing DPDK device.\n+ * @param dev_ctx\n+ *   Pointer to the context device data structure.\n  * @param spawn\n  *   Verbs device parameters (name, port, switch_info) to spawn.\n  * @param config\n@@ -950,6 +952,7 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,\n  */\n static struct rte_eth_dev *\n mlx5_dev_spawn(struct rte_device *dpdk_dev,\n+\t       struct mlx5_dev_ctx *dev_ctx,\n \t       struct mlx5_dev_spawn_data *spawn,\n \t       struct mlx5_dev_config *config,\n \t       struct rte_eth_devargs *eth_da)\n@@ -1073,10 +1076,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\t\tconfig->dv_xmeta_en = MLX5_XMETA_MODE_META16;\n \t}\n \tmlx5_malloc_mem_select(config->sys_mem_en);\n-\tsh = mlx5_alloc_shared_dev_ctx(spawn, config);\n+\tsh = mlx5_alloc_shared_dev_ctx(spawn, dev_ctx, config);\n \tif (!sh)\n \t\treturn NULL;\n-\tconfig->devx = sh->devx;\n #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR\n \tconfig->dest_tir = 1;\n #endif\n@@ -1093,7 +1095,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT\n \tdv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;\n #endif\n-\tmlx5_glue->dv_query_device(sh->ctx, &dv_attr);\n+\tmlx5_glue->dv_query_device(sh->dev_ctx->ctx, &dv_attr);\n \tif (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {\n \t\tif (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {\n \t\t\tDRV_LOG(DEBUG, \"enhanced MPW is supported\");\n@@ -1170,7 +1172,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n #endif\n \tconfig->mpls_en = mpls_en;\n \t/* Check port 
status. */\n-\terr = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);\n+\terr = mlx5_glue->query_port(sh->dev_ctx->ctx, spawn->phys_port,\n+\t\t\t\t    &port_attr);\n \tif (err) {\n \t\tDRV_LOG(ERR, \"port query failed: %s\", strerror(err));\n \t\tgoto error;\n@@ -1220,7 +1223,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t * register is defined by mask.\n \t */\n \tif (switch_info->representor || switch_info->master) {\n-\t\terr = mlx5_glue->devx_port_query(sh->ctx,\n+\t\terr = mlx5_glue->devx_port_query(sh->dev_ctx->ctx,\n \t\t\t\t\t\t spawn->phys_port,\n \t\t\t\t\t\t &vport_info);\n \t\tif (err) {\n@@ -1377,7 +1380,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\tconfig->mps == MLX5_MPW ? \"legacy \" : \"\",\n \t\tconfig->mps != MLX5_MPW_DISABLED ? \"enabled\" : \"disabled\");\n \tif (config->devx) {\n-\t\terr = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);\n+\t\terr = mlx5_devx_cmd_query_hca_attr(sh->dev_ctx->ctx,\n+\t\t\t\t\t\t   &config->hca_attr);\n \t\tif (err) {\n \t\t\terr = -err;\n \t\t\tgoto error;\n@@ -1600,7 +1604,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \n \t\terr = config->hca_attr.access_register_user ?\n \t\t\tmlx5_devx_cmd_register_read\n-\t\t\t\t(sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,\n+\t\t\t\t(sh->dev_ctx->ctx, MLX5_REGISTER_ID_MTUTC, 0,\n \t\t\t\treg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;\n \t\tif (!err) {\n \t\t\tuint32_t ts_mode;\n@@ -1741,12 +1745,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \tif (!priv->mtr_profile_tbl)\n \t\tgoto error;\n \t/* Hint libmlx5 to use PMD allocator for data plane resources */\n-\tmlx5_glue->dv_set_context_attr(sh->ctx,\n+\tmlx5_glue->dv_set_context_attr(sh->dev_ctx->ctx,\n \t\t\tMLX5DV_CTX_ATTR_BUF_ALLOCATORS,\n \t\t\t(void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){\n \t\t\t\t.alloc = &mlx5_alloc_verbs_buf,\n \t\t\t\t.free = &mlx5_free_verbs_buf,\n-\t\t\t\t.data = sh,\n+\t\t\t\t.data = dev_ctx,\n \t\t\t}));\n \t/* Bring Ethernet device up. 
*/\n \tDRV_LOG(DEBUG, \"port %u forcing Ethernet interface up\",\n@@ -1923,9 +1927,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\t\teth_dev->data->dev_private = NULL;\n \t}\n \tif (eth_dev != NULL) {\n-\t\t/* mac_addrs must not be freed alone because part of\n+\t\t/*\n+\t\t * mac_addrs must not be freed alone because part of\n \t\t * dev_private\n-\t\t **/\n+\t\t */\n \t\teth_dev->data->mac_addrs = NULL;\n \t\trte_eth_dev_release_port(eth_dev);\n \t}\n@@ -2144,6 +2149,8 @@ mlx5_os_config_default(struct mlx5_dev_config *config)\n  *\n  * @param[in] pci_dev\n  *   PCI device information.\n+ * @param dev_ctx\n+ *   Pointer to the context device data structure.\n  * @param[in] req_eth_da\n  *   Requested ethdev device argument.\n  * @param[in] owner_id\n@@ -2154,8 +2161,9 @@ mlx5_os_config_default(struct mlx5_dev_config *config)\n  */\n static int\n mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,\n+\t\t     struct mlx5_dev_ctx *dev_ctx,\n \t\t     struct rte_eth_devargs *req_eth_da,\n-\t\t     uint16_t owner_id)\n+\t\t     uint16_t owner_id, uint8_t devx)\n {\n \tstruct ibv_device **ibv_list;\n \t/*\n@@ -2181,13 +2189,14 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,\n \t *   < 0 - no bonding device (single one)\n \t *  >= 0 - bonding device (value is slave PF index)\n \t */\n-\tint bd = -1;\n+\tint bd;\n \tstruct mlx5_dev_spawn_data *list = NULL;\n \tstruct mlx5_dev_config dev_config;\n \tunsigned int dev_config_vf;\n \tstruct rte_eth_devargs eth_da = *req_eth_da;\n \tstruct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */\n \tstruct mlx5_bond_info bond_info;\n+\tconst char *ibdev_name = mlx5_os_get_ctx_device_name(dev_ctx->ctx);\n \tint ret = -1;\n \n \terrno = 0;\n@@ -2206,38 +2215,22 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,\n \tint nl_rdma = mlx5_nl_init(NETLINK_RDMA);\n \tunsigned int i;\n \n-\twhile (ret-- > 0) {\n-\t\tstruct rte_pci_addr pci_addr;\n+\tbd = mlx5_device_bond_pci_match(ibdev_name, &owner_pci, nl_rdma,\n+\t\t\t\t\towner_id, &bond_info);\n+\tif (bd >= 0) {\n+\t\t/* Amend owner pci address if owner PF ID specified. */\n+\t\tif (eth_da.nb_representor_ports)\n+\t\t\towner_pci.function += owner_id;\n+\t\tDRV_LOG(INFO,\n+\t\t\t\"PCI information matches for slave %d bonding device \\\"%s\\\".\",\n+\t\t\tbd, ibdev_name);\n+\t\tnd++;\n+\t} else {\n+\t\twhile (ret-- > 0) {\n+\t\t\tstruct rte_pci_addr pci_addr;\n \n-\t\tDRV_LOG(DEBUG, \"checking device \\\"%s\\\"\", ibv_list[ret]->name);\n-\t\tbd = mlx5_device_bond_pci_match(ibv_list[ret]->name, &owner_pci,\n-\t\t\t\t\t\tnl_rdma, owner_id, &bond_info);\n-\t\tif (bd >= 0) {\n-\t\t\t/*\n-\t\t\t * Bonding device detected. Only one match is allowed,\n-\t\t\t * the bonding is supported over multi-port IB device,\n-\t\t\t * there should be no matches on representor PCI\n-\t\t\t * functions or non VF LAG bonding devices with\n-\t\t\t * specified address.\n-\t\t\t */\n-\t\t\tif (nd) {\n-\t\t\t\tDRV_LOG(ERR,\n-\t\t\t\t\t\"multiple PCI match on bonding device\"\n-\t\t\t\t\t\"\\\"%s\\\" found\", ibv_list[ret]->name);\n-\t\t\t\trte_errno = ENOENT;\n-\t\t\t\tret = -rte_errno;\n-\t\t\t\tgoto exit;\n-\t\t\t}\n-\t\t\t/* Amend owner pci address if owner PF ID specified. */\n-\t\t\tif (eth_da.nb_representor_ports)\n-\t\t\t\towner_pci.function += owner_id;\n-\t\t\tDRV_LOG(INFO,\n-\t\t\t\t\"PCI information matches for slave %d bonding device \\\"%s\\\"\",\n-\t\t\t\tbd, ibv_list[ret]->name);\n-\t\t\tibv_match[nd++] = ibv_list[ret];\n-\t\t\tbreak;\n-\t\t} else {\n-\t\t\t/* Bonding device not found. 
*/\n+\t\t\tDRV_LOG(DEBUG, \"checking device \\\"%s\\\"\",\n+\t\t\t\tibv_list[ret]->name);\n \t\t\tif (mlx5_get_pci_addr(ibv_list[ret]->ibdev_path,\n \t\t\t\t\t      &pci_addr))\n \t\t\t\tcontinue;\n@@ -2246,22 +2239,26 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,\n \t\t\t    owner_pci.devid != pci_addr.devid ||\n \t\t\t    owner_pci.function != pci_addr.function)\n \t\t\t\tcontinue;\n-\t\t\tDRV_LOG(INFO, \"PCI information matches for device \\\"%s\\\"\",\n+\t\t\tDRV_LOG(INFO,\n+\t\t\t\t\"PCI information matches for device \\\"%s\\\"\",\n \t\t\t\tibv_list[ret]->name);\n \t\t\tibv_match[nd++] = ibv_list[ret];\n \t\t}\n \t}\n \tibv_match[nd] = NULL;\n-\tif (!nd) {\n-\t\t/* No device matches, just complain and bail out. */\n-\t\tDRV_LOG(WARNING,\n-\t\t\t\"no Verbs device matches PCI device \" PCI_PRI_FMT \",\"\n-\t\t\t\" are kernel drivers loaded?\",\n-\t\t\towner_pci.domain, owner_pci.bus,\n-\t\t\towner_pci.devid, owner_pci.function);\n-\t\trte_errno = ENOENT;\n-\t\tret = -rte_errno;\n-\t\tgoto exit;\n+\tif (bd >= 0 && nd > 1) {\n+\t\t/*\n+\t\t * Bonding device detected. Only one match is allowed, the\n+\t\t * bonding is supported over multi-port IB device, there should\n+\t\t * be no matches on representor PCI functions or non VF LAG\n+\t\t * bonding devices with specified address.\n+\t\t */\n+\t\tDRV_LOG(ERR,\n+\t\t\t\"Multiple PCI match on bonding device \\\"%s\\\" found.\",\n+\t\t\tibdev_name);\n+\t\t\trte_errno = ENOENT;\n+\t\t\tret = -rte_errno;\n+\t\t\tgoto exit;\n \t}\n \tif (nd == 1) {\n \t\t/*\n@@ -2270,11 +2267,11 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,\n \t\t * number and check the representors existence.\n \t\t */\n \t\tif (nl_rdma >= 0)\n-\t\t\tnp = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);\n+\t\t\tnp = mlx5_nl_portnum(nl_rdma, ibdev_name);\n \t\tif (!np)\n \t\t\tDRV_LOG(WARNING,\n \t\t\t\t\"Cannot get IB device \\\"%s\\\" ports number.\",\n-\t\t\t\tibv_match[0]->name);\n+\t\t\t\tibdev_name);\n \t\tif (bd >= 0 && !np) {\n \t\t\tDRV_LOG(ERR, \"Cannot get ports for bonding device.\");\n \t\t\trte_errno = ENOENT;\n@@ -2306,15 +2303,12 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,\n \t\t\tlist[ns].bond_info = &bond_info;\n \t\t\tlist[ns].max_port = np;\n \t\t\tlist[ns].phys_port = i;\n-\t\t\tlist[ns].phys_dev = ibv_match[0];\n-\t\t\tlist[ns].phys_dev_name = ibv_match[0]->name;\n+\t\t\tlist[ns].phys_dev_name = ibdev_name;\n \t\t\tlist[ns].eth_dev = NULL;\n \t\t\tlist[ns].pci_dev = pci_dev;\n \t\t\tlist[ns].pf_bond = bd;\n-\t\t\tlist[ns].ifindex = mlx5_nl_ifindex\n-\t\t\t\t(nl_rdma,\n-\t\t\t\tmlx5_os_get_dev_device_name\n-\t\t\t\t\t\t(list[ns].phys_dev), i);\n+\t\t\tlist[ns].ifindex = mlx5_nl_ifindex(nl_rdma,\n+\t\t\t\t\t\t\t   ibdev_name, i);\n \t\t\tif (!list[ns].ifindex) {\n \t\t\t\t/*\n \t\t\t\t * No network interface index found for the\n@@ -2403,17 +2397,15 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,\n \t\t\tlist[ns].bond_info = NULL;\n \t\t\tlist[ns].max_port = 1;\n \t\t\tlist[ns].phys_port = 1;\n-\t\t\tlist[ns].phys_dev = ibv_match[i];\n-\t\t\tlist[ns].phys_dev_name = ibv_match[i]->name;\n+\t\t\tlist[ns].phys_dev_name = ibdev_name;\n \t\t\tlist[ns].eth_dev = NULL;\n \t\t\tlist[ns].pci_dev = pci_dev;\n \t\t\tlist[ns].pf_bond = -1;\n \t\t\tlist[ns].ifindex = 0;\n \t\t\tif (nl_rdma >= 0)\n-\t\t\t\tlist[ns].ifindex = mlx5_nl_ifindex\n-\t\t\t\t(nl_rdma,\n-\t\t\t\tmlx5_os_get_dev_device_name\n-\t\t\t\t\t\t(list[ns].phys_dev), 1);\n+\t\t\t\tlist[ns].ifindex = mlx5_nl_ifindex(nl_rdma,\n+\t\t\t\t\t\t\t\t   
ibdev_name,\n+\t\t\t\t\t\t\t\t   1);\n \t\t\tif (!list[ns].ifindex) {\n \t\t\t\tchar ifname[IF_NAMESIZE];\n \n@@ -2477,7 +2469,7 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,\n \t\t\t\t * May be SRIOV is not enabled or there is no\n \t\t\t\t * representors.\n \t\t\t\t */\n-\t\t\t\tDRV_LOG(INFO, \"no E-Switch support detected\");\n+\t\t\t\tDRV_LOG(INFO, \"No E-Switch support detected.\");\n \t\t\t\tns++;\n \t\t\t\tbreak;\n \t\t\t}\n@@ -2546,12 +2538,11 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,\n \n \t\t/* Default configuration. */\n \t\tmlx5_os_config_default(&dev_config);\n+\t\tdev_config.devx = devx;\n \t\tdev_config.vf = dev_config_vf;\n \t\tdev_config.allow_duplicate_pattern = 1;\n-\t\tlist[i].numa_node = pci_dev->device.numa_node;\n-\t\tlist[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,\n-\t\t\t\t\t\t &list[i],\n-\t\t\t\t\t\t &dev_config,\n+\t\tlist[i].eth_dev = mlx5_dev_spawn(&pci_dev->device, dev_ctx,\n+\t\t\t\t\t\t &list[i], &dev_config,\n \t\t\t\t\t\t &eth_da);\n \t\tif (!list[i].eth_dev) {\n \t\t\tif (rte_errno != EBUSY && rte_errno != EEXIST)\n@@ -2671,7 +2662,8 @@ mlx5_os_parse_eth_devargs(struct rte_device *dev,\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_os_pci_probe(struct rte_pci_device *pci_dev)\n+mlx5_os_pci_probe(struct rte_pci_device *pci_dev, struct mlx5_dev_ctx *dev_ctx,\n+\t\t  uint8_t devx)\n {\n \tstruct rte_eth_devargs eth_da = { .nb_ports = 0 };\n \tint ret = 0;\n@@ -2684,8 +2676,8 @@ mlx5_os_pci_probe(struct rte_pci_device *pci_dev)\n \tif (eth_da.nb_ports > 0) {\n \t\t/* Iterate all port if devargs pf is range: \"pf[0-1]vf[...]\". */\n \t\tfor (p = 0; p < eth_da.nb_ports; p++) {\n-\t\t\tret = mlx5_os_pci_probe_pf(pci_dev, &eth_da,\n-\t\t\t\t\t\t   eth_da.ports[p]);\n+\t\t\tret = mlx5_os_pci_probe_pf(pci_dev, dev_ctx, &eth_da,\n+\t\t\t\t\t\t   eth_da.ports[p], devx);\n \t\t\tif (ret)\n \t\t\t\tbreak;\n \t\t}\n@@ -2698,14 +2690,15 @@ mlx5_os_pci_probe(struct rte_pci_device *pci_dev)\n \t\t\tmlx5_net_remove(&pci_dev->device);\n \t\t}\n \t} else {\n-\t\tret = mlx5_os_pci_probe_pf(pci_dev, &eth_da, 0);\n+\t\tret = mlx5_os_pci_probe_pf(pci_dev, dev_ctx, &eth_da, 0, devx);\n \t}\n \treturn ret;\n }\n \n /* Probe a single SF device on auxiliary bus, no representor support. */\n static int\n-mlx5_os_auxiliary_probe(struct rte_device *dev)\n+mlx5_os_auxiliary_probe(struct rte_device *dev, struct mlx5_dev_ctx *dev_ctx,\n+\t\t\tuint8_t devx)\n {\n \tstruct rte_eth_devargs eth_da = { .nb_ports = 0 };\n \tstruct mlx5_dev_config config;\n@@ -2721,22 +2714,19 @@ mlx5_os_auxiliary_probe(struct rte_device *dev)\n \t/* Set default config data. */\n \tmlx5_os_config_default(&config);\n \tconfig.sf = 1;\n+\tconfig.devx = devx;\n \t/* Init spawn data. */\n \tspawn.max_port = 1;\n \tspawn.phys_port = 1;\n-\tspawn.phys_dev = mlx5_os_get_ibv_dev(dev);\n-\tif (spawn.phys_dev == NULL)\n-\t\treturn -rte_errno;\n-\tspawn.phys_dev_name = mlx5_os_get_dev_device_name(spawn.phys_dev);\n+\tspawn.phys_dev_name = mlx5_os_get_ctx_device_name(dev_ctx->ctx);\n \tret = mlx5_auxiliary_get_ifindex(dev->name);\n \tif (ret < 0) {\n \t\tDRV_LOG(ERR, \"failed to get ethdev ifindex: %s\", dev->name);\n \t\treturn ret;\n \t}\n \tspawn.ifindex = ret;\n-\tspawn.numa_node = dev->numa_node;\n \t/* Spawn device. */\n-\teth_dev = mlx5_dev_spawn(dev, &spawn, &config, &eth_da);\n+\teth_dev = mlx5_dev_spawn(dev, dev_ctx, &spawn, &config, &eth_da);\n \tif (eth_dev == NULL)\n \t\treturn -rte_errno;\n \t/* Post create. 
*/\n@@ -2750,38 +2740,8 @@ mlx5_os_auxiliary_probe(struct rte_device *dev)\n \treturn 0;\n }\n \n-/**\n- * Net class driver callback to probe a device.\n- *\n- * This function probe PCI bus device(s) or a single SF on auxiliary bus.\n- *\n- * @param[in] dev\n- *   Pointer to the generic device.\n- *\n- * @return\n- *   0 on success, the function cannot fail.\n- */\n-int\n-mlx5_os_net_probe(struct rte_device *dev)\n-{\n-\tint ret;\n-\n-\tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n-\t\tmlx5_pmd_socket_init();\n-\tret = mlx5_init_once();\n-\tif (ret) {\n-\t\tDRV_LOG(ERR, \"unable to init PMD global data: %s\",\n-\t\t\tstrerror(rte_errno));\n-\t\treturn -rte_errno;\n-\t}\n-\tif (mlx5_dev_is_pci(dev))\n-\t\treturn mlx5_os_pci_probe(RTE_DEV_TO_PCI(dev));\n-\telse\n-\t\treturn mlx5_os_auxiliary_probe(dev);\n-}\n-\n static int\n-mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)\n+mlx5_config_doorbell_mapping_env(int dbnc)\n {\n \tchar *env;\n \tint value;\n@@ -2790,11 +2750,11 @@ mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)\n \t/* Get environment variable to store. */\n \tenv = getenv(MLX5_SHUT_UP_BF);\n \tvalue = env ? !!strcmp(env, \"0\") : MLX5_ARG_UNSET;\n-\tif (config->dbnc == MLX5_ARG_UNSET)\n+\tif (dbnc == MLX5_ARG_UNSET)\n \t\tsetenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);\n \telse\n \t\tsetenv(MLX5_SHUT_UP_BF,\n-\t\t       config->dbnc == MLX5_TXDB_NCACHED ? \"1\" : \"0\", 1);\n+\t\t       dbnc == MLX5_TXDB_NCACHED ? \"1\" : \"0\", 1);\n \treturn value;\n }\n \n@@ -2810,104 +2770,163 @@ mlx5_restore_doorbell_mapping_env(int value)\n }\n \n /**\n- * Extract pdn of PD object using DV API.\n+ * Function API to open IB device using Verbs.\n+ *\n+ * This function calls the Linux glue APIs to open a device.\n  *\n- * @param[in] pd\n- *   Pointer to the verbs PD object.\n- * @param[out] pdn\n- *   Pointer to the PD object number variable.\n+ * @param dev_ctx\n+ *   Pointer to the context device data structure.\n+ * @param dev\n+ *   Pointer to the generic device.\n+ * @param dbnc\n+ *   Device argument help configure the environment variable.\n  *\n  * @return\n- *   0 on success, error value otherwise.\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n-int\n-mlx5_os_get_pdn(void *pd, uint32_t *pdn)\n+static int\n+mlx5_verbs_open_device(struct mlx5_dev_ctx *dev_ctx, struct rte_device *dev,\n+\t\t       int dbnc)\n {\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\tstruct mlx5dv_obj obj;\n-\tstruct mlx5dv_pd pd_info;\n-\tint ret = 0;\n+\tstruct ibv_device *ibv;\n+\tstruct ibv_context *ctx = NULL;\n+\tint dbmap_env;\n \n-\tobj.pd.in = pd;\n-\tobj.pd.out = &pd_info;\n-\tret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);\n-\tif (ret) {\n-\t\tDRV_LOG(DEBUG, \"Fail to get PD object info\");\n+\tibv = mlx5_os_get_ibv_dev(dev);\n+\tif (!ibv)\n+\t\treturn -rte_errno;\n+\tDRV_LOG(INFO, \"Dev information matches for device \\\"%s\\\".\", ibv->name);\n+\t/*\n+\t * Configure environment variable \"MLX5_BF_SHUT_UP\" before the device\n+\t * creation. The rdma_core library checks the variable at device\n+\t * creation and stores the result internally.\n+\t */\n+\tdbmap_env = mlx5_config_doorbell_mapping_env(dbnc);\n+\t/* Try to open IB device with Verbs. 
*/\n+\terrno = 0;\n+\tctx = mlx5_glue->open_device(ibv);\n+\t/*\n+\t * The environment variable is not needed anymore, all device creation\n+\t * attempts are completed.\n+\t */\n+\tmlx5_restore_doorbell_mapping_env(dbmap_env);\n+\tif (!ctx) {\n+\t\tDRV_LOG(ERR, \"Failed to open IB device \\\"%s\\\".\", ibv->name);\n+\t\trte_errno = errno ? errno : ENODEV;\n+\t\treturn -rte_errno;\n+\t}\n+\t/* Hint libmlx5 to use PMD allocator for data plane resources */\n+\tmlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,\n+\t\t\t    (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){\n+\t\t\t\t    .alloc = &mlx5_alloc_verbs_buf,\n+\t\t\t\t    .free = &mlx5_free_verbs_buf,\n+\t\t\t\t    .data = dev_ctx,\n+\t\t\t    }));\n+\tdev_ctx->ctx = ctx;\n+\treturn 0;\n+}\n+\n+/**\n+ * Initialize context device and allocate all its resources.\n+ *\n+ * @param dev_ctx\n+ *   Pointer to the context device data structure.\n+ * @param dev\n+ *   Pointer to mlx5 device structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_verbs_dev_ctx_prepare(struct mlx5_dev_ctx *dev_ctx, struct rte_device *dev)\n+{\n+\tint dbnc = MLX5_ARG_UNSET;\n+\tint ret;\n+\n+\t/*\n+\t * Parse Tx doorbell mapping parameter. It helps to configure\n+\t * environment variable \"MLX5_BF_SHUT_UP\" before the device creation.\n+\t */\n+\tret = mlx5_parse_db_map_arg(dev->devargs, &dbnc);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\t/* Open device using Verbs. */\n+\tret = mlx5_verbs_open_device(dev_ctx, dev, dbnc);\n+\tif (ret < 0)\n \t\treturn ret;\n+\t/* Allocate Protection Domain object. */\n+\tdev_ctx->pd = mlx5_glue->alloc_pd(dev_ctx->ctx);\n+\tif (dev_ctx->pd == NULL) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate PD.\");\n+\t\trte_errno = errno ? errno : ENOMEM;\n+\t\tclaim_zero(mlx5_glue->close_device(dev_ctx->ctx));\n+\t\tdev_ctx->ctx = NULL;\n+\t\treturn -rte_errno;\n \t}\n-\t*pdn = pd_info.pdn;\n \treturn 0;\n-#else\n-\t(void)pd;\n-\t(void)pdn;\n-\treturn -ENOTSUP;\n-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */\n }\n \n+\n /**\n- * Function API to open IB device.\n+ * Net class driver callback to probe a device.\n  *\n- * This function calls the Linux glue APIs to open a device.\n+ * This function probe PCI bus device(s) or a single SF on auxiliary bus.\n  *\n- * @param[in] spawn\n- *   Pointer to the IB device attributes (name, port, etc).\n- * @param[out] config\n- *   Pointer to device configuration structure.\n- * @param[out] sh\n- *   Pointer to shared context structure.\n+ * @param[in] dev\n+ *   Pointer to the generic device.\n  *\n  * @return\n- *   0 on success, a positive error value otherwise.\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n int\n-mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,\n-\t\t     const struct mlx5_dev_config *config,\n-\t\t     struct mlx5_dev_ctx_shared *sh)\n+mlx5_os_net_probe(struct rte_device *dev)\n {\n-\tint dbmap_env;\n-\tint err = 0;\n+\tstruct mlx5_dev_ctx *dev_ctx;\n+\tuint8_t devx = 0;\n+\tint ret;\n \n-\tpthread_mutex_init(&sh->txpp.mutex, NULL);\n+\tdev_ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_dev_ctx),\n+\t\t\t      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\tif (dev_ctx == NULL) {\n+\t\tDRV_LOG(ERR, \"Device context allocation failure.\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn -rte_errno;\n+\t}\n \t/*\n-\t * Configure environment variable \"MLX5_BF_SHUT_UP\"\n-\t * before the device creation. 
The rdma_core library\n-\t * checks the variable at device creation and\n-\t * stores the result internally.\n+\t * Initialize context device and allocate all its resources.\n+\t * Try to do it with DV first, then usual Verbs.\n \t */\n-\tdbmap_env = mlx5_config_doorbell_mapping_env(config);\n-\t/* Try to open IB device with DV first, then usual Verbs. */\n-\terrno = 0;\n-\tsh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev);\n-\tif (sh->ctx) {\n-\t\tsh->devx = 1;\n-\t\tDRV_LOG(DEBUG, \"DevX is supported\");\n-\t\t/* The device is created, no need for environment. */\n-\t\tmlx5_restore_doorbell_mapping_env(dbmap_env);\n+\tret = mlx5_dev_ctx_prepare(dev_ctx, dev, MLX5_CLASS_ETH);\n+\tif (ret < 0) {\n+\t\tgoto error;\n+\t} else if (dev_ctx->ctx) {\n+\t\tdevx = 1;\n+\t\tDRV_LOG(DEBUG, \"DevX is supported.\");\n \t} else {\n-\t\t/* The environment variable is still configured. */\n-\t\tsh->ctx = mlx5_glue->open_device(spawn->phys_dev);\n-\t\terr = errno ? errno : ENODEV;\n-\t\t/*\n-\t\t * The environment variable is not needed anymore,\n-\t\t * all device creation attempts are completed.\n-\t\t */\n-\t\tmlx5_restore_doorbell_mapping_env(dbmap_env);\n-\t\tif (!sh->ctx)\n-\t\t\treturn err;\n-\t\tDRV_LOG(DEBUG, \"DevX is NOT supported\");\n-\t\terr = 0;\n-\t}\n-\tif (!err && sh->ctx) {\n-\t\t/* Hint libmlx5 to use PMD allocator for data plane resources */\n-\t\tmlx5_glue->dv_set_context_attr(sh->ctx,\n-\t\t\tMLX5DV_CTX_ATTR_BUF_ALLOCATORS,\n-\t\t\t(void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){\n-\t\t\t\t.alloc = &mlx5_alloc_verbs_buf,\n-\t\t\t\t.free = &mlx5_free_verbs_buf,\n-\t\t\t\t.data = sh,\n-\t\t\t}));\n+\t\tret = mlx5_verbs_dev_ctx_prepare(dev_ctx, dev);\n+\t\tif (ret < 0)\n+\t\t\tgoto error;\n+\t\tDRV_LOG(DEBUG, \"DevX is NOT supported.\");\n \t}\n-\treturn err;\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n+\t\tmlx5_pmd_socket_init();\n+\tret = mlx5_init_once();\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"unable to init PMD global data: %s\",\n+\t\t\tstrerror(rte_errno));\n+\t\tgoto error;\n+\t}\n+\tif (mlx5_dev_is_pci(dev))\n+\t\tret = mlx5_os_pci_probe(RTE_DEV_TO_PCI(dev), dev_ctx, devx);\n+\telse\n+\t\tret = mlx5_os_auxiliary_probe(dev, dev_ctx, devx);\n+\tif (ret)\n+\t\tgoto error;\n+\treturn ret;\n+error:\n+\tmlx5_dev_ctx_release(dev_ctx);\n+\tmlx5_free(dev_ctx);\n+\treturn ret;\n }\n \n /**\n@@ -2921,18 +2940,18 @@ mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,\n void\n mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)\n {\n+\tstruct ibv_context *ctx = sh->dev_ctx->ctx;\n \tint ret;\n \tint flags;\n \n \tsh->intr_handle.fd = -1;\n-\tflags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL);\n-\tret = fcntl(((struct ibv_context *)sh->ctx)->async_fd,\n-\t\t    F_SETFL, flags | O_NONBLOCK);\n+\tflags = fcntl(ctx->async_fd, F_GETFL);\n+\tret = fcntl(ctx->async_fd, F_SETFL, flags | O_NONBLOCK);\n \tif (ret) {\n \t\tDRV_LOG(INFO, \"failed to change file descriptor async event\"\n \t\t\t\" queue\");\n \t} else {\n-\t\tsh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd;\n+\t\tsh->intr_handle.fd = ctx->async_fd;\n \t\tsh->intr_handle.type = RTE_INTR_HANDLE_EXT;\n \t\tif (rte_intr_callback_register(&sh->intr_handle,\n \t\t\t\t\tmlx5_dev_interrupt_handler, sh)) {\n@@ -2943,8 +2962,7 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)\n \tif (sh->devx) {\n #ifdef HAVE_IBV_DEVX_ASYNC\n \t\tsh->intr_handle_devx.fd = -1;\n-\t\tsh->devx_comp =\n-\t\t\t(void *)mlx5_glue->devx_create_cmd_comp(sh->ctx);\n+\t\tsh->devx_comp = 
(void *)mlx5_glue->devx_create_cmd_comp(ctx);\n \t\tstruct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;\n \t\tif (!devx_comp) {\n \t\t\tDRV_LOG(INFO, \"failed to allocate devx_comp.\");\ndiff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c\nindex d4fa202ac4..7c266981cd 100644\n--- a/drivers/net/mlx5/linux/mlx5_verbs.c\n+++ b/drivers/net/mlx5/linux/mlx5_verbs.c\n@@ -249,9 +249,9 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)\n \t\tcq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;\n \t}\n #endif\n-\treturn mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,\n-\t\t\t\t\t\t\t      &cq_attr.ibv,\n-\t\t\t\t\t\t\t      &cq_attr.mlx5));\n+\treturn mlx5_glue->cq_ex_to_cq\n+\t\t\t(mlx5_glue->dv_create_cq(priv->sh->dev_ctx->ctx,\n+\t\t\t\t\t\t &cq_attr.ibv, &cq_attr.mlx5));\n }\n \n /**\n@@ -288,7 +288,7 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)\n \t\t.max_wr = wqe_n >> rxq_data->sges_n,\n \t\t/* Max number of scatter/gather elements in a WR. */\n \t\t.max_sge = 1 << rxq_data->sges_n,\n-\t\t.pd = priv->sh->pd,\n+\t\t.pd = priv->sh->dev_ctx->pd,\n \t\t.cq = rxq_obj->ibv_cq,\n \t\t.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,\n \t\t.create_flags = (rxq_data->vlan_strip ?\n@@ -323,10 +323,11 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)\n \t\t\t.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,\n \t\t};\n \t}\n-\trxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,\n-\t\t\t\t\t      &wq_attr.mlx5);\n+\trxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->dev_ctx->ctx,\n+\t\t\t\t\t      &wq_attr.ibv, &wq_attr.mlx5);\n #else\n-\trxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);\n+\trxq_obj->wq = mlx5_glue->create_wq(priv->sh->dev_ctx->ctx,\n+\t\t\t\t\t   &wq_attr.ibv);\n #endif\n \tif (rxq_obj->wq) {\n \t\t/*\n@@ -378,8 +379,8 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \tMLX5_ASSERT(tmpl);\n \ttmpl->rxq_ctrl = rxq_ctrl;\n \tif (rxq_ctrl->irq) {\n-\t\ttmpl->ibv_channel =\n-\t\t\t\tmlx5_glue->create_comp_channel(priv->sh->ctx);\n+\t\ttmpl->ibv_channel = mlx5_glue->create_comp_channel\n+\t\t\t\t\t\t       (priv->sh->dev_ctx->ctx);\n \t\tif (!tmpl->ibv_channel) {\n \t\t\tDRV_LOG(ERR, \"Port %u: comp channel creation failure.\",\n \t\t\t\tdev->data->port_id);\n@@ -542,12 +543,13 @@ mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,\n \t/* Finalise indirection table. 
*/\n \tfor (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)\n \t\twq[i] = wq[j];\n-\tind_tbl->ind_table = mlx5_glue->create_rwq_ind_table(priv->sh->ctx,\n-\t\t\t\t\t&(struct ibv_rwq_ind_table_init_attr){\n+\tind_tbl->ind_table = mlx5_glue->create_rwq_ind_table\n+\t\t\t\t\t(priv->sh->dev_ctx->ctx,\n+\t\t\t\t\t &(struct ibv_rwq_ind_table_init_attr){\n \t\t\t\t\t\t.log_ind_tbl_size = log_n,\n \t\t\t\t\t\t.ind_tbl = wq,\n \t\t\t\t\t\t.comp_mask = 0,\n-\t\t\t\t\t});\n+\t\t\t\t\t });\n \tif (!ind_tbl->ind_table) {\n \t\trte_errno = errno;\n \t\treturn -rte_errno;\n@@ -609,7 +611,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,\n \t}\n #endif\n \tqp = mlx5_glue->dv_create_qp\n-\t\t\t(priv->sh->ctx,\n+\t\t\t(priv->sh->dev_ctx->ctx,\n \t\t\t &(struct ibv_qp_init_attr_ex){\n \t\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n \t\t\t\t.comp_mask =\n@@ -625,12 +627,12 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,\n \t\t\t\t\t.rx_hash_fields_mask = hash_fields,\n \t\t\t\t},\n \t\t\t\t.rwq_ind_tbl = ind_tbl->ind_table,\n-\t\t\t\t.pd = priv->sh->pd,\n+\t\t\t\t.pd = priv->sh->dev_ctx->pd,\n \t\t\t  },\n \t\t\t  &qp_init_attr);\n #else\n \tqp = mlx5_glue->create_qp_ex\n-\t\t\t(priv->sh->ctx,\n+\t\t\t(priv->sh->dev_ctx->ctx,\n \t\t\t &(struct ibv_qp_init_attr_ex){\n \t\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n \t\t\t\t.comp_mask =\n@@ -646,7 +648,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,\n \t\t\t\t\t.rx_hash_fields_mask = hash_fields,\n \t\t\t\t},\n \t\t\t\t.rwq_ind_tbl = ind_tbl->ind_table,\n-\t\t\t\t.pd = priv->sh->pd,\n+\t\t\t\t.pd = priv->sh->dev_ctx->pd,\n \t\t\t });\n #endif\n \tif (!qp) {\n@@ -715,7 +717,7 @@ static int\n mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct ibv_context *ctx = priv->sh->ctx;\n+\tstruct ibv_context *ctx = priv->sh->dev_ctx->ctx;\n \tstruct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;\n \n \tif (rxq)\n@@ -739,7 +741,7 @@ mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)\n \t\t\t\t\t\t    .wq_type = IBV_WQT_RQ,\n \t\t\t\t\t\t    .max_wr = 1,\n \t\t\t\t\t\t    .max_sge = 1,\n-\t\t\t\t\t\t    .pd = priv->sh->pd,\n+\t\t\t\t\t\t    .pd = priv->sh->dev_ctx->pd,\n \t\t\t\t\t\t    .cq = rxq->ibv_cq,\n \t\t\t\t\t      });\n \tif (!rxq->wq) {\n@@ -779,7 +781,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)\n \t\tgoto error;\n \trxq = priv->drop_queue.rxq;\n \tind_tbl = mlx5_glue->create_rwq_ind_table\n-\t\t\t\t(priv->sh->ctx,\n+\t\t\t\t(priv->sh->dev_ctx->ctx,\n \t\t\t\t &(struct ibv_rwq_ind_table_init_attr){\n \t\t\t\t\t.log_ind_tbl_size = 0,\n \t\t\t\t\t.ind_tbl = (struct ibv_wq **)&rxq->wq,\n@@ -792,7 +794,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)\n \t\trte_errno = errno;\n \t\tgoto error;\n \t}\n-\thrxq->qp = mlx5_glue->create_qp_ex(priv->sh->ctx,\n+\thrxq->qp = mlx5_glue->create_qp_ex(priv->sh->dev_ctx->ctx,\n \t\t &(struct ibv_qp_init_attr_ex){\n \t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n \t\t\t.comp_mask = IBV_QP_INIT_ATTR_PD |\n@@ -805,7 +807,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)\n \t\t\t\t.rx_hash_fields_mask = 0,\n \t\t\t\t},\n \t\t\t.rwq_ind_tbl = ind_tbl,\n-\t\t\t.pd = priv->sh->pd\n+\t\t\t.pd = priv->sh->dev_ctx->pd\n \t\t });\n \tif (!hrxq->qp) {\n \t\tDRV_LOG(DEBUG, \"Port %u cannot allocate QP for drop queue.\",\n@@ -893,7 +895,7 @@ mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)\n \tqp_attr.qp_type = IBV_QPT_RAW_PACKET,\n \t/* Do *NOT* enable this, completions events are 
managed per Tx burst. */\n \tqp_attr.sq_sig_all = 0;\n-\tqp_attr.pd = priv->sh->pd;\n+\tqp_attr.pd = priv->sh->dev_ctx->pd;\n \tqp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;\n \tif (txq_data->inlen_send)\n \t\tqp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;\n@@ -901,7 +903,7 @@ mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)\n \t\tqp_attr.max_tso_header = txq_ctrl->max_tso_header;\n \t\tqp_attr.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;\n \t}\n-\tqp_obj = mlx5_glue->create_qp_ex(priv->sh->ctx, &qp_attr);\n+\tqp_obj = mlx5_glue->create_qp_ex(priv->sh->dev_ctx->ctx, &qp_attr);\n \tif (qp_obj == NULL) {\n \t\tDRV_LOG(ERR, \"Port %u Tx queue %u QP creation failure.\",\n \t\t\tdev->data->port_id, idx);\n@@ -947,7 +949,8 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \t}\n \tcqe_n = desc / MLX5_TX_COMP_THRESH +\n \t\t1 + MLX5_TX_COMP_THRESH_INLINE_DIV;\n-\ttxq_obj->cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);\n+\ttxq_obj->cq = mlx5_glue->create_cq(priv->sh->dev_ctx->ctx, cqe_n,\n+\t\t\t\t\t   NULL, NULL, 0);\n \tif (txq_obj->cq == NULL) {\n \t\tDRV_LOG(ERR, \"Port %u Tx queue %u CQ creation failure.\",\n \t\t\tdev->data->port_id, idx);\n@@ -1070,7 +1073,7 @@ mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)\n #if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n-\tstruct ibv_context *ctx = sh->ctx;\n+\tstruct ibv_context *ctx = sh->dev_ctx->ctx;\n \tstruct mlx5dv_qp_init_attr qp_init_attr = {0};\n \tstruct {\n \t\tstruct ibv_cq_init_attr_ex ibv;\n@@ -1114,7 +1117,7 @@ mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)\n \t\t\t\t&(struct ibv_qp_init_attr_ex){\n \t\t\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n \t\t\t\t\t.comp_mask = IBV_QP_INIT_ATTR_PD,\n-\t\t\t\t\t.pd = sh->pd,\n+\t\t\t\t\t.pd = sh->dev_ctx->pd,\n \t\t\t\t\t.send_cq = sh->self_lb.ibv_cq,\n \t\t\t\t\t.recv_cq = sh->self_lb.ibv_cq,\n \t\t\t\t\t.cap.max_recv_wr = 1,\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 08c9a6ec6f..f5f325d35a 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -910,7 +910,8 @@ mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)\n \t * start after the common header that with the length of a DW(u32).\n \t */\n \tnode.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);\n-\tprf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->ctx, &node);\n+\tprf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->dev_ctx->ctx,\n+\t\t\t\t\t\t    &node);\n \tif (!prf->obj) {\n \t\tDRV_LOG(ERR, \"Failed to create flex parser node object.\");\n \t\treturn (rte_errno == 0) ? 
-ENODEV : -rte_errno;\n@@ -967,6 +968,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,\n \tuint32_t uar_mapping, retry;\n \tint err = 0;\n \tvoid *base_addr;\n+\tvoid *ctx = sh->dev_ctx->ctx;\n \n \tfor (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {\n #ifdef MLX5DV_UAR_ALLOC_TYPE_NC\n@@ -985,7 +987,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,\n \t\t */\n \t\tuar_mapping = 0;\n #endif\n-\t\tsh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, uar_mapping);\n+\t\tsh->tx_uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);\n #ifdef MLX5DV_UAR_ALLOC_TYPE_NC\n \t\tif (!sh->tx_uar &&\n \t\t    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {\n@@ -1004,7 +1006,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,\n \t\t\tDRV_LOG(DEBUG, \"Failed to allocate Tx DevX UAR (BF)\");\n \t\t\tuar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;\n \t\t\tsh->tx_uar = mlx5_glue->devx_alloc_uar\n-\t\t\t\t\t\t\t(sh->ctx, uar_mapping);\n+\t\t\t\t\t\t\t     (ctx, uar_mapping);\n \t\t} else if (!sh->tx_uar &&\n \t\t\t   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {\n \t\t\tif (config->dbnc == MLX5_TXDB_NCACHED)\n@@ -1017,7 +1019,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,\n \t\t\tDRV_LOG(DEBUG, \"Failed to allocate Tx DevX UAR (NC)\");\n \t\t\tuar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;\n \t\t\tsh->tx_uar = mlx5_glue->devx_alloc_uar\n-\t\t\t\t\t\t\t(sh->ctx, uar_mapping);\n+\t\t\t\t\t\t\t     (ctx, uar_mapping);\n \t\t}\n #endif\n \t\tif (!sh->tx_uar) {\n@@ -1044,8 +1046,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,\n \t}\n \tfor (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {\n \t\tuar_mapping = 0;\n-\t\tsh->devx_rx_uar = mlx5_glue->devx_alloc_uar\n-\t\t\t\t\t\t\t(sh->ctx, uar_mapping);\n+\t\tsh->devx_rx_uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);\n #ifdef MLX5DV_UAR_ALLOC_TYPE_NC\n \t\tif (!sh->devx_rx_uar &&\n \t\t    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {\n@@ -1057,7 +1058,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,\n \t\t\tDRV_LOG(DEBUG, \"Failed to allocate Rx DevX UAR (BF)\");\n \t\t\tuar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;\n \t\t\tsh->devx_rx_uar = mlx5_glue->devx_alloc_uar\n-\t\t\t\t\t\t\t(sh->ctx, uar_mapping);\n+\t\t\t\t\t\t\t     (ctx, uar_mapping);\n \t\t}\n #endif\n \t\tif (!sh->devx_rx_uar) {\n@@ -1098,6 +1099,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,\n  *\n  * @param[in] spawn\n  *   Pointer to the device attributes (name, port, etc).\n+ * @param dev_ctx\n+ *   Pointer to the context device data structure.\n  * @param[in] config\n  *   Pointer to device configuration structure.\n  *\n@@ -1107,6 +1110,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,\n  */\n struct mlx5_dev_ctx_shared *\n mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n+\t\t\t  struct mlx5_dev_ctx *dev_ctx,\n \t\t\t  const struct mlx5_dev_config *config)\n {\n \tstruct mlx5_dev_ctx_shared *sh;\n@@ -1137,13 +1141,13 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n \t\trte_errno  = ENOMEM;\n \t\tgoto exit;\n \t}\n-\tsh->numa_node = spawn->numa_node;\n+\tsh->devx = config->devx;\n+\tsh->numa_node = dev_ctx->numa_node;\n \tif (spawn->bond_info)\n \t\tsh->bond = *spawn->bond_info;\n-\terr = mlx5_os_open_device(spawn, config, sh);\n-\tif (!sh->ctx)\n-\t\tgoto error;\n-\terr = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);\n+\tpthread_mutex_init(&sh->txpp.mutex, NULL);\n+\tsh->dev_ctx = dev_ctx;\n+\terr = mlx5_os_get_dev_attr(sh->dev_ctx->ctx, &sh->device_attr);\n \tif (err) {\n \t\tDRV_LOG(DEBUG, 
\"mlx5_os_get_dev_attr() failed\");\n \t\tgoto error;\n@@ -1151,39 +1155,27 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n \tsh->refcnt = 1;\n \tsh->max_port = spawn->max_port;\n \tsh->reclaim_mode = config->reclaim_mode;\n-\tstrncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),\n+\tstrncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->dev_ctx->ctx),\n \t\tsizeof(sh->ibdev_name) - 1);\n-\tstrncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx),\n+\tstrncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->dev_ctx->ctx),\n \t\tsizeof(sh->ibdev_path) - 1);\n \t/*\n-\t * Setting port_id to max unallowed value means\n-\t * there is no interrupt subhandler installed for\n-\t * the given port index i.\n+\t * Setting port_id to max unallowed value means there is no interrupt\n+\t * subhandler installed for the given port index i.\n \t */\n \tfor (i = 0; i < sh->max_port; i++) {\n \t\tsh->port[i].ih_port_id = RTE_MAX_ETHPORTS;\n \t\tsh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;\n \t}\n-\tsh->pd = mlx5_os_alloc_pd(sh->ctx);\n-\tif (sh->pd == NULL) {\n-\t\tDRV_LOG(ERR, \"PD allocation failure\");\n-\t\terr = ENOMEM;\n-\t\tgoto error;\n-\t}\n \tif (sh->devx) {\n-\t\terr = mlx5_os_get_pdn(sh->pd, &sh->pdn);\n-\t\tif (err) {\n-\t\t\tDRV_LOG(ERR, \"Fail to extract pdn from PD\");\n-\t\t\tgoto error;\n-\t\t}\n-\t\tsh->td = mlx5_devx_cmd_create_td(sh->ctx);\n+\t\tsh->td = mlx5_devx_cmd_create_td(sh->dev_ctx->ctx);\n \t\tif (!sh->td) {\n \t\t\tDRV_LOG(ERR, \"TD allocation failure\");\n \t\t\terr = ENOMEM;\n \t\t\tgoto error;\n \t\t}\n \t\ttis_attr.transport_domain = sh->td->id;\n-\t\tsh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);\n+\t\tsh->tis = mlx5_devx_cmd_create_tis(sh->dev_ctx->ctx, &tis_attr);\n \t\tif (!sh->tis) {\n \t\t\tDRV_LOG(ERR, \"TIS allocation failure\");\n \t\t\terr = ENOMEM;\n@@ -1263,10 +1255,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n \t\tmlx5_glue->devx_free_uar(sh->devx_rx_uar);\n \tif (sh->tx_uar)\n \t\tmlx5_glue->devx_free_uar(sh->tx_uar);\n-\tif (sh->pd)\n-\t\tclaim_zero(mlx5_os_dealloc_pd(sh->pd));\n-\tif (sh->ctx)\n-\t\tclaim_zero(mlx5_glue->close_device(sh->ctx));\n \tmlx5_free(sh);\n \tMLX5_ASSERT(err > 0);\n \trte_errno = err;\n@@ -1278,7 +1266,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n  * all allocated resources and close handles.\n  *\n  * @param[in] sh\n- *   Pointer to mlx5_dev_ctx_shared object to free\n+ *   Pointer to mlx5_dev_ctx_shared object to free.\n  */\n void\n mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)\n@@ -1318,7 +1306,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)\n \t/*\n \t *  Ensure there is no async event handler installed.\n \t *  Only primary process handles async device events.\n-\t **/\n+\t */\n \tmlx5_flow_counters_mng_close(sh);\n \tif (sh->aso_age_mng) {\n \t\tmlx5_flow_aso_age_mng_close(sh);\n@@ -1336,16 +1324,12 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)\n \t\tmlx5_glue->devx_free_uar(sh->tx_uar);\n \t\tsh->tx_uar = NULL;\n \t}\n-\tif (sh->pd)\n-\t\tclaim_zero(mlx5_os_dealloc_pd(sh->pd));\n \tif (sh->tis)\n \t\tclaim_zero(mlx5_devx_cmd_destroy(sh->tis));\n \tif (sh->td)\n \t\tclaim_zero(mlx5_devx_cmd_destroy(sh->td));\n \tif (sh->devx_rx_uar)\n \t\tmlx5_glue->devx_free_uar(sh->devx_rx_uar);\n-\tif (sh->ctx)\n-\t\tclaim_zero(mlx5_glue->close_device(sh->ctx));\n \tMLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);\n \tpthread_mutex_destroy(&sh->txpp.mutex);\n \tmlx5_free(sh);\n@@ -1548,10 
+1532,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \t}\n \tif (!priv->sh)\n \t\treturn 0;\n-\tDRV_LOG(DEBUG, \"port %u closing device \\\"%s\\\"\",\n-\t\tdev->data->port_id,\n-\t\t((priv->sh->ctx != NULL) ?\n-\t\tmlx5_os_get_ctx_device_name(priv->sh->ctx) : \"\"));\n+\tDRV_LOG(DEBUG, \"port %u closing device \\\"%s\\\"\", dev->data->port_id,\n+\t\t((priv->sh->dev_ctx->ctx != NULL) ? priv->sh->ibdev_name : \"\"));\n \t/*\n \t * If default mreg copy action is removed at the stop stage,\n \t * the search will return none and nothing will be done anymore.\n@@ -2374,6 +2356,33 @@ mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)\n \treturn port_id;\n }\n \n+/**\n+ * Finds the device context that match the device.\n+ * The existence of multiple ethdev per pci device is only with representors.\n+ * On such case, it is enough to get only one of the ports as they all share\n+ * the same device context.\n+ *\n+ * @param dev\n+ *   Pointer to the device.\n+ *\n+ * @return\n+ *   Pointer to the device context if found, NULL otherwise.\n+ */\n+static struct mlx5_dev_ctx *\n+mlx5_get_dev_ctx(struct rte_device *dev)\n+{\n+\tstruct mlx5_priv *priv;\n+\tuint16_t port_id;\n+\n+\tport_id = rte_eth_find_next_of(0, dev);\n+\tif (port_id == RTE_MAX_ETHPORTS)\n+\t\treturn NULL;\n+\tpriv = rte_eth_devices[port_id].data->dev_private;\n+\tif (priv == NULL)\n+\t\treturn NULL;\n+\treturn priv->sh->dev_ctx;\n+}\n+\n /**\n  * Callback to remove a device.\n  *\n@@ -2388,6 +2397,7 @@ mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)\n int\n mlx5_net_remove(struct rte_device *dev)\n {\n+\tstruct mlx5_dev_ctx *dev_ctx = mlx5_get_dev_ctx(dev);\n \tuint16_t port_id;\n \tint ret = 0;\n \n@@ -2401,6 +2411,11 @@ mlx5_net_remove(struct rte_device *dev)\n \t\telse\n \t\t\tret |= rte_eth_dev_close(port_id);\n \t}\n+\n+\tif (dev_ctx) {\n+\t\tmlx5_dev_ctx_release(dev_ctx);\n+\t\tmlx5_free(dev_ctx);\n+\t}\n \treturn ret == 0 ? 0 : -EIO;\n }\n \ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 9a8e34535c..1e52b9ac9a 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1140,9 +1140,7 @@ struct mlx5_dev_ctx_shared {\n \tuint32_t reclaim_mode:1; /* Reclaim memory. */\n \tuint32_t max_port; /* Maximal IB device port index. */\n \tstruct mlx5_bond_info bond; /* Bonding information. */\n-\tvoid *ctx; /* Verbs/DV/DevX context. */\n-\tvoid *pd; /* Protection Domain. */\n-\tuint32_t pdn; /* Protection Domain number. */\n+\tstruct mlx5_dev_ctx *dev_ctx; /* Device context. */\n \tuint32_t tdn; /* Transport Domain number. */\n \tchar ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. 
*/\n \tchar ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */\n@@ -1497,7 +1495,8 @@ void mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh);\n int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);\n struct mlx5_dev_ctx_shared *\n mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,\n-\t\t\t   const struct mlx5_dev_config *config);\n+\t\t\t  struct mlx5_dev_ctx *dev_ctx,\n+\t\t\t  const struct mlx5_dev_config *config);\n void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);\n void mlx5_free_table_hash_list(struct mlx5_priv *priv);\n int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);\n@@ -1766,13 +1765,10 @@ int mlx5_flow_meter_flush(struct rte_eth_dev *dev,\n void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);\n \n /* mlx5_os.c */\n+\n struct rte_pci_driver;\n int mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *dev_attr);\n void mlx5_os_free_shared_dr(struct mlx5_priv *priv);\n-int mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,\n-\t\t\t const struct mlx5_dev_config *config,\n-\t\t\t struct mlx5_dev_ctx_shared *sh);\n-int mlx5_os_get_pdn(void *pd, uint32_t *pdn);\n int mlx5_os_net_probe(struct rte_device *dev);\n void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);\n void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh);\ndiff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c\nindex a1db53577a..3cafd46837 100644\n--- a/drivers/net/mlx5/mlx5_devx.c\n+++ b/drivers/net/mlx5/mlx5_devx.c\n@@ -276,12 +276,12 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)\n \trq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?\n \t\t\t\t\t\tMLX5_WQ_END_PAD_MODE_ALIGN :\n \t\t\t\t\t\tMLX5_WQ_END_PAD_MODE_NONE;\n-\trq_attr.wq_attr.pd = priv->sh->pdn;\n+\trq_attr.wq_attr.pd = priv->sh->dev_ctx->pdn;\n \trq_attr.counter_set_id = priv->counter_set_id;\n \t/* Create RQ using DevX API. */\n-\treturn mlx5_devx_rq_create(priv->sh->ctx, &rxq_ctrl->obj->rq_obj,\n-\t\t\t\t   wqe_size, log_desc_n, &rq_attr,\n-\t\t\t\t   rxq_ctrl->socket);\n+\treturn mlx5_devx_rq_create(priv->sh->dev_ctx->ctx,\n+\t\t\t\t   &rxq_ctrl->obj->rq_obj, wqe_size, log_desc_n,\n+\t\t\t\t   &rq_attr, rxq_ctrl->socket);\n }\n \n /**\n@@ -365,8 +365,8 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)\n \tcq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);\n \tlog_cqe_n = log2above(cqe_n);\n \t/* Create CQ using DevX API. 
*/\n-\tret = mlx5_devx_cq_create(sh->ctx, &rxq_ctrl->obj->cq_obj, log_cqe_n,\n-\t\t\t\t  &cq_attr, sh->numa_node);\n+\tret = mlx5_devx_cq_create(sh->dev_ctx->ctx, &rxq_ctrl->obj->cq_obj,\n+\t\t\t\t  log_cqe_n, &cq_attr, sh->numa_node);\n \tif (ret)\n \t\treturn ret;\n \tcq_obj = &rxq_ctrl->obj->cq_obj;\n@@ -442,7 +442,7 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)\n \t\t\tattr.wq_attr.log_hairpin_data_sz -\n \t\t\tMLX5_HAIRPIN_QUEUE_STRIDE;\n \tattr.counter_set_id = priv->counter_set_id;\n-\ttmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,\n+\ttmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->dev_ctx->ctx, &attr,\n \t\t\t\t\t   rxq_ctrl->socket);\n \tif (!tmpl->rq) {\n \t\tDRV_LOG(ERR,\n@@ -486,8 +486,7 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \t\t\t  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;\n \n \t\ttmpl->devx_channel = mlx5_os_devx_create_event_channel\n-\t\t\t\t\t\t\t\t(priv->sh->ctx,\n-\t\t\t\t\t\t\t\t devx_ev_flag);\n+\t\t\t\t\t (priv->sh->dev_ctx->ctx, devx_ev_flag);\n \t\tif (!tmpl->devx_channel) {\n \t\t\trte_errno = errno;\n \t\t\tDRV_LOG(ERR, \"Failed to create event channel %d.\",\n@@ -602,7 +601,8 @@ mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,\n \t\t\t\t\t\t\tind_tbl->queues_n);\n \tif (!rqt_attr)\n \t\treturn -rte_errno;\n-\tind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);\n+\tind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->dev_ctx->ctx,\n+\t\t\t\t\t\trqt_attr);\n \tmlx5_free(rqt_attr);\n \tif (!ind_tbl->rqt) {\n \t\tDRV_LOG(ERR, \"Port %u cannot create DevX RQT.\",\n@@ -770,7 +770,7 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,\n \n \tmlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,\n \t\t\t       hrxq->ind_table, tunnel, &tir_attr);\n-\thrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);\n+\thrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->dev_ctx->ctx, &tir_attr);\n \tif (!hrxq->tir) {\n \t\tDRV_LOG(ERR, \"Port %u cannot create DevX TIR.\",\n \t\t\tdev->data->port_id);\n@@ -936,7 +936,7 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)\n \t\t\tattr.wq_attr.log_hairpin_data_sz -\n \t\t\tMLX5_HAIRPIN_QUEUE_STRIDE;\n \tattr.tis_num = priv->sh->tis->id;\n-\ttmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);\n+\ttmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->dev_ctx->ctx, &attr);\n \tif (!tmpl->sq) {\n \t\tDRV_LOG(ERR,\n \t\t\t\"Port %u tx hairpin queue %u can't create SQ object.\",\n@@ -994,15 +994,15 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,\n \t\t.tis_lst_sz = 1,\n \t\t.tis_num = priv->sh->tis->id,\n \t\t.wq_attr = (struct mlx5_devx_wq_attr){\n-\t\t\t.pd = priv->sh->pdn,\n+\t\t\t.pd = priv->sh->dev_ctx->pdn,\n \t\t\t.uar_page =\n \t\t\t\t mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),\n \t\t},\n \t\t.ts_format = mlx5_ts_format_conv(priv->sh->sq_ts_format),\n \t};\n \t/* Create Send Queue object with DevX. */\n-\treturn mlx5_devx_sq_create(priv->sh->ctx, &txq_obj->sq_obj, log_desc_n,\n-\t\t\t\t   &sq_attr, priv->sh->numa_node);\n+\treturn mlx5_devx_sq_create(priv->sh->dev_ctx->ctx, &txq_obj->sq_obj,\n+\t\t\t\t   log_desc_n, &sq_attr, priv->sh->numa_node);\n }\n #endif\n \n@@ -1058,8 +1058,8 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \t\treturn 0;\n \t}\n \t/* Create completion queue object with DevX. 
*/\n-\tret = mlx5_devx_cq_create(sh->ctx, &txq_obj->cq_obj, log_desc_n,\n-\t\t\t\t  &cq_attr, priv->sh->numa_node);\n+\tret = mlx5_devx_cq_create(sh->dev_ctx->ctx, &txq_obj->cq_obj,\n+\t\t\t\t  log_desc_n, &cq_attr, sh->numa_node);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Port %u Tx queue %u CQ creation failure.\",\n \t\t\tdev->data->port_id, idx);\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 4762fa0f5f..b97790cf38 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -7604,7 +7604,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)\n \t}\n \tmem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;\n \tsize = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;\n-\tmem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,\n+\tmem_mng->umem = mlx5_os_umem_reg(sh->dev_ctx->ctx, mem, size,\n \t\t\t\t\t\t IBV_ACCESS_LOCAL_WRITE);\n \tif (!mem_mng->umem) {\n \t\trte_errno = errno;\n@@ -7615,10 +7615,10 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)\n \tmkey_attr.addr = (uintptr_t)mem;\n \tmkey_attr.size = size;\n \tmkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);\n-\tmkey_attr.pd = sh->pdn;\n+\tmkey_attr.pd = sh->dev_ctx->pdn;\n \tmkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;\n \tmkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;\n-\tmem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);\n+\tmem_mng->dm = mlx5_devx_cmd_mkey_create(sh->dev_ctx->ctx, &mkey_attr);\n \tif (!mem_mng->dm) {\n \t\tmlx5_os_umem_dereg(mem_mng->umem);\n \t\trte_errno = errno;\ndiff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c\nindex e11327a11b..6b90d0d7c1 100644\n--- a/drivers/net/mlx5/mlx5_flow_aso.c\n+++ b/drivers/net/mlx5/mlx5_flow_aso.c\n@@ -103,7 +103,7 @@ mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,\n \t\tDRV_LOG(ERR, \"Failed to create ASO bits mem for MR.\");\n \t\treturn -1;\n \t}\n-\tret = sh->share_cache.reg_mr_cb(sh->pd, mr->addr, length, mr);\n+\tret = sh->share_cache.reg_mr_cb(sh->dev_ctx->pd, mr->addr, length, mr);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Failed to create direct Mkey.\");\n \t\tmlx5_free(mr->addr);\n@@ -309,24 +309,27 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,\n \t\t    enum mlx5_access_aso_opc_mod aso_opc_mod)\n {\n \tuint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;\n+\tstruct mlx5_dev_ctx *dev_ctx = sh->dev_ctx;\n \n \tswitch (aso_opc_mod) {\n \tcase ASO_OPC_MOD_FLOW_HIT:\n \t\tif (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *\n \t\t\t\t    sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))\n \t\t\treturn -1;\n-\t\tif (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,\n-\t\t\t\t  sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,\n-\t\t\t\t  sh->sq_ts_format)) {\n+\t\tif (mlx5_aso_sq_create(dev_ctx->ctx, &sh->aso_age_mng->aso_sq,\n+\t\t\t\t       0, sh->tx_uar, dev_ctx->pdn,\n+\t\t\t\t       MLX5_ASO_QUEUE_LOG_DESC,\n+\t\t\t\t       sh->sq_ts_format)) {\n \t\t\tmlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);\n \t\t\treturn -1;\n \t\t}\n \t\tmlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);\n \t\tbreak;\n \tcase ASO_OPC_MOD_POLICER:\n-\t\tif (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->pools_mng.sq, 0,\n-\t\t\t\t  sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,\n-\t\t\t\t  sh->sq_ts_format))\n+\t\tif (mlx5_aso_sq_create(dev_ctx->ctx, &sh->mtrmng->pools_mng.sq,\n+\t\t\t\t       0, sh->tx_uar, dev_ctx->pdn,\n+\t\t\t\t       MLX5_ASO_QUEUE_LOG_DESC,\n+\t\t\t\t       
sh->sq_ts_format))\n \t\t\treturn -1;\n \t\tmlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);\n \t\tbreak;\n@@ -335,9 +338,10 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,\n \t\tif (mlx5_aso_reg_mr(sh, 64 * sq_desc_n,\n \t\t\t\t    &sh->ct_mng->aso_sq.mr, 0))\n \t\t\treturn -1;\n-\t\tif (mlx5_aso_sq_create(sh->ctx, &sh->ct_mng->aso_sq, 0,\n-\t\t\t\tsh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,\n-\t\t\t\tsh->sq_ts_format)) {\n+\t\tif (mlx5_aso_sq_create(dev_ctx->ctx, &sh->ct_mng->aso_sq, 0,\n+\t\t\t\t       sh->tx_uar, dev_ctx->pdn,\n+\t\t\t\t       MLX5_ASO_QUEUE_LOG_DESC,\n+\t\t\t\t       sh->sq_ts_format)) {\n \t\t\tmlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);\n \t\t\treturn -1;\n \t\t}\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 5bb6d89a3f..6a336ac128 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -3684,8 +3684,8 @@ flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)\n \t}\n \t*resource = *ctx_resource;\n \tresource->idx = idx;\n-\tret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,\n-\t\t\t\t\t\t\t      resource,\n+\tret = mlx5_flow_os_create_flow_action_packet_reformat(sh->dev_ctx->ctx,\n+\t\t\t\t\t\t\t      domain, resource,\n \t\t\t\t\t\t\t     &resource->action);\n \tif (ret) {\n \t\tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);\n@@ -5485,7 +5485,7 @@ flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)\n \telse\n \t\tns = sh->rx_domain;\n \tret = mlx5_flow_os_create_flow_action_modify_header\n-\t\t\t\t\t(sh->ctx, ns, entry,\n+\t\t\t\t\t(sh->dev_ctx->ctx, ns, entry,\n \t\t\t\t\t data_len, &entry->action);\n \tif (ret) {\n \t\tmlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);\n@@ -6096,6 +6096,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;\n+\tstruct mlx5_dev_ctx *dev_ctx = priv->sh->dev_ctx;\n \tstruct mlx5_flow_counter_pool *pool;\n \tstruct mlx5_counters tmp_tq;\n \tstruct mlx5_devx_obj *dcs = NULL;\n@@ -6107,7 +6108,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,\n \n \tif (fallback) {\n \t\t/* bulk_bitmap must be 0 for single counter allocation. 
*/\n-\t\tdcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);\n+\t\tdcs = mlx5_devx_cmd_flow_counter_alloc(dev_ctx->ctx, 0);\n \t\tif (!dcs)\n \t\t\treturn NULL;\n \t\tpool = flow_dv_find_pool_by_id(cmng, dcs->id);\n@@ -6125,7 +6126,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,\n \t\t*cnt_free = cnt;\n \t\treturn pool;\n \t}\n-\tdcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);\n+\tdcs = mlx5_devx_cmd_flow_counter_alloc(dev_ctx->ctx, 0x4);\n \tif (!dcs) {\n \t\trte_errno = ENODATA;\n \t\treturn NULL;\n@@ -6477,16 +6478,17 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev,\n \t\t\t     struct mlx5_aso_mtr **mtr_free)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_aso_mtr_pools_mng *pools_mng =\n-\t\t\t\t&priv->sh->mtrmng->pools_mng;\n+\tstruct mlx5_dev_ctx *dev_ctx = priv->sh->dev_ctx;\n+\tstruct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;\n \tstruct mlx5_aso_mtr_pool *pool = NULL;\n \tstruct mlx5_devx_obj *dcs = NULL;\n \tuint32_t i;\n \tuint32_t log_obj_size;\n \n \tlog_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);\n-\tdcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,\n-\t\t\tpriv->sh->pdn, log_obj_size);\n+\tdcs = mlx5_devx_cmd_create_flow_meter_aso_obj(dev_ctx->ctx,\n+\t\t\t\t\t\t      dev_ctx->pdn,\n+\t\t\t\t\t\t      log_obj_size);\n \tif (!dcs) {\n \t\trte_errno = ENODATA;\n \t\treturn NULL;\n@@ -6508,8 +6510,7 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev,\n \tpools_mng->n_valid++;\n \tfor (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {\n \t\tpool->mtrs[i].offset = i;\n-\t\tLIST_INSERT_HEAD(&pools_mng->meters,\n-\t\t\t\t\t\t&pool->mtrs[i], next);\n+\t\tLIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);\n \t}\n \tpool->mtrs[0].offset = 0;\n \t*mtr_free = &pool->mtrs[0];\n@@ -9181,7 +9182,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,\n \t\t}\n \t} else {\n \t\t/* Create a GENEVE TLV object and resource. 
*/\n-\t\tobj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,\n+\t\tobj = mlx5_devx_cmd_create_geneve_tlv_option(sh->dev_ctx->ctx,\n \t\t\t\tgeneve_opt_v->option_class,\n \t\t\t\tgeneve_opt_v->option_type,\n \t\t\t\tgeneve_opt_v->option_len);\n@@ -10539,7 +10540,8 @@ flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)\n \tdv_attr.priority = ref->priority;\n \tif (tbl->is_egress)\n \t\tdv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;\n-\tret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,\n+\tret = mlx5_flow_os_create_flow_matcher(sh->dev_ctx->ctx, &dv_attr,\n+\t\t\t\t\t       tbl->tbl.obj,\n \t\t\t\t\t       &resource->matcher_object);\n \tif (ret) {\n \t\tmlx5_free(resource);\n@@ -11958,8 +11960,8 @@ flow_dv_age_pool_create(struct rte_eth_dev *dev,\n \tstruct mlx5_devx_obj *obj = NULL;\n \tuint32_t i;\n \n-\tobj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,\n-\t\t\t\t\t\t    priv->sh->pdn);\n+\tobj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->dev_ctx->ctx,\n+\t\t\t\t\t\t    priv->sh->dev_ctx->pdn);\n \tif (!obj) {\n \t\trte_errno = ENODATA;\n \t\tDRV_LOG(ERR, \"Failed to create flow_hit_aso_obj using DevX.\");\n@@ -12371,13 +12373,15 @@ flow_dv_ct_pool_create(struct rte_eth_dev *dev,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;\n+\tstruct mlx5_dev_ctx *dev_ctx = priv->sh->dev_ctx;\n \tstruct mlx5_aso_ct_pool *pool = NULL;\n \tstruct mlx5_devx_obj *obj = NULL;\n \tuint32_t i;\n \tuint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);\n \n-\tobj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,\n-\t\t\t\t\t\tpriv->sh->pdn, log_obj_size);\n+\tobj = mlx5_devx_cmd_create_conn_track_offload_obj(dev_ctx->ctx,\n+\t\t\t\t\t\t\t  dev_ctx->pdn,\n+\t\t\t\t\t\t\t  log_obj_size);\n \tif (!obj) {\n \t\trte_errno = ENODATA;\n \t\tDRV_LOG(ERR, \"Failed to create conn_track_offload_obj using DevX.\");\n@@ -17123,8 +17127,7 @@ flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,\n \t\t\tbreak;\n \t\tcase MLX5_FLOW_FATE_QUEUE:\n \t\t\tsub_policy = mtr_policy->sub_policys[domain][0];\n-\t\t\t__flow_dv_destroy_sub_policy_rules(dev,\n-\t\t\t\t\t\t\t   sub_policy);\n+\t\t\t__flow_dv_destroy_sub_policy_rules(dev, sub_policy);\n \t\t\tbreak;\n \t\tdefault:\n \t\t\t/*Other actions without queue and do nothing*/\n@@ -17173,8 +17176,8 @@ mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)\n \t\tgoto err;\n \tdv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);\n \t__flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);\n-\tret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,\n-\t\t\t\t\t       &matcher);\n+\tret = mlx5_flow_os_create_flow_matcher(sh->dev_ctx->ctx, &dv_attr,\n+\t\t\t\t\t       tbl->obj, &matcher);\n \tif (ret)\n \t\tgoto err;\n \t__flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);\n@@ -17242,7 +17245,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)\n \t\t\t\t\t0, 0, 0, NULL);\n \tif (!tbl)\n \t\tgoto err;\n-\tdcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);\n+\tdcs = mlx5_devx_cmd_flow_counter_alloc(sh->dev_ctx->ctx, 0x4);\n \tif (!dcs)\n \t\tgoto err;\n \tret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,\n@@ -17251,8 +17254,8 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)\n \t\tgoto err;\n \tdv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);\n \t__flow_dv_adjust_buf_size(&mask.size, 
dv_attr.match_criteria_enable);\n-\tret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,\n-\t\t\t\t\t       &matcher);\n+\tret = mlx5_flow_os_create_flow_matcher(sh->dev_ctx->ctx, &dv_attr,\n+\t\t\t\t\t       tbl->obj, &matcher);\n \tif (ret)\n \t\tgoto err;\n \t__flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);\ndiff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c\nindex b93fd4d2c9..2c132a8c16 100644\n--- a/drivers/net/mlx5/mlx5_flow_verbs.c\n+++ b/drivers/net/mlx5/mlx5_flow_verbs.c\n@@ -198,7 +198,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,\n {\n #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct ibv_context *ctx = priv->sh->ctx;\n+\tstruct ibv_context *ctx = priv->sh->dev_ctx->ctx;\n \tstruct ibv_counter_set_init_attr init = {\n \t\t\t .counter_set_id = counter->shared_info.id};\n \n@@ -210,7 +210,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,\n \treturn 0;\n #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct ibv_context *ctx = priv->sh->ctx;\n+\tstruct ibv_context *ctx = priv->sh->dev_ctx->ctx;\n \tstruct ibv_counters_init_attr init = {0};\n \tstruct ibv_counter_attach_attr attach;\n \tint ret;\ndiff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c\nindex 44afda731f..b7297f22fe 100644\n--- a/drivers/net/mlx5/mlx5_mr.c\n+++ b/drivers/net/mlx5/mlx5_mr.c\n@@ -84,7 +84,7 @@ mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)\n \tstruct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;\n \tstruct mlx5_priv *priv = rxq_ctrl->priv;\n \n-\treturn mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,\n+\treturn mlx5_mr_addr2mr_bh(priv->sh->dev_ctx->pd, &priv->mp_id,\n \t\t\t\t  &priv->sh->share_cache, mr_ctrl, addr,\n \t\t\t\t  priv->config.mr_ext_memseg_en);\n }\n@@ -108,7 +108,7 @@ mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)\n \tstruct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;\n \tstruct mlx5_priv *priv = txq_ctrl->priv;\n \n-\treturn mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,\n+\treturn mlx5_mr_addr2mr_bh(priv->sh->dev_ctx->pd, &priv->mp_id,\n \t\t\t\t  &priv->sh->share_cache, mr_ctrl, addr,\n \t\t\t\t  priv->config.mr_ext_memseg_en);\n }\n@@ -177,7 +177,7 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,\n \t\treturn;\n \tDRV_LOG(DEBUG, \"port %u register MR for chunk #%d of mempool (%s)\",\n \t\tdev->data->port_id, mem_idx, mp->name);\n-\tmr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,\n+\tmr = mlx5_create_mr_ext(sh->dev_ctx->pd, addr, len, mp->socket_id,\n \t\t\t\tsh->share_cache.reg_mr_cb);\n \tif (!mr) {\n \t\tDRV_LOG(WARNING,\n@@ -193,7 +193,7 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,\n \tmlx5_mr_insert_cache(&sh->share_cache, mr);\n \trte_rwlock_write_unlock(&sh->share_cache.rwlock);\n \t/* Insert to the local cache table */\n-\tmlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache,\n+\tmlx5_mr_addr2mr_bh(sh->dev_ctx->pd, &priv->mp_id, &sh->share_cache,\n \t\t\t   mr_ctrl, addr, priv->config.mr_ext_memseg_en);\n }\n \n@@ -253,8 +253,8 @@ mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,\n \t}\n \tpriv = dev->data->dev_private;\n \tsh = priv->sh;\n-\tmr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,\n-\t\t\t\tsh->share_cache.reg_mr_cb);\n+\tmr = mlx5_create_mr_ext(sh->dev_ctx->pd, (uintptr_t)addr, len,\n+\t\t\t\tSOCKET_ID_ANY, sh->share_cache.reg_mr_cb);\n \tif (!mr) {\n 
\t\tDRV_LOG(WARNING,\n \t\t\t\"port %u unable to dma map\", dev->data->port_id);\n@@ -409,7 +409,7 @@ mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,\n \tif (data->ret < 0)\n \t\treturn;\n \t/* Register address of the chunk and update local caches. */\n-\tlkey = mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,\n+\tlkey = mlx5_mr_addr2mr_bh(priv->sh->dev_ctx->pd, &priv->mp_id,\n \t\t\t\t  &priv->sh->share_cache, data->mr_ctrl,\n \t\t\t\t  (uintptr_t)memhdr->addr,\n \t\t\t\t  priv->config.mr_ext_memseg_en);\ndiff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c\nindex 4f6da9f2d1..ff1c3d204c 100644\n--- a/drivers/net/mlx5/mlx5_txpp.c\n+++ b/drivers/net/mlx5/mlx5_txpp.c\n@@ -49,7 +49,7 @@ static int\n mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)\n {\n \tMLX5_ASSERT(!sh->txpp.echan);\n-\tsh->txpp.echan = mlx5_os_devx_create_event_channel(sh->ctx,\n+\tsh->txpp.echan = mlx5_os_devx_create_event_channel(sh->dev_ctx->ctx,\n \t\t\tMLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);\n \tif (!sh->txpp.echan) {\n \t\trte_errno = errno;\n@@ -104,7 +104,7 @@ mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)\n \tMLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,\n \t\t sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);\n \tsh->txpp.pp = mlx5_glue->dv_alloc_pp\n-\t\t\t\t(sh->ctx, sizeof(pp), &pp,\n+\t\t\t\t(sh->dev_ctx->ctx, sizeof(pp), &pp,\n \t\t\t\t MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);\n \tif (sh->txpp.pp == NULL) {\n \t\tDRV_LOG(ERR, \"Failed to allocate packet pacing index.\");\n@@ -232,7 +232,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)\n \t\t.tis_lst_sz = 1,\n \t\t.tis_num = sh->tis->id,\n \t\t.wq_attr = (struct mlx5_devx_wq_attr){\n-\t\t\t.pd = sh->pdn,\n+\t\t\t.pd = sh->dev_ctx->pdn,\n \t\t\t.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),\n \t\t},\n \t\t.ts_format = mlx5_ts_format_conv(sh->sq_ts_format),\n@@ -245,7 +245,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)\n \tint ret;\n \n \t/* Create completion queue object for Rearm Queue. */\n-\tret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,\n+\tret = mlx5_devx_cq_create(sh->dev_ctx->ctx, &wq->cq_obj,\n \t\t\t\t  log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,\n \t\t\t\t  sh->numa_node);\n \tif (ret) {\n@@ -259,7 +259,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)\n \t/* Create send queue object for Rearm Queue. */\n \tsq_attr.cqn = wq->cq_obj.cq->id;\n \t/* There should be no WQE leftovers in the cyclic queue. */\n-\tret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj,\n+\tret = mlx5_devx_sq_create(sh->dev_ctx->ctx, &wq->sq_obj,\n \t\t\t\t  log2above(MLX5_TXPP_REARM_SQ_SIZE), &sq_attr,\n \t\t\t\t  sh->numa_node);\n \tif (ret) {\n@@ -409,7 +409,7 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)\n \tsh->txpp.ts_p = 0;\n \tsh->txpp.ts_n = 0;\n \t/* Create completion queue object for Clock Queue. 
*/\n-\tret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,\n+\tret = mlx5_devx_cq_create(sh->dev_ctx->ctx, &wq->cq_obj,\n \t\t\t\t  log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,\n \t\t\t\t  sh->numa_node);\n \tif (ret) {\n@@ -444,9 +444,10 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)\n \tsq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;\n \tsq_attr.wq_attr.cd_slave = 1;\n \tsq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);\n-\tsq_attr.wq_attr.pd = sh->pdn;\n+\tsq_attr.wq_attr.pd = sh->dev_ctx->pdn;\n \tsq_attr.ts_format = mlx5_ts_format_conv(sh->sq_ts_format);\n-\tret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj, log2above(wq->sq_size),\n+\tret = mlx5_devx_sq_create(sh->dev_ctx->ctx, &wq->sq_obj,\n+\t\t\t\t  log2above(wq->sq_size),\n \t\t\t\t  &sq_attr, sh->numa_node);\n \tif (ret) {\n \t\trte_errno = errno;\ndiff --git a/drivers/net/mlx5/windows/mlx5_ethdev_os.c b/drivers/net/mlx5/windows/mlx5_ethdev_os.c\nindex c709dd19be..352dfa9331 100644\n--- a/drivers/net/mlx5/windows/mlx5_ethdev_os.c\n+++ b/drivers/net/mlx5/windows/mlx5_ethdev_os.c\n@@ -38,7 +38,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN])\n \t\treturn -rte_errno;\n \t}\n \tpriv = dev->data->dev_private;\n-\tcontext_obj = (mlx5_context_st *)priv->sh->ctx;\n+\tcontext_obj = (mlx5_context_st *)priv->sh->dev_ctx->ctx;\n \tmemcpy(mac, context_obj->mlx5_dev.eth_mac, RTE_ETHER_ADDR_LEN);\n \treturn 0;\n }\n@@ -66,7 +66,7 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[MLX5_NAMESIZE])\n \t\treturn -rte_errno;\n \t}\n \tpriv = dev->data->dev_private;\n-\tcontext_obj = (mlx5_context_st *)priv->sh->ctx;\n+\tcontext_obj = (mlx5_context_st *)priv->sh->dev_ctx->ctx;\n \tstrncpy(*ifname, context_obj->mlx5_dev.name, MLX5_NAMESIZE);\n \treturn 0;\n }\n@@ -93,7 +93,7 @@ mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)\n \t\treturn -rte_errno;\n \t}\n \tpriv = dev->data->dev_private;\n-\tcontext_obj = (mlx5_context_st *)priv->sh->ctx;\n+\tcontext_obj = (mlx5_context_st *)priv->sh->dev_ctx->ctx;\n \t*mtu = context_obj->mlx5_dev.mtu_bytes;\n \treturn 0;\n }\n@@ -253,7 +253,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)\n \t\treturn -rte_errno;\n \t}\n \tpriv = dev->data->dev_private;\n-\tcontext_obj = (mlx5_context_st *)priv->sh->ctx;\n+\tcontext_obj = (mlx5_context_st *)priv->sh->dev_ctx->ctx;\n \tdev_link.link_speed = context_obj->mlx5_dev.link_speed / (1000 * 1000);\n \tdev_link.link_status =\n \t      (context_obj->mlx5_dev.link_state == 1 && !mlx5_is_removed(dev))\n@@ -359,7 +359,8 @@ mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)\n \tint err;\n \tstruct mlx5_devx_clock mlx5_clock;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tmlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->ctx;\n+\tmlx5_context_st *context_obj =\n+\t\t\t(mlx5_context_st *)priv->sh->dev_ctx->ctx;\n \n \terr = mlx5_glue->query_rt_values(context_obj, &mlx5_clock);\n \tif (err != 0) {\n@@ -383,7 +384,8 @@ int\n mlx5_is_removed(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tmlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->ctx;\n+\tmlx5_context_st *context_obj =\n+\t\t\t(mlx5_context_st *)priv->sh->dev_ctx->ctx;\n \n \tif (*context_obj->shutdown_event_obj.p_flag)\n \t\treturn 1;\ndiff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c\nindex 2f5c29662e..f6a7fbaca1 100644\n--- a/drivers/net/mlx5/windows/mlx5_os.c\n+++ b/drivers/net/mlx5/windows/mlx5_os.c\n@@ -240,50 
+240,6 @@ mlx5_os_set_nonblock_channel_fd(int fd)\n \treturn -ENOTSUP;\n }\n \n-/**\n- * Function API open device under Windows\n- *\n- * This function calls the Windows glue APIs to open a device.\n- *\n- * @param[in] spawn\n- *   Pointer to the device attributes (name, port, etc).\n- * @param[out] config\n- *   Pointer to device configuration structure.\n- * @param[out] sh\n- *   Pointer to shared context structure.\n- *\n- * @return\n- *   0 on success, a positive error value otherwise.\n- */\n-int\n-mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,\n-\t\t const struct mlx5_dev_config *config,\n-\t\t struct mlx5_dev_ctx_shared *sh)\n-{\n-\tRTE_SET_USED(config);\n-\tint err = 0;\n-\tstruct mlx5_context *mlx5_ctx;\n-\n-\tpthread_mutex_init(&sh->txpp.mutex, NULL);\n-\t/* Set numa node from pci probe */\n-\tsh->numa_node = spawn->pci_dev->device.numa_node;\n-\n-\t/* Try to open device with DevX */\n-\trte_errno = 0;\n-\tsh->ctx = mlx5_glue->open_device(spawn->phys_dev);\n-\tif (!sh->ctx) {\n-\t\tDRV_LOG(ERR, \"open_device failed\");\n-\t\terr = errno;\n-\t\treturn err;\n-\t}\n-\tsh->devx = 1;\n-\tmlx5_ctx = (struct mlx5_context *)sh->ctx;\n-\terr = mlx5_glue->query_device(spawn->phys_dev, &mlx5_ctx->mlx5_dev);\n-\tif (err)\n-\t\tDRV_LOG(ERR, \"Failed to query device context fields.\");\n-\treturn err;\n-}\n-\n /**\n  * DV flow counter mode detect and config.\n  *\n@@ -328,6 +284,8 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)\n  *\n  * @param dpdk_dev\n  *   Backing DPDK device.\n+ * @param dev_ctx\n+ *   Pointer to the context device data structure.\n  * @param spawn\n  *   Verbs device parameters (name, port, switch_info) to spawn.\n  * @param config\n@@ -341,6 +299,7 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)\n  */\n static struct rte_eth_dev *\n mlx5_dev_spawn(struct rte_device *dpdk_dev,\n+\t       struct mlx5_dev_ctx *dev_ctx,\n \t       struct mlx5_dev_spawn_data *spawn,\n \t       struct mlx5_dev_config *config)\n {\n@@ -378,21 +337,20 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\tgoto error;\n \t}\n \tmlx5_malloc_mem_select(config->sys_mem_en);\n-\tsh = mlx5_alloc_shared_dev_ctx(spawn, config);\n+\tsh = mlx5_alloc_shared_dev_ctx(spawn, dev_ctx, config);\n \tif (!sh)\n \t\treturn NULL;\n-\tconfig->devx = sh->devx;\n \t/* Initialize the shutdown event in mlx5_dev_spawn to\n \t * support mlx5_is_removed for Windows.\n \t */\n-\terr = mlx5_glue->devx_init_showdown_event(sh->ctx);\n+\terr = mlx5_glue->devx_init_showdown_event(sh->dev_ctx->ctx);\n \tif (err) {\n \t\tDRV_LOG(ERR, \"failed to init showdown event: %s\",\n \t\t\tstrerror(errno));\n \t\tgoto error;\n \t}\n \tDRV_LOG(DEBUG, \"MPW isn't supported\");\n-\tmlx5_os_get_dev_attr(sh->ctx, &device_attr);\n+\tmlx5_os_get_dev_attr(sh->dev_ctx->ctx, &device_attr);\n \tconfig->swp = 0;\n \tconfig->ind_table_max_size =\n \t\tsh->device_attr.max_rwq_indirection_table_size;\n@@ -485,7 +443,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\tconfig->cqe_comp = 0;\n \t}\n \tif (config->devx) {\n-\t\terr = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);\n+\t\terr = mlx5_devx_cmd_query_hca_attr(sh->dev_ctx->ctx,\n+\t\t\t\t\t\t   &config->hca_attr);\n \t\tif (err) {\n \t\t\terr = -err;\n \t\t\tgoto error;\n@@ -508,7 +467,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \n \t\terr = config->hca_attr.access_register_user ?\n \t\t\tmlx5_devx_cmd_register_read\n-\t\t\t\t(sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,\n+\t\t\t\t(sh->dev_ctx->ctx, MLX5_REGISTER_ID_MTUTC, 0,\n 
\t\t\t\treg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;\n \t\tif (!err) {\n \t\t\tuint32_t ts_mode;\n@@ -701,7 +660,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \tif (eth_dev != NULL) {\n \t\t/* mac_addrs must not be freed alone because part of\n \t\t * dev_private\n-\t\t **/\n+\t\t */\n \t\teth_dev->data->mac_addrs = NULL;\n \t\trte_eth_dev_release_port(eth_dev);\n \t}\n@@ -919,15 +878,13 @@ int\n mlx5_os_net_probe(struct rte_device *dev)\n {\n \tstruct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);\n+\tstruct mlx5_dev_ctx *dev_ctx;\n \tstruct mlx5_dev_spawn_data spawn = { .pf_bond = -1 };\n-\tstruct devx_device_bdf *devx_bdf_match = mlx5_os_get_devx_device(dev);\n \tstruct mlx5_dev_config dev_config;\n \tunsigned int dev_config_vf;\n \tint ret;\n \tuint32_t restore;\n \n-\tif (devx_bdf_match == NULL)\n-\t\treturn -rte_errno;\n \tif (rte_eal_process_type() == RTE_PROC_SECONDARY) {\n \t\tDRV_LOG(ERR, \"Secondary process is not supported on Windows.\");\n \t\treturn -ENOTSUP;\n@@ -938,11 +895,20 @@ mlx5_os_net_probe(struct rte_device *dev)\n \t\t\tstrerror(rte_errno));\n \t\treturn -rte_errno;\n \t}\n+\tdev_ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_dev_ctx),\n+\t\t\t      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\tif (dev_ctx == NULL) {\n+\t\tDRV_LOG(ERR, \"Device context allocation failure.\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn -rte_errno;\n+\t}\n+\tret = mlx5_dev_ctx_prepare(dev_ctx, dev, MLX5_CLASS_ETH);\n+\tif (ret < 0)\n+\t\tgoto error;\n \tmemset(&spawn.info, 0, sizeof(spawn.info));\n \tspawn.max_port = 1;\n \tspawn.phys_port = 1;\n-\tspawn.phys_dev = devx_bdf_match;\n-\tspawn.phys_dev_name = mlx5_os_get_dev_device_name(devx_bdf_match);\n+\tspawn.phys_dev_name = mlx5_os_get_ctx_device_name(dev_ctx->ctx);\n \tspawn.eth_dev = NULL;\n \tspawn.pci_dev = pci_dev;\n \tspawn.ifindex = -1; /* Spawn will assign */\n@@ -972,6 +938,7 @@ mlx5_os_net_probe(struct rte_device *dev)\n \t/* Default configuration. */\n \tmemset(&dev_config, 0, sizeof(struct mlx5_dev_config));\n \tdev_config.vf = dev_config_vf;\n+\tdev_config.devx = 1;\n \tdev_config.mps = 0;\n \tdev_config.dbnc = MLX5_ARG_UNSET;\n \tdev_config.rx_vec_en = 1;\n@@ -987,16 +954,21 @@ mlx5_os_net_probe(struct rte_device *dev)\n \tdev_config.dv_flow_en = 1;\n \tdev_config.decap_en = 0;\n \tdev_config.log_hp_size = MLX5_ARG_UNSET;\n-\tspawn.numa_node = pci_dev->device.numa_node;\n-\tspawn.eth_dev = mlx5_dev_spawn(dev, &spawn, &dev_config);\n-\tif (!spawn.eth_dev)\n-\t\treturn -rte_errno;\n+\tspawn.eth_dev = mlx5_dev_spawn(dev, dev_ctx, &spawn, &dev_config);\n+\tif (!spawn.eth_dev) {\n+\t\tret = -rte_errno;\n+\t\tgoto error;\n+\t}\n \trestore = spawn.eth_dev->data->dev_flags;\n \trte_eth_copy_pci_info(spawn.eth_dev, pci_dev);\n \t/* Restore non-PCI flags cleared by the above call. 
*/\n \tspawn.eth_dev->data->dev_flags |= restore;\n \trte_eth_dev_probing_finish(spawn.eth_dev);\n \treturn 0;\n+error:\n+\tmlx5_dev_ctx_release(dev_ctx);\n+\tmlx5_free(dev_ctx);\n+\treturn ret;\n }\n \n /**\n@@ -1016,25 +988,4 @@ mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,\n \t*dereg_mr_cb = mlx5_os_dereg_mr;\n }\n \n-/**\n- * Extract pdn of PD object using DevX\n- *\n- * @param[in] pd\n- *   Pointer to the DevX PD object.\n- * @param[out] pdn\n- *   Pointer to the PD object number variable.\n- *\n- * @return\n- *   0 on success, error value otherwise.\n- */\n-int\n-mlx5_os_get_pdn(void *pd, uint32_t *pdn)\n-{\n-\tif (!pd)\n-\t\treturn -EINVAL;\n-\n-\t*pdn = ((struct mlx5_pd *)pd)->pdn;\n-\treturn 0;\n-}\n-\n const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};\n",
    "prefixes": [
        "RFC",
        "10/21"
    ]
}