get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
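
As a rough illustration of the read path, the patch object shown below can be fetched with nothing more than the Python standard library; the URL and field names are taken from the response itself, GET needs no authentication, and the local mbox filename is an arbitrary choice for the example.

# Minimal sketch: read one patch from the Patchwork REST API.
import json
import urllib.request

url = "http://patchwork.dpdk.org/api/patches/97004/"
with urllib.request.urlopen(url) as resp:
    patch = json.load(resp)

print(patch["name"])                   # "[RFC,13/21] vdpa/mlx5: use context device structure"
print(patch["state"], patch["check"])  # "rfc" "success"
print(patch["submitter"]["email"])     # "michaelba@nvidia.com"

# Save the raw mbox referenced by the object; it can then be applied with `git am`.
urllib.request.urlretrieve(patch["mbox"], "97004.mbox")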

GET /api/patches/97004/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 97004,
    "url": "http://patchwork.dpdk.org/api/patches/97004/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210817134441.1966618-14-michaelba@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210817134441.1966618-14-michaelba@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210817134441.1966618-14-michaelba@nvidia.com",
    "date": "2021-08-17T13:44:33",
    "name": "[RFC,13/21] vdpa/mlx5: use context device structure",
    "commit_ref": null,
    "pull_url": null,
    "state": "rfc",
    "archived": true,
    "hash": "635f39cad25f7f3e040c2ec5740e9090577ad824",
    "submitter": {
        "id": 1949,
        "url": "http://patchwork.dpdk.org/api/people/1949/?format=api",
        "name": "Michael Baum",
        "email": "michaelba@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210817134441.1966618-14-michaelba@nvidia.com/mbox/",
    "series": [
        {
            "id": 18314,
            "url": "http://patchwork.dpdk.org/api/series/18314/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=18314",
            "date": "2021-08-17T13:44:20",
            "name": "mlx5: sharing global MR cache between drivers",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/18314/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/97004/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/97004/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BFE1EA0548;\n\tTue, 17 Aug 2021 15:47:15 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 09C7D41232;\n\tTue, 17 Aug 2021 15:45:42 +0200 (CEST)",
            "from NAM11-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam11on2051.outbound.protection.outlook.com [40.107.236.51])\n by mails.dpdk.org (Postfix) with ESMTP id 152E3411F3\n for <dev@dpdk.org>; Tue, 17 Aug 2021 15:45:40 +0200 (CEST)",
            "from MW4PR04CA0282.namprd04.prod.outlook.com (2603:10b6:303:89::17)\n by BYAPR12MB3477.namprd12.prod.outlook.com (2603:10b6:a03:ac::13)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4415.22; Tue, 17 Aug\n 2021 13:45:38 +0000",
            "from CO1NAM11FT019.eop-nam11.prod.protection.outlook.com\n (2603:10b6:303:89:cafe::59) by MW4PR04CA0282.outlook.office365.com\n (2603:10b6:303:89::17) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4415.16 via Frontend\n Transport; Tue, 17 Aug 2021 13:45:38 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n CO1NAM11FT019.mail.protection.outlook.com (10.13.175.57) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4415.14 via Frontend Transport; Tue, 17 Aug 2021 13:45:37 +0000",
            "from DRHQMAIL107.nvidia.com (10.27.9.16) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 17 Aug\n 2021 13:45:36 +0000",
            "from nvidia.com (172.20.187.6) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 17 Aug\n 2021 13:45:34 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=e4f/F3JpnLm8h87BNm9XyhS1mKcXFFBlqIOzvD8BfY8bw6ouGENYMjN0c5nf/Stl2+DhVuq40FhHSRvFgJSkl6Kse8VC9/AnbaERxUbULIDK1QXuCU0aMSG2fuhmtFqL6GcHLZnUUW7YvehJnSjGHoDrr/6aNEQWhylUEiSe447nKB12TMwV+RyFMJNQ1+6Kf1bFzp8rU9nFzVVGvc6lgKPwan3scbkSPZT9e4367tLmLjtyL2ytB6LqXlCOEXJpAh6xVk58W733Ka/DC3qQrAxiDOnXsYIXz+/s8rRAMIUFjKDioVRhxTcMhQ0G/uKTglBLShvpxH8Z2/7OluhXYg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=vbXZOh//WAcWzFXpKAS77cN7UvYUWJq3k+ows9xhuv8=;\n b=NahiTU7xPKxuHAm8u0E3+eTCeKPK6tpierJ9eWhu5s+JKEf4jOM8JzIPjqTdC6FgfNLRZ9gpuclSEtKWVtxeIvaGvkBnWHfvEZUM9XMdJpEJVAXb5VSgtvu5FIByTIe1k1ZRbXJcVSOvcXFQJhbCM/SmWvYaOah+alkFlb1jq+vwbSf31PPCvpM6E4RPAvd1V98B4XM9M/TKQghxrPUAuGHSxznISZqad4dBx9to8SnrPXStj0RqOFRODVN9lBt0b8uKJE4+enIqZLLSuYjo0EJsiteUVWoDGzrwccdxlPtRmiGI59ts5zMn/02ajLqeK7AOGVlVJqV9Vkjr6KdXpg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=vbXZOh//WAcWzFXpKAS77cN7UvYUWJq3k+ows9xhuv8=;\n b=jxud3CocBtieLPSrR5AYiFNvDykRPaCv2EN8MZBslQec6rcRWR+cvMNG5HHUw4q91DUKSHETn7HNfeiH/ggsIWTq7PiVuF7n0UrLATuzVwhX3RizyA/IqXOoPzQyySPbFyzRd+fbJJhNwpKbZrbla00cJRI8zkoPqQkaIAxojuBzV+A6vykh0n988rcFzd2wiDCup5M6O5Ihhcz1Iw6mnIACifZjJZay/mycB6TxbEtOczHlXBflkHeaosx/gOKtWyX/e9by4v7BP9Up9hyDW/xrpDPIPXRs9sShu0xnvyBQBW0WwDbYD5//EnoKDnc488DG9LQhqvD28ok+joOCCw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Michael Baum <michaelba@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Matan Azrad <matan@nvidia.com>, Raslan Darawsheh <rasland@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Date": "Tue, 17 Aug 2021 16:44:33 +0300",
        "Message-ID": "<20210817134441.1966618-14-michaelba@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20210817134441.1966618-1-michaelba@nvidia.com>",
        "References": "<20210817134441.1966618-1-michaelba@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n DRHQMAIL107.nvidia.com (10.27.9.16)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "3ee4cda5-7324-4143-da51-08d961854baf",
        "X-MS-TrafficTypeDiagnostic": "BYAPR12MB3477:",
        "X-Microsoft-Antispam-PRVS": "\n <BYAPR12MB34771934AE7C43B625EA561ACCFE9@BYAPR12MB3477.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:2;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n +sz/qS+b922EqgqJycEuo70WGWhNeGGDBvl7nZeKYGb5Q06uR279Im5llui5cfjxWXIagQUSOy+z8XD6Vg0sO9MD77NO3aWBnui90qdm9yFdxBEpZBp1kpGvplVThqAuRYR+Uf5PjOJJavNWW6e6YxSuBBXYc2r6ydK03t8P5fwdOS9U1SuFNryHOaHETSgGsZQSNCP0uzInuTvaKbgJvRwJK5jXTi6ySTDdXfg/fFFqcidhG57f+0TyDNc0OWdW5zalmn139ENocaYouRloOD7WijNY0PGh870QKObp93VJKziQu61+heefpxKpS+dfvInTIMgD1Hj7yvLtuRMfZ1YN2/kfjQjVmsp8EifjcC1W69bxuv1A36Dckh50h8aCsi8rmdv0LZWhVJNi+LF4ikfanfF4tzGY/m7XPt7zQaKV11weKgsuDVtQUtXvchBR2yyxJwQJ1+0g6+P6yKg9rxbZj3GRUd/kCYjGdq8IToHioXMfsgz6a4WsXzb3rlqiRYNPhVtM0HvKXtpzeQGvDWLIhfQxo7my+9lIVZPEw2Dj6fliUs3D3TciHuTiPNI9sYh0FI2xkt9N8CdiwsI+W6iC/C2633RjMZlgesp9IOkZOE2YkUYcz60b9N4t6d/jlElkR1H7CGtGb4O9hnNCYXolSCsZV4ZXhC+K0tHW859GybVNJ1byK8nWZgk3Y34l11+v2BKsb2+lkFFGaGSCdQ==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(7636003)(86362001)(36860700001)(107886003)(1076003)(2906002)(6286002)(316002)(4326008)(82310400003)(54906003)(36756003)(7696005)(16526019)(186003)(26005)(83380400001)(70586007)(336012)(47076005)(5660300002)(6916009)(2616005)(508600001)(426003)(8936002)(55016002)(70206006)(8676002)(356005)(30864003);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "17 Aug 2021 13:45:37.6167 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 3ee4cda5-7324-4143-da51-08d961854baf",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT019.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BYAPR12MB3477",
        "Subject": "[dpdk-dev] [RFC 13/21] vdpa/mlx5: use context device structure",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Use common context device structure as a priv field.\n\nSigned-off-by: Michael Baum <michaelba@nvidia.com>\n---\n drivers/vdpa/mlx5/mlx5_vdpa.c       | 185 ++++------------------------\n drivers/vdpa/mlx5/mlx5_vdpa.h       |   4 +-\n drivers/vdpa/mlx5/mlx5_vdpa_event.c |  19 +--\n drivers/vdpa/mlx5/mlx5_vdpa_lm.c    |   6 +-\n drivers/vdpa/mlx5/mlx5_vdpa_mem.c   |  13 +-\n drivers/vdpa/mlx5/mlx5_vdpa_steer.c |  10 +-\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  16 +--\n 7 files changed, 61 insertions(+), 192 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex 6d17d7a6f3..f773ac8711 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -189,37 +189,6 @@ mlx5_vdpa_features_set(int vid)\n \treturn 0;\n }\n \n-static int\n-mlx5_vdpa_pd_create(struct mlx5_vdpa_priv *priv)\n-{\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\tpriv->pd = mlx5_glue->alloc_pd(priv->ctx);\n-\tif (priv->pd == NULL) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate PD.\");\n-\t\treturn errno ? -errno : -ENOMEM;\n-\t}\n-\tstruct mlx5dv_obj obj;\n-\tstruct mlx5dv_pd pd_info;\n-\tint ret = 0;\n-\n-\tobj.pd.in = priv->pd;\n-\tobj.pd.out = &pd_info;\n-\tret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);\n-\tif (ret) {\n-\t\tDRV_LOG(ERR, \"Fail to get PD object info.\");\n-\t\tmlx5_glue->dealloc_pd(priv->pd);\n-\t\tpriv->pd = NULL;\n-\t\treturn -errno;\n-\t}\n-\tpriv->pdn = pd_info.pdn;\n-\treturn 0;\n-#else\n-\t(void)priv;\n-\tDRV_LOG(ERR, \"Cannot get pdn - no DV support.\");\n-\treturn -ENOTSUP;\n-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */\n-}\n-\n static int\n mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)\n {\n@@ -238,7 +207,8 @@ mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)\n \t\tDRV_LOG(DEBUG, \"Vhost MTU is 0.\");\n \t\treturn ret;\n \t}\n-\tret = mlx5_get_ifname_sysfs(priv->ctx->device->ibdev_path,\n+\tret = mlx5_get_ifname_sysfs(mlx5_os_get_ctx_device_path\n+\t\t\t\t\t\t\t   (priv->dev_ctx->ctx),\n \t\t\t\t    request.ifr_name);\n \tif (ret) {\n \t\tDRV_LOG(DEBUG, \"Cannot get kernel IF name - %d.\", ret);\n@@ -289,10 +259,6 @@ mlx5_vdpa_dev_close(int vid)\n \tmlx5_vdpa_virtqs_release(priv);\n \tmlx5_vdpa_event_qp_global_release(priv);\n \tmlx5_vdpa_mem_dereg(priv);\n-\tif (priv->pd) {\n-\t\tclaim_zero(mlx5_glue->dealloc_pd(priv->pd));\n-\t\tpriv->pd = NULL;\n-\t}\n \tpriv->configured = 0;\n \tpriv->vid = 0;\n \t/* The mutex may stay locked after event thread cancel - initiate it. */\n@@ -320,8 +286,7 @@ mlx5_vdpa_dev_config(int vid)\n \tif (mlx5_vdpa_mtu_set(priv))\n \t\tDRV_LOG(WARNING, \"MTU cannot be set on device %s.\",\n \t\t\t\tvdev->device->name);\n-\tif (mlx5_vdpa_pd_create(priv) || mlx5_vdpa_mem_register(priv) ||\n-\t    mlx5_vdpa_err_event_setup(priv) ||\n+\tif (mlx5_vdpa_mem_register(priv) || mlx5_vdpa_err_event_setup(priv) ||\n \t    mlx5_vdpa_virtqs_prepare(priv) || mlx5_vdpa_steer_setup(priv) ||\n \t    mlx5_vdpa_cqe_event_setup(priv)) {\n \t\tmlx5_vdpa_dev_close(vid);\n@@ -343,7 +308,7 @@ mlx5_vdpa_get_device_fd(int vid)\n \t\tDRV_LOG(ERR, \"Invalid vDPA device: %s.\", vdev->device->name);\n \t\treturn -EINVAL;\n \t}\n-\treturn priv->ctx->cmd_fd;\n+\treturn ((struct ibv_context *)priv->dev_ctx->ctx)->cmd_fd;\n }\n \n static int\n@@ -472,98 +437,6 @@ static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {\n \t.reset_stats = mlx5_vdpa_reset_stats,\n };\n \n-/* Try to disable ROCE by Netlink\\Devlink. 
*/\n-static int\n-mlx5_vdpa_nl_roce_disable(const char *addr)\n-{\n-\tint nlsk_fd = mlx5_nl_init(NETLINK_GENERIC);\n-\tint devlink_id;\n-\tint enable;\n-\tint ret;\n-\n-\tif (nlsk_fd < 0)\n-\t\treturn nlsk_fd;\n-\tdevlink_id = mlx5_nl_devlink_family_id_get(nlsk_fd);\n-\tif (devlink_id < 0) {\n-\t\tret = devlink_id;\n-\t\tDRV_LOG(DEBUG, \"Failed to get devlink id for ROCE operations by\"\n-\t\t\t\" Netlink.\");\n-\t\tgoto close;\n-\t}\n-\tret = mlx5_nl_enable_roce_get(nlsk_fd, devlink_id, addr, &enable);\n-\tif (ret) {\n-\t\tDRV_LOG(DEBUG, \"Failed to get ROCE enable by Netlink: %d.\",\n-\t\t\tret);\n-\t\tgoto close;\n-\t} else if (!enable) {\n-\t\tDRV_LOG(INFO, \"ROCE has already disabled(Netlink).\");\n-\t\tgoto close;\n-\t}\n-\tret = mlx5_nl_enable_roce_set(nlsk_fd, devlink_id, addr, 0);\n-\tif (ret)\n-\t\tDRV_LOG(DEBUG, \"Failed to disable ROCE by Netlink: %d.\", ret);\n-\telse\n-\t\tDRV_LOG(INFO, \"ROCE is disabled by Netlink successfully.\");\n-close:\n-\tclose(nlsk_fd);\n-\treturn ret;\n-}\n-\n-/* Try to disable ROCE by sysfs. */\n-static int\n-mlx5_vdpa_sys_roce_disable(const char *addr)\n-{\n-\tFILE *file_o;\n-\tint enable;\n-\tint ret;\n-\n-\tMKSTR(file_p, \"/sys/bus/pci/devices/%s/roce_enable\", addr);\n-\tfile_o = fopen(file_p, \"rb\");\n-\tif (!file_o) {\n-\t\trte_errno = ENOTSUP;\n-\t\treturn -ENOTSUP;\n-\t}\n-\tret = fscanf(file_o, \"%d\", &enable);\n-\tif (ret != 1) {\n-\t\trte_errno = EINVAL;\n-\t\tret = EINVAL;\n-\t\tgoto close;\n-\t} else if (!enable) {\n-\t\tret = 0;\n-\t\tDRV_LOG(INFO, \"ROCE has already disabled(sysfs).\");\n-\t\tgoto close;\n-\t}\n-\tfclose(file_o);\n-\tfile_o = fopen(file_p, \"wb\");\n-\tif (!file_o) {\n-\t\trte_errno = ENOTSUP;\n-\t\treturn -ENOTSUP;\n-\t}\n-\tfprintf(file_o, \"0\\n\");\n-\tret = 0;\n-close:\n-\tif (ret)\n-\t\tDRV_LOG(DEBUG, \"Failed to disable ROCE by sysfs: %d.\", ret);\n-\telse\n-\t\tDRV_LOG(INFO, \"ROCE is disabled by sysfs successfully.\");\n-\tfclose(file_o);\n-\treturn ret;\n-}\n-\n-static int\n-mlx5_vdpa_roce_disable(struct rte_device *dev)\n-{\n-\tchar pci_addr[PCI_PRI_STR_SIZE] = { 0 };\n-\n-\tif (mlx5_dev_to_pci_str(dev, pci_addr, sizeof(pci_addr)) < 0)\n-\t\treturn -rte_errno;\n-\t/* Firstly try to disable ROCE by Netlink and fallback to sysfs. */\n-\tif (mlx5_vdpa_nl_roce_disable(pci_addr) != 0 &&\n-\t    mlx5_vdpa_sys_roce_disable(pci_addr) != 0)\n-\t\treturn -rte_errno;\n-\treturn 0;\n-}\n-\n static int\n mlx5_vdpa_args_check_handler(const char *key, const char *val, void *opaque)\n {\n@@ -632,39 +505,26 @@ mlx5_vdpa_config_get(struct rte_devargs *devargs, struct mlx5_vdpa_priv *priv)\n static int\n mlx5_vdpa_dev_probe(struct rte_device *dev)\n {\n-\tstruct ibv_device *ibv;\n \tstruct mlx5_vdpa_priv *priv = NULL;\n-\tstruct ibv_context *ctx = NULL;\n+\tstruct mlx5_dev_ctx *dev_ctx = NULL;\n \tstruct mlx5_hca_attr attr;\n-\tint retry;\n \tint ret;\n \n-\tif (mlx5_vdpa_roce_disable(dev) != 0) {\n-\t\tDRV_LOG(WARNING, \"Failed to disable ROCE for \\\"%s\\\".\",\n-\t\t\tdev->name);\n-\t\treturn -rte_errno;\n-\t}\n-\t/* Wait for the IB device to appear again after reload. 
*/\n-\tfor (retry = MLX5_VDPA_MAX_RETRIES; retry > 0; --retry) {\n-\t\tibv = mlx5_os_get_ibv_dev(dev);\n-\t\tif (ibv != NULL)\n-\t\t\tbreak;\n-\t\tusleep(MLX5_VDPA_USEC);\n-\t}\n-\tif (ibv == NULL) {\n-\t\tDRV_LOG(ERR, \"Cannot get IB device after disabling RoCE for \"\n-\t\t\t\t\"\\\"%s\\\", retries exceed %d.\",\n-\t\t\t\tdev->name, MLX5_VDPA_MAX_RETRIES);\n-\t\trte_errno = EAGAIN;\n+\tdev_ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_dev_ctx),\n+\t\t\t      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\tif (dev_ctx == NULL) {\n+\t\tDRV_LOG(ERR, \"Device context allocation failure.\");\n+\t\trte_errno = ENOMEM;\n \t\treturn -rte_errno;\n \t}\n-\tctx = mlx5_glue->dv_open_device(ibv);\n-\tif (!ctx) {\n-\t\tDRV_LOG(ERR, \"Failed to open IB device \\\"%s\\\".\", ibv->name);\n+\tret = mlx5_dev_ctx_prepare(dev_ctx, dev, MLX5_CLASS_VDPA);\n+\tif (ret < 0) {\n+\t\tDRV_LOG(ERR, \"Failed to create device context.\");\n+\t\tmlx5_free(dev_ctx);\n \t\trte_errno = ENODEV;\n \t\treturn -rte_errno;\n \t}\n-\tret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);\n+\tret = mlx5_devx_cmd_query_hca_attr(dev_ctx->ctx, &attr);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Unable to read HCA capabilities.\");\n \t\trte_errno = ENOTSUP;\n@@ -692,8 +552,8 @@ mlx5_vdpa_dev_probe(struct rte_device *dev)\n \tpriv->qp_ts_format = attr.qp_ts_format;\n \tif (attr.num_lag_ports == 0)\n \t\tpriv->num_lag_ports = 1;\n-\tpriv->ctx = ctx;\n-\tpriv->var = mlx5_glue->dv_alloc_var(ctx, 0);\n+\tpriv->dev_ctx = dev_ctx;\n+\tpriv->var = mlx5_glue->dv_alloc_var(dev_ctx->ctx, 0);\n \tif (!priv->var) {\n \t\tDRV_LOG(ERR, \"Failed to allocate VAR %u.\", errno);\n \t\tgoto error;\n@@ -718,8 +578,10 @@ mlx5_vdpa_dev_probe(struct rte_device *dev)\n \t\t\tmlx5_glue->dv_free_var(priv->var);\n \t\trte_free(priv);\n \t}\n-\tif (ctx)\n-\t\tmlx5_glue->close_device(ctx);\n+\tif (dev_ctx) {\n+\t\tmlx5_dev_ctx_release(dev_ctx);\n+\t\tmlx5_free(dev_ctx);\n+\t}\n \treturn -rte_errno;\n }\n \n@@ -748,7 +610,10 @@ mlx5_vdpa_dev_remove(struct rte_device *dev)\n \t\t}\n \t\tif (priv->vdev)\n \t\t\trte_vdpa_unregister_device(priv->vdev);\n-\t\tmlx5_glue->close_device(priv->ctx);\n+\t\tif (priv->dev_ctx) {\n+\t\t\tmlx5_dev_ctx_release(priv->dev_ctx);\n+\t\t\tmlx5_free(priv->dev_ctx);\n+\t\t}\n \t\tpthread_mutex_destroy(&priv->vq_config_lock);\n \t\trte_free(priv);\n \t}\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex 2a04e36607..dc9ba1c3c2 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -132,10 +132,8 @@ struct mlx5_vdpa_priv {\n \tuint16_t hw_max_pending_comp; /* Hardware CQ moderation counter. */\n \tstruct rte_vdpa_device *vdev; /* vDPA device. */\n \tint vid; /* vhost device id. */\n-\tstruct ibv_context *ctx; /* Device context. */\n+\tstruct mlx5_dev_ctx *dev_ctx; /* Device context. */\n \tstruct mlx5_hca_vdpa_attr caps;\n-\tuint32_t pdn; /* Protection Domain number. 
*/\n-\tstruct ibv_pd *pd;\n \tuint32_t gpa_mkey_index;\n \tstruct ibv_mr *null_mr;\n \tstruct rte_vhost_memory *vmem;\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\nindex 3541c652ce..056a3c2936 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n@@ -48,7 +48,7 @@ mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)\n {\n \tif (priv->eventc)\n \t\treturn 0;\n-\tpriv->eventc = mlx5_os_devx_create_event_channel(priv->ctx,\n+\tpriv->eventc = mlx5_os_devx_create_event_channel(priv->dev_ctx->ctx,\n \t\t\t   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);\n \tif (!priv->eventc) {\n \t\trte_errno = errno;\n@@ -61,7 +61,7 @@ mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)\n \t * registers writings, it is safe to allocate UAR with any\n \t * memory mapping type.\n \t */\n-\tpriv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);\n+\tpriv->uar = mlx5_devx_alloc_uar(priv->dev_ctx->ctx, -1);\n \tif (!priv->uar) {\n \t\trte_errno = errno;\n \t\tDRV_LOG(ERR, \"Failed to allocate UAR.\");\n@@ -115,8 +115,8 @@ mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,\n \tuint16_t event_nums[1] = {0};\n \tint ret;\n \n-\tret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, log_desc_n, &attr,\n-\t\t\t\t  SOCKET_ID_ANY);\n+\tret = mlx5_devx_cq_create(priv->dev_ctx->ctx, &cq->cq_obj, log_desc_n,\n+\t\t\t\t  &attr, SOCKET_ID_ANY);\n \tif (ret)\n \t\tgoto error;\n \tcq->cq_ci = 0;\n@@ -397,7 +397,8 @@ mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)\n \tint flags;\n \n \t/* Setup device event channel. */\n-\tpriv->err_chnl = mlx5_glue->devx_create_event_channel(priv->ctx, 0);\n+\tpriv->err_chnl =\n+\t\tmlx5_glue->devx_create_event_channel(priv->dev_ctx->ctx, 0);\n \tif (!priv->err_chnl) {\n \t\trte_errno = errno;\n \t\tDRV_LOG(ERR, \"Failed to create device event channel %d.\",\n@@ -598,9 +599,9 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \t\treturn -1;\n \tif (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))\n \t\treturn -1;\n-\tattr.pd = priv->pdn;\n+\tattr.pd = priv->dev_ctx->pdn;\n \tattr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);\n-\teqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);\n+\teqp->fw_qp = mlx5_devx_cmd_create_qp(priv->dev_ctx->ctx, &attr);\n \tif (!eqp->fw_qp) {\n \t\tDRV_LOG(ERR, \"Failed to create FW QP(%u).\", rte_errno);\n \t\tgoto error;\n@@ -611,7 +612,7 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n-\teqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,\n+\teqp->umem_obj = mlx5_glue->devx_umem_reg(priv->dev_ctx->ctx,\n \t\t\t\t\t       (void *)(uintptr_t)eqp->umem_buf,\n \t\t\t\t\t       umem_size,\n \t\t\t\t\t       IBV_ACCESS_LOCAL_WRITE);\n@@ -631,7 +632,7 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \tattr.dbr_umem_id = eqp->umem_obj->umem_id;\n \tattr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);\n \tattr.dbr_address = RTE_BIT64(log_desc_n) * MLX5_WSEG_SIZE;\n-\teqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);\n+\teqp->sw_qp = mlx5_devx_cmd_create_qp(priv->dev_ctx->ctx, &attr);\n \tif (!eqp->sw_qp) {\n \t\tDRV_LOG(ERR, \"Failed to create SW QP(%u).\", rte_errno);\n \t\tgoto error;\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c\nindex f391813745..1e9a946708 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c\n@@ -39,7 
+39,7 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,\n \tstruct mlx5_devx_mkey_attr mkey_attr = {\n \t\t\t.addr = (uintptr_t)log_base,\n \t\t\t.size = log_size,\n-\t\t\t.pd = priv->pdn,\n+\t\t\t.pd = priv->dev_ctx->pdn,\n \t\t\t.pg_access = 1,\n \t};\n \tstruct mlx5_devx_virtq_attr attr = {\n@@ -54,7 +54,7 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,\n \t\tDRV_LOG(ERR, \"Failed to allocate mem for lm mr.\");\n \t\treturn -1;\n \t}\n-\tmr->umem = mlx5_glue->devx_umem_reg(priv->ctx,\n+\tmr->umem = mlx5_glue->devx_umem_reg(priv->dev_ctx->ctx,\n \t\t\t\t\t    (void *)(uintptr_t)log_base,\n \t\t\t\t\t    log_size, IBV_ACCESS_LOCAL_WRITE);\n \tif (!mr->umem) {\n@@ -62,7 +62,7 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,\n \t\tgoto err;\n \t}\n \tmkey_attr.umem_id = mr->umem->umem_id;\n-\tmr->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);\n+\tmr->mkey = mlx5_devx_cmd_mkey_create(priv->dev_ctx->ctx, &mkey_attr);\n \tif (!mr->mkey) {\n \t\tDRV_LOG(ERR, \"Failed to create Mkey for lm.\");\n \t\tgoto err;\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c\nindex a13bde5a0b..bec83eddde 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c\n@@ -193,7 +193,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)\n \tif (!mem)\n \t\treturn -rte_errno;\n \tpriv->vmem = mem;\n-\tpriv->null_mr = mlx5_glue->alloc_null_mr(priv->pd);\n+\tpriv->null_mr = mlx5_glue->alloc_null_mr(priv->dev_ctx->pd);\n \tif (!priv->null_mr) {\n \t\tDRV_LOG(ERR, \"Failed to allocate null MR.\");\n \t\tret = -errno;\n@@ -209,7 +209,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)\n \t\t\tDRV_LOG(ERR, \"Failed to allocate mem entry memory.\");\n \t\t\tgoto error;\n \t\t}\n-\t\tentry->umem = mlx5_glue->devx_umem_reg(priv->ctx,\n+\t\tentry->umem = mlx5_glue->devx_umem_reg(priv->dev_ctx->ctx,\n \t\t\t\t\t (void *)(uintptr_t)reg->host_user_addr,\n \t\t\t\t\t     reg->size, IBV_ACCESS_LOCAL_WRITE);\n \t\tif (!entry->umem) {\n@@ -220,9 +220,10 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)\n \t\tmkey_attr.addr = (uintptr_t)(reg->guest_phys_addr);\n \t\tmkey_attr.size = reg->size;\n \t\tmkey_attr.umem_id = entry->umem->umem_id;\n-\t\tmkey_attr.pd = priv->pdn;\n+\t\tmkey_attr.pd = priv->dev_ctx->pdn;\n \t\tmkey_attr.pg_access = 1;\n-\t\tentry->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);\n+\t\tentry->mkey = mlx5_devx_cmd_mkey_create(priv->dev_ctx->ctx,\n+\t\t\t\t\t\t\t&mkey_attr);\n \t\tif (!entry->mkey) {\n \t\t\tDRV_LOG(ERR, \"Failed to create direct Mkey.\");\n \t\t\tret = -rte_errno;\n@@ -267,7 +268,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)\n \t}\n \tmkey_attr.addr = (uintptr_t)(mem->regions[0].guest_phys_addr);\n \tmkey_attr.size = mem_size;\n-\tmkey_attr.pd = priv->pdn;\n+\tmkey_attr.pd = priv->dev_ctx->pdn;\n \tmkey_attr.umem_id = 0;\n \t/* Must be zero for KLM mode. 
*/\n \tmkey_attr.log_entity_size = mode == MLX5_MKC_ACCESS_MODE_KLM_FBS ?\n@@ -281,7 +282,7 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)\n \t\tret = -ENOMEM;\n \t\tgoto error;\n \t}\n-\tentry->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);\n+\tentry->mkey = mlx5_devx_cmd_mkey_create(priv->dev_ctx->ctx, &mkey_attr);\n \tif (!entry->mkey) {\n \t\tDRV_LOG(ERR, \"Failed to create indirect Mkey.\");\n \t\tret = -rte_errno;\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\nindex 383f003966..ae2ca9ccac 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\n@@ -98,7 +98,8 @@ mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)\n \tattr->rqt_max_size = rqt_n;\n \tattr->rqt_actual_size = rqt_n;\n \tif (!priv->steer.rqt) {\n-\t\tpriv->steer.rqt = mlx5_devx_cmd_create_rqt(priv->ctx, attr);\n+\t\tpriv->steer.rqt = mlx5_devx_cmd_create_rqt(priv->dev_ctx->ctx,\n+\t\t\t\t\t\t\t   attr);\n \t\tif (!priv->steer.rqt) {\n \t\t\tDRV_LOG(ERR, \"Failed to create RQT.\");\n \t\t\tret = -rte_errno;\n@@ -116,6 +117,7 @@ static int __rte_unused\n mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)\n {\n #ifdef HAVE_MLX5DV_DR\n+\tstruct ibv_context *ctx = priv->dev_ctx->ctx;\n \tstruct mlx5_devx_tir_attr tir_att = {\n \t\t.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT,\n \t\t.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ,\n@@ -204,12 +206,12 @@ mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)\n \t\ttir_att.rx_hash_field_selector_outer.selected_fields =\n \t\t\t\t\t\t\t\t  vars[i][HASH];\n \t\tpriv->steer.rss[i].matcher = mlx5_glue->dv_create_flow_matcher\n-\t\t\t\t\t (priv->ctx, &dv_attr, priv->steer.tbl);\n+\t\t\t\t\t       (ctx, &dv_attr, priv->steer.tbl);\n \t\tif (!priv->steer.rss[i].matcher) {\n \t\t\tDRV_LOG(ERR, \"Failed to create matcher %d.\", i);\n \t\t\tgoto error;\n \t\t}\n-\t\tpriv->steer.rss[i].tir = mlx5_devx_cmd_create_tir(priv->ctx,\n+\t\tpriv->steer.rss[i].tir = mlx5_devx_cmd_create_tir(ctx,\n \t\t\t\t\t\t\t\t  &tir_att);\n \t\tif (!priv->steer.rss[i].tir) {\n \t\t\tDRV_LOG(ERR, \"Failed to create TIR %d.\", i);\n@@ -268,7 +270,7 @@ int\n mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)\n {\n #ifdef HAVE_MLX5DV_DR\n-\tpriv->steer.domain = mlx5_glue->dr_create_domain(priv->ctx,\n+\tpriv->steer.domain = mlx5_glue->dr_create_domain(priv->dev_ctx->ctx,\n \t\t\t\t\t\t  MLX5DV_DR_DOMAIN_TYPE_NIC_RX);\n \tif (!priv->steer.domain) {\n \t\tDRV_LOG(ERR, \"Failed to create Rx domain.\");\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex f530646058..d7c2d70947 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -250,7 +250,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \tif (priv->caps.queue_counters_valid) {\n \t\tif (!virtq->counters)\n \t\t\tvirtq->counters = mlx5_devx_cmd_create_virtio_q_counters\n-\t\t\t\t\t\t\t\t(priv->ctx);\n+\t\t\t\t\t\t\t   (priv->dev_ctx->ctx);\n \t\tif (!virtq->counters) {\n \t\t\tDRV_LOG(ERR, \"Failed to create virtq couners for virtq\"\n \t\t\t\t\" %d.\", index);\n@@ -269,7 +269,8 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \t\t\t\t\" %u.\", i, index);\n \t\t\tgoto error;\n \t\t}\n-\t\tvirtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->ctx,\n+\t\tvirtq->umems[i].obj = mlx5_glue->devx_umem_reg\n+\t\t\t\t\t\t       (priv->dev_ctx->ctx,\n \t\t\t\t\t\t\tvirtq->umems[i].buf,\n \t\t\t\t\t\t\tvirtq->umems[i].size,\n \t\t\t\t\t\t\tIBV_ACCESS_LOCAL_WRITE);\n@@ 
-322,11 +323,11 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \tattr.mkey = priv->gpa_mkey_index;\n \tattr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;\n \tattr.queue_index = index;\n-\tattr.pd = priv->pdn;\n+\tattr.pd = priv->dev_ctx->pdn;\n \tattr.hw_latency_mode = priv->hw_latency_mode;\n \tattr.hw_max_latency_us = priv->hw_max_latency_us;\n \tattr.hw_max_pending_comp = priv->hw_max_pending_comp;\n-\tvirtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);\n+\tvirtq->virtq = mlx5_devx_cmd_create_virtq(priv->dev_ctx->ctx, &attr);\n \tvirtq->priv = priv;\n \tif (!virtq->virtq)\n \t\tgoto error;\n@@ -434,6 +435,7 @@ int\n mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n {\n \tstruct mlx5_devx_tis_attr tis_attr = {0};\n+\tstruct ibv_context *ctx = priv->dev_ctx->ctx;\n \tuint32_t i;\n \tuint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);\n \tint ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);\n@@ -457,7 +459,7 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t}\n \t/* Always map the entire page. */\n \tpriv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |\n-\t\t\t\t   PROT_WRITE, MAP_SHARED, priv->ctx->cmd_fd,\n+\t\t\t\t   PROT_WRITE, MAP_SHARED, ctx->cmd_fd,\n \t\t\t\t   priv->var->mmap_off);\n \tif (priv->virtq_db_addr == MAP_FAILED) {\n \t\tDRV_LOG(ERR, \"Failed to map doorbell page %u.\", errno);\n@@ -467,7 +469,7 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t\tDRV_LOG(DEBUG, \"VAR address of doorbell mapping is %p.\",\n \t\t\tpriv->virtq_db_addr);\n \t}\n-\tpriv->td = mlx5_devx_cmd_create_td(priv->ctx);\n+\tpriv->td = mlx5_devx_cmd_create_td(ctx);\n \tif (!priv->td) {\n \t\tDRV_LOG(ERR, \"Failed to create transport domain.\");\n \t\treturn -rte_errno;\n@@ -476,7 +478,7 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \tfor (i = 0; i < priv->num_lag_ports; i++) {\n \t\t/* 0 is auto affinity, non-zero value to propose port. */\n \t\ttis_attr.lag_tx_port_affinity = i + 1;\n-\t\tpriv->tiss[i] = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);\n+\t\tpriv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);\n \t\tif (!priv->tiss[i]) {\n \t\t\tDRV_LOG(ERR, \"Failed to create TIS %u.\", i);\n \t\t\tgoto error;\n",
    "prefixes": [
        "RFC",
        "13/21"
    ]
}
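
For completeness, a write against the same resource is sketched below; PUT and PATCH (see the verbs above and the Allow header) need an authenticated account with maintainer rights on the project. The token value and the target state are placeholders, and the "Authorization: Token ..." header is the usual Django REST Framework token scheme, so treat the details as an assumption rather than a recipe.

# Sketch only: partially update the patch via PATCH, assuming a maintainer API token.
import json
import urllib.request

url = "http://patchwork.dpdk.org/api/patches/97004/"
req = urllib.request.Request(
    url,
    data=json.dumps({"state": "accepted"}).encode(),  # example state change, placeholder value
    method="PATCH",
    headers={
        "Content-Type": "application/json",
        "Authorization": "Token <your-api-token>",    # placeholder credential
    },
)
with urllib.request.urlopen(req) as resp:
    print(resp.status, json.load(resp)["state"])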