get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request change).

put:
Update a patch (full update of the writable fields).

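The three operations map onto the HTTP verbs listed in the Allow header of the example exchange below. What follows is a minimal sketch of driving them with Python's requests library; it assumes requests is installed and that you hold a Patchwork API token ("your-api-token" is a placeholder), and the fields sent in the write calls are illustrative rather than an exhaustive list of what the server accepts. Only the GET call corresponds to the example response shown afterwards.

    import requests

    BASE = "http://patchwork.dpdk.org/api"
    PATCH_ID = 104662              # the patch shown in the example response below
    TOKEN = "your-api-token"       # placeholder; write operations require authentication

    # get: show a patch (read-only, no authentication required)
    patch = requests.get(f"{BASE}/patches/{PATCH_ID}/").json()
    print(patch["name"], patch["state"])

    # patch: partial update -- send only the fields you want to change
    requests.patch(
        f"{BASE}/patches/{PATCH_ID}/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted"},
    )

    # put: full update -- the body stands in for all writable fields
    requests.put(
        f"{BASE}/patches/{PATCH_ID}/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted", "archived": True, "delegate": None},
    )
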
GET /api/patches/104662/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 104662,
    "url": "http://patchwork.dpdk.org/api/patches/104662/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20211124183409.11181-1-eagostini@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211124183409.11181-1-eagostini@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211124183409.11181-1-eagostini@nvidia.com",
    "date": "2021-11-24T18:34:09",
    "name": "[v2] gpu/cuda: set rte_errno",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "250300e41bba1ac9d1177b330185f4dbc80e4fb5",
    "submitter": {
        "id": 1571,
        "url": "http://patchwork.dpdk.org/api/people/1571/?format=api",
        "name": "Elena Agostini",
        "email": "eagostini@nvidia.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20211124183409.11181-1-eagostini@nvidia.com/mbox/",
    "series": [
        {
            "id": 20743,
            "url": "http://patchwork.dpdk.org/api/series/20743/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=20743",
            "date": "2021-11-24T18:34:09",
            "name": "[v2] gpu/cuda: set rte_errno",
            "version": 2,
            "mbox": "http://patchwork.dpdk.org/series/20743/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/104662/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/104662/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 6F8C6A0C52;\n\tWed, 24 Nov 2021 11:23:58 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 5A47441177;\n\tWed, 24 Nov 2021 11:23:58 +0100 (CET)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2088.outbound.protection.outlook.com [40.107.243.88])\n by mails.dpdk.org (Postfix) with ESMTP id 5CCF041165\n for <dev@dpdk.org>; Wed, 24 Nov 2021 11:23:57 +0100 (CET)",
            "from BN6PR11CA0020.namprd11.prod.outlook.com (2603:10b6:405:2::30)\n by MN2PR12MB3632.namprd12.prod.outlook.com (2603:10b6:208:c1::31) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4713.22; Wed, 24 Nov\n 2021 10:23:55 +0000",
            "from BN8NAM11FT003.eop-nam11.prod.protection.outlook.com\n (2603:10b6:405:2:cafe::99) by BN6PR11CA0020.outlook.office365.com\n (2603:10b6:405:2::30) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4734.21 via Frontend\n Transport; Wed, 24 Nov 2021 10:23:55 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT003.mail.protection.outlook.com (10.13.177.90) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4713.20 via Frontend Transport; Wed, 24 Nov 2021 10:23:54 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Wed, 24 Nov\n 2021 10:23:44 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=HX7/7LekkEnFPljRZtXpcdYLwOu3eqK/WReA67+uI8s5KvxJwE2triyd1rzeqD2IBaRRLeTmLaWaitP39+oF0YPXOcEmJT5T57vORGDZLuEg8NxfgC2Y9NBI+p2Ox3lvilrgz0E9yTq67H+PF64osJidd13KClWu9wIE4V39tLHOaaTUUmlChzKywdr7or9Xqlx840rlMKVwduBSE6y10F8KKJ3bNSd5x9i+i4K7v11vxarNj2tHSMcv2ftCpnRWrfLIoJj+HKr9jejvoYQB/P57CqLBSAMz620SeLqYV04ssZ8LAanz77imUxNAiLDTjvomVX9/hw3XQXervtsRaw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=eL4MFaF3lVSUJGASO0KO1OzIO18+4dM7jqBn3jmbM2w=;\n b=kRP3BGV3tj3Xk0oyQvRfi9qrkSOuRpnt7dkrLaQKqGPuacG3cthDdrgjMd3ulWbHt9waf+usvCbJrlrC4f4AxYJTVxcebXLwmkZm6hZn3THHiTKnAWtZ7IygeXXVHIxhLw7d7kspZZa1SeZ00RGi1GrIcfnScD6yvJ9whgeU8nwgPavfkSOJQMnAE0964Pj91/sdPHTDv97nM/8UwGhZn9ufqdsiPYChPDe3emaCXCuzZao/YSqjjva3JqpVf1d2jj9zcXDDrplpZf25vIChlPqjFABAXErhZGFBYlFWnZR/WCbvBxrP9awQ659P3gk8z7r0+2Xw4x52oTeliNysrQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=eL4MFaF3lVSUJGASO0KO1OzIO18+4dM7jqBn3jmbM2w=;\n b=mqRZmC7p/HHnTXIyV151XKndnJc1BowLga/Sd1Q+yTqCkHMUA2Z110qdCNizKdtTSrTQINlUGDhtKpkt2cLz46IHNcHDg0ktXciViHaysGtL2auPEZBt8lneKoXH7z8w9A0QP4PVoq1kf9Q27y2DEElrcU7hX9NxkcuOm98mYSbCzPNdDjNxMH7MqwFUeXoGisqytBHEYhCyl568wZc0G658PvWv1gDO3W54qiGC1d+lJF5T9/podetW+9zDjkMrQg5MNTXHXZk9t0WjWheWtIGfmnzOwRsYF3hRhbZQbvVg4azRcHeo1RVCKlwLSpSuor0/gvFUE2mzBneQVNoZwQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "<eagostini@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Elena Agostini <eagostini@nvidia.com>",
        "Subject": "[PATCH v2] gpu/cuda: set rte_errno",
        "Date": "Wed, 24 Nov 2021 18:34:09 +0000",
        "Message-ID": "<20211124183409.11181-1-eagostini@nvidia.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20211118204527.26524-1-eagostini@nvidia.com>",
        "References": "<20211118204527.26524-1-eagostini@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL105.nvidia.com (172.20.187.12) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "c710fedf-84cc-44d0-6948-08d9af3484d7",
        "X-MS-TrafficTypeDiagnostic": "MN2PR12MB3632:",
        "X-Microsoft-Antispam-PRVS": "\n <MN2PR12MB3632558294D15814D00C0581CD619@MN2PR12MB3632.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:561;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n SbC4HIzjAFs5kJoH+ZMJacvX+CrigL1OU+ZGWBZJ7BgOEh5g8QA8AyHNkBw4GDeVw9qlB7bqk8Te4H0IgwcGTkG8fnHk0BsPkLcrFu2KhVJ7BYz0tXEY4SFMj/J/ERbdU7pdIknG/YcqSKRDknLwZ4Qr+WfYgyJS9UDpksFZvbo1VTDTbUQL4opYdAOALqMLkNW4kOl9eRs/8o9A2TVzC+0IEoYAAkWHa8a968upjXf7d4rZaNI8GszhXq6enRwmyGen94xO0lTMAW/FzKO+vpmz4uilIn7vCeRw4kvDZ+5t14CEGACklVw0la8+fCoLvp4kmcHUiY41iG/CjWH6W1e8QNTKB9m6PtaOXClUQ1n+nwlengq9lKeXrLzwlXukNN+mlJJ3zCKZ7XETjmkvXAlRpyo+1BTyorn6456DkTxnAZTa7SdsbN0QH9psb5rWZpuB1/RWrQXvZfr6CThNNMas7FRVBKXY9YG0gJUTrLJFg2ZJ59oRz2NGqhTj5ZhTKCO6Z8uQx4H+t/MzV8jWC2nhrgIMDxjWWZ8FSFrL0gkmu4SrOz+5tdJliBB4K+Bde95rOR6cwtswALytCZ8golb/ZK9S55Wj7jAnvKSQbFTWNRaTyajSDq0uUgA683rVLcOxd1DKiJ16RlUVjAtO8fqcK7nmbsFTH4N4tQKaJb6WFJD0Ao6zYksRg3zZ4I8h8NUvHPYuHtqFeQ6HOLj5xw==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(7696005)(426003)(5660300002)(70206006)(508600001)(83380400001)(6916009)(336012)(82310400004)(55016003)(70586007)(6666004)(36860700001)(1076003)(186003)(47076005)(2876002)(36756003)(16526019)(4326008)(7636003)(107886003)(86362001)(2616005)(8676002)(8936002)(2906002)(6286002)(30864003)(356005)(316002)(26005);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "24 Nov 2021 10:23:54.2001 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n c710fedf-84cc-44d0-6948-08d9af3484d7",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT003.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MN2PR12MB3632",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Elena Agostini <eagostini@nvidia.com>\n\nSet correct rte_errno variable in gpu/cuda and return\n-rte_errno in case of error.\n\nrte_errno values are compliant with the gpudev library\ndocumentation.\n\nFixes: 1306a73b1958 (\"gpu/cuda: introduce CUDA driver\")\n\nSigned-off-by: Elena Agostini <eagostini@nvidia.com>\n---\n drivers/gpu/cuda/cuda.c | 184 +++++++++++++++++++++++++++-------------\n 1 file changed, 123 insertions(+), 61 deletions(-)",
    "diff": "diff --git a/drivers/gpu/cuda/cuda.c b/drivers/gpu/cuda/cuda.c\nindex a4869da186..882df08e56 100644\n--- a/drivers/gpu/cuda/cuda.c\n+++ b/drivers/gpu/cuda/cuda.c\n@@ -464,8 +464,10 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)\n \tCUcontext current_ctx;\n \tCUcontext input_ctx;\n \n-\tif (dev == NULL)\n-\t\treturn -ENODEV;\n+\tif (dev == NULL) {\n+\t\trte_errno = ENODEV;\n+\t\treturn -rte_errno;\n+\t}\n \n \t/* Child initialization time probably called by rte_gpu_add_child() */\n \tif (dev->mpshared->info.parent != RTE_GPU_ID_NONE &&\n@@ -476,7 +478,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)\n \t\t\tpfn_cuGetErrorString(res, &(err_string));\n \t\t\trte_cuda_log(ERR, \"cuCtxGetCurrent failed with %s\",\n \t\t\t\t\terr_string);\n-\t\t\treturn -EPERM;\n+\t\t\trte_errno = EPERM;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\t/* Set child ctx as current ctx */\n@@ -486,7 +489,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)\n \t\t\tpfn_cuGetErrorString(res, &(err_string));\n \t\t\trte_cuda_log(ERR, \"cuCtxSetCurrent input failed with %s\",\n \t\t\t\t\terr_string);\n-\t\t\treturn -EPERM;\n+\t\t\trte_errno = EPERM;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\t/*\n@@ -505,8 +509,10 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)\n \t\t\t\t(uint32_t)affinityPrm.param.smCount.val;\n \n \t\tret = rte_gpu_info_get(dev->mpshared->info.parent, &parent_info);\n-\t\tif (ret)\n-\t\t\treturn -ENODEV;\n+\t\tif (ret) {\n+\t\t\trte_errno = ENODEV;\n+\t\t\treturn -rte_errno;\n+\t\t}\n \t\tdev->mpshared->info.total_memory = parent_info.total_memory;\n \n \t\t/*\n@@ -517,7 +523,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)\n \t\t\t\tRTE_CACHE_LINE_SIZE);\n \t\tif (dev->mpshared->dev_private == NULL) {\n \t\t\trte_cuda_log(ERR, \"Failed to allocate memory for GPU process private\");\n-\t\t\treturn -EPERM;\n+\t\t\trte_errno = EPERM;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\tprivate = (struct cuda_info *)dev->mpshared->dev_private;\n@@ -527,7 +534,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)\n \t\t\tpfn_cuGetErrorString(res, &(err_string));\n \t\t\trte_cuda_log(ERR, \"cuCtxGetDevice failed with %s\",\n \t\t\t\t\terr_string);\n-\t\t\treturn -EPERM;\n+\t\t\trte_errno = EPERM;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\tres = pfn_cuDeviceGetName(private->gpu_name,\n@@ -536,7 +544,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)\n \t\t\tpfn_cuGetErrorString(res, &(err_string));\n \t\t\trte_cuda_log(ERR, \"cuDeviceGetName failed with %s\",\n \t\t\t\t\terr_string);\n-\t\t\treturn -EPERM;\n+\t\t\trte_errno = EPERM;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\t/* Restore original ctx as current ctx */\n@@ -545,7 +554,8 @@ cuda_dev_info_get(struct rte_gpu *dev, struct rte_gpu_info *info)\n \t\t\tpfn_cuGetErrorString(res, &(err_string));\n \t\t\trte_cuda_log(ERR, \"cuCtxSetCurrent current failed with %s\",\n \t\t\t\t\terr_string);\n-\t\t\treturn -EPERM;\n+\t\t\trte_errno = EPERM;\n+\t\t\treturn -rte_errno;\n \t\t}\n \t}\n \n@@ -576,7 +586,8 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuCtxGetCurrent failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \t/* Set child ctx as current ctx */\n@@ -586,13 +597,16 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)\n 
\t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuCtxSetCurrent input failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \t/* Get next memory list item */\n \tmem_alloc_list_tail = mem_list_add_item();\n-\tif (mem_alloc_list_tail == NULL)\n-\t\treturn -ENOMEM;\n+\tif (mem_alloc_list_tail == NULL) {\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n+\t}\n \n \t/* Allocate memory */\n \tmem_alloc_list_tail->size = size;\n@@ -602,7 +616,8 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuCtxSetCurrent current failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \t/* GPUDirect RDMA attribute required */\n@@ -613,7 +628,8 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)\n \t\trte_cuda_log(ERR, \"Could not set SYNC MEMOP attribute for \"\n \t\t\t\t\"GPU memory at  %\"PRIu32\", err %d\",\n \t\t\t\t(uint32_t)mem_alloc_list_tail->ptr_d, res);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tmem_alloc_list_tail->pkey = get_hash_from_ptr((void *)mem_alloc_list_tail->ptr_d);\n@@ -629,7 +645,8 @@ cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuCtxSetCurrent current failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \t*ptr = (void *)mem_alloc_list_tail->ptr_d;\n@@ -656,7 +673,8 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuCtxGetCurrent failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \t/* Set child ctx as current ctx */\n@@ -666,13 +684,16 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuCtxSetCurrent input failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \t/* Get next memory list item */\n \tmem_alloc_list_tail = mem_list_add_item();\n-\tif (mem_alloc_list_tail == NULL)\n-\t\treturn -ENOMEM;\n+\tif (mem_alloc_list_tail == NULL) {\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n+\t}\n \n \t/* Allocate memory */\n \tmem_alloc_list_tail->size = size;\n@@ -688,7 +709,8 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)\n \t\t\t\terr_string,\n \t\t\t\tmem_alloc_list_tail->ptr_h,\n \t\t\t\tmem_alloc_list_tail->size);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tres = pfn_cuDeviceGetAttribute(&(use_ptr_h),\n@@ -698,7 +720,8 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuDeviceGetAttribute failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tif (use_ptr_h == 0) {\n@@ -708,13 +731,15 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)\n \t\t\tpfn_cuGetErrorString(res, &(err_string));\n \t\t\trte_cuda_log(ERR, \"cuMemHostGetDevicePointer failed with %s\",\n \t\t\t\t\terr_string);\n-\t\t\treturn -EPERM;\n+\t\t\trte_errno = EPERM;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\tif 
((uintptr_t)mem_alloc_list_tail->ptr_d !=\n \t\t\t\t(uintptr_t)mem_alloc_list_tail->ptr_h) {\n \t\t\trte_cuda_log(ERR, \"Host input pointer is different wrt GPU registered pointer\");\n-\t\t\treturn -ENOTSUP;\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn -rte_errno;\n \t\t}\n \t} else {\n \t\tmem_alloc_list_tail->ptr_d = (CUdeviceptr)mem_alloc_list_tail->ptr_h;\n@@ -727,7 +752,8 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)\n \tif (res != 0) {\n \t\trte_cuda_log(ERR, \"Could not set SYNC MEMOP attribute for GPU memory at %\"PRIu32\n \t\t\t\t\", err %d\", (uint32_t)mem_alloc_list_tail->ptr_d, res);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tmem_alloc_list_tail->pkey = get_hash_from_ptr((void *)mem_alloc_list_tail->ptr_h);\n@@ -742,7 +768,8 @@ cuda_mem_register(struct rte_gpu *dev, size_t size, void *ptr)\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuCtxSetCurrent current failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \treturn 0;\n@@ -764,7 +791,8 @@ cuda_mem_free(struct rte_gpu *dev, void *ptr)\n \tmem_item = mem_list_find_item(hk);\n \tif (mem_item == NULL) {\n \t\trte_cuda_log(ERR, \"Memory address 0x%p not found in driver memory\", ptr);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tif (mem_item->mtype == GPU_MEM) {\n@@ -773,7 +801,8 @@ cuda_mem_free(struct rte_gpu *dev, void *ptr)\n \t\t\tpfn_cuGetErrorString(res, &(err_string));\n \t\t\trte_cuda_log(ERR, \"cuMemFree current failed with %s\",\n \t\t\t\t\terr_string);\n-\t\t\treturn -EPERM;\n+\t\t\trte_errno = EPERM;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\treturn mem_list_del_item(hk);\n@@ -800,7 +829,8 @@ cuda_mem_unregister(struct rte_gpu *dev, void *ptr)\n \tmem_item = mem_list_find_item(hk);\n \tif (mem_item == NULL) {\n \t\trte_cuda_log(ERR, \"Memory address 0x%p not found in driver memory\", ptr);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tif (mem_item->mtype == CPU_REGISTERED) {\n@@ -809,7 +839,8 @@ cuda_mem_unregister(struct rte_gpu *dev, void *ptr)\n \t\t\tpfn_cuGetErrorString(res, &(err_string));\n \t\t\trte_cuda_log(ERR, \"cuMemHostUnregister current failed with %s\",\n \t\t\t\t\terr_string);\n-\t\t\treturn -EPERM;\n+\t\t\trte_errno = EPERM;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\treturn mem_list_del_item(hk);\n@@ -817,7 +848,8 @@ cuda_mem_unregister(struct rte_gpu *dev, void *ptr)\n \n \trte_cuda_log(ERR, \"Memory type %d not supported\", mem_item->mtype);\n \n-\treturn -EPERM;\n+\trte_errno = EPERM;\n+\treturn -rte_errno;\n }\n \n static int\n@@ -840,8 +872,10 @@ cuda_wmb(struct rte_gpu *dev)\n \tCUcontext input_ctx;\n \tstruct cuda_info *private;\n \n-\tif (dev == NULL)\n-\t\treturn -ENODEV;\n+\tif (dev == NULL) {\n+\t\trte_errno = ENODEV;\n+\t\treturn -rte_errno;\n+\t}\n \n \tprivate = (struct cuda_info *)dev->mpshared->dev_private;\n \n@@ -860,7 +894,9 @@ cuda_wmb(struct rte_gpu *dev)\n \t\t */\n \t\trte_cuda_log(WARNING, \"Can't flush GDR writes with cuFlushGPUDirectRDMAWrites CUDA function.\"\n \t\t\t\t\"Application needs to use alternative methods.\");\n-\t\treturn -ENOTSUP;\n+\n+\t\trte_errno = ENOTSUP;\n+\t\treturn -rte_errno;\n \t}\n \n \t/* Store current ctx */\n@@ -869,7 +905,8 @@ cuda_wmb(struct rte_gpu *dev)\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuCtxGetCurrent failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn 
-EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \t/* Set child ctx as current ctx */\n@@ -879,7 +916,8 @@ cuda_wmb(struct rte_gpu *dev)\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuCtxSetCurrent input failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tres = pfn_cuFlushGPUDirectRDMAWrites(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX,\n@@ -888,7 +926,8 @@ cuda_wmb(struct rte_gpu *dev)\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuFlushGPUDirectRDMAWrites current failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \t/* Restore original ctx as current ctx */\n@@ -897,7 +936,8 @@ cuda_wmb(struct rte_gpu *dev)\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuCtxSetCurrent current failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \treturn 0;\n@@ -917,15 +957,18 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \n \tif (pci_dev == NULL) {\n \t\trte_cuda_log(ERR, \"NULL PCI device\");\n-\t\treturn -EINVAL;\n+\t\trte_errno = ENODEV;\n+\t\treturn -rte_errno;\n \t}\n \n \trte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));\n \n \t/* Allocate memory to be used privately by drivers */\n \tdev = rte_gpu_allocate(pci_dev->device.name);\n-\tif (dev == NULL)\n-\t\treturn -ENODEV;\n+\tif (dev == NULL) {\n+\t\trte_errno = ENODEV;\n+\t\treturn -rte_errno;\n+\t}\n \n \t/* Initialize values only for the first CUDA driver call */\n \tif (dev->mpshared->info.dev_id == 0) {\n@@ -936,13 +979,15 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\t/* Load libcuda.so library */\n \t\tif (cuda_loader()) {\n \t\t\trte_cuda_log(ERR, \"CUDA Driver library not found\");\n-\t\t\treturn -ENOTSUP;\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\t/* Load initial CUDA functions */\n \t\tif (cuda_sym_func_loader()) {\n \t\t\trte_cuda_log(ERR, \"CUDA functions not found in library\");\n-\t\t\treturn -ENOTSUP;\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\t/*\n@@ -955,7 +1000,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\tres = sym_cuDriverGetVersion(&cuda_driver_version);\n \t\tif (res != 0) {\n \t\t\trte_cuda_log(ERR, \"cuDriverGetVersion failed with %d\", res);\n-\t\t\treturn -ENOTSUP;\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\tif (cuda_driver_version < CUDA_DRIVER_MIN_VERSION) {\n@@ -963,12 +1009,14 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\t\t\t\t\"Minimum requirement is %d\",\n \t\t\t\t\tcuda_driver_version,\n \t\t\t\t\tCUDA_DRIVER_MIN_VERSION);\n-\t\t\treturn -ENOTSUP;\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn -rte_errno;\n \t\t}\n \n \t\tif (cuda_pfn_func_loader()) {\n \t\t\trte_cuda_log(ERR, \"CUDA PFN functions not found in library\");\n-\t\t\treturn -ENOTSUP;\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn -rte_errno;\n \t\t}\n \t}\n \n@@ -982,7 +1030,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuDeviceGetByPCIBusId name %s failed with %d: %s\",\n \t\t\t\tdev->device->name, res, err_string);\n-\t\treturn -EPERM;\n+\t\trte_errno 
= EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tres = pfn_cuDevicePrimaryCtxRetain(&pctx, cu_dev_id);\n@@ -990,19 +1039,22 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuDevicePrimaryCtxRetain name %s failed with %d: %s\",\n \t\t\t\tdev->device->name, res, err_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tres = pfn_cuCtxGetApiVersion(pctx, &cuda_api_version);\n \tif (res != 0) {\n \t\trte_cuda_log(ERR, \"cuCtxGetApiVersion failed with %d\", res);\n-\t\treturn -ENOTSUP;\n+\t\trte_errno = ENOTSUP;\n+\t\treturn -rte_errno;\n \t}\n \n \tif (cuda_api_version < CUDA_API_MIN_VERSION) {\n \t\trte_cuda_log(ERR, \"CUDA API version found is %d Minimum requirement is %d\",\n \t\t\t\tcuda_api_version, CUDA_API_MIN_VERSION);\n-\t\treturn -ENOTSUP;\n+\t\trte_errno = ENOTSUP;\n+\t\treturn -rte_errno;\n \t}\n \n \tdev->mpshared->info.context = (uint64_t)pctx;\n@@ -1019,7 +1071,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuDeviceGetAttribute failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \tdev->mpshared->info.processor_count = (uint32_t)processor_count;\n \n@@ -1029,7 +1082,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuDeviceTotalMem failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \t/*\n@@ -1040,7 +1094,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\t\tRTE_CACHE_LINE_SIZE);\n \tif (dev->mpshared->dev_private == NULL) {\n \t\trte_cuda_log(ERR, \"Failed to allocate memory for GPU process private\");\n-\t\treturn -ENOMEM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tprivate = (struct cuda_info *)dev->mpshared->dev_private;\n@@ -1052,7 +1107,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuDeviceGetName failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tres = pfn_cuDeviceGetAttribute(&(private->gdr_supported),\n@@ -1062,7 +1118,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\tpfn_cuGetErrorString(res, &(err_string));\n \t\trte_cuda_log(ERR, \"cuDeviceGetAttribute failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tif (private->gdr_supported == 0)\n@@ -1077,7 +1134,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\trte_cuda_log(ERR,\n \t\t\t\t\"cuDeviceGetAttribute failed with %s\",\n \t\t\t\terr_string);\n-\t\treturn -EPERM;\n+\t\trte_errno = EPERM;\n+\t\treturn -rte_errno;\n \t}\n \n \tif (private->gdr_write_ordering == CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE) {\n@@ -1088,7 +1146,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic\n \t\t\tpfn_cuGetErrorString(res, &(err_string));\n \t\t\trte_cuda_log(ERR, \"cuDeviceGetAttribute failed with %s\",\n \t\t\t\t\terr_string);\n-\t\t\treturn -EPERM;\n+\t\t\trte_errno = EPERM;\n+\t\t\treturn -rte_errno;\n 
\t\t}\n \n \t\tif (private->gdr_flush_type != CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST)\n@@ -1118,14 +1177,17 @@ cuda_gpu_remove(struct rte_pci_device *pci_dev)\n \tint ret;\n \tuint8_t gpu_id;\n \n-\tif (pci_dev == NULL)\n-\t\treturn -EINVAL;\n+\tif (pci_dev == NULL) {\n+\t\trte_errno = ENODEV;\n+\t\treturn -rte_errno;\n+\t}\n \n \tdev = rte_gpu_get_by_name(pci_dev->device.name);\n \tif (dev == NULL) {\n \t\trte_cuda_log(ERR, \"Couldn't find HW dev \\\"%s\\\" to uninitialise it\",\n \t\t\t\tpci_dev->device.name);\n-\t\treturn -ENODEV;\n+\t\trte_errno = ENODEV;\n+\t\treturn -rte_errno;\n \t}\n \tgpu_id = dev->mpshared->info.dev_id;\n \n",
    "prefixes": [
        "v2"
    ]
}
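
Most follow-up work with a response like the one above amounts to dereferencing the URLs it contains. The short sketch below (again Python with requests; the output filename is an arbitrary choice) reads the patch state and aggregate check result, downloads the raw patch via the "mbox" link, and lists the series the patch belongs to.

    import requests

    url = "http://patchwork.dpdk.org/api/patches/104662/"
    patch = requests.get(url).json()

    # "check" is the aggregate CI result; per-check detail is behind the "checks" URL
    print(f"{patch['name']} -- state: {patch['state']}, checks: {patch['check']}")

    # "mbox" points at the raw patch in mbox format, suitable for `git am`
    with open("patch-104662.mbox", "wb") as f:
        f.write(requests.get(patch["mbox"]).content)

    # each "series" entry links back to the series containing this patch
    for series in patch["series"]:
        print(f"series {series['id']} (v{series['version']}): {series['mbox']}")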