@@ -17,6 +17,8 @@
#include <cuda.h>
#include <cudaTypedefs.h>
+#include "gdrcopy.h"
+
#define CUDA_DRIVER_MIN_VERSION 11040
#define CUDA_API_MIN_VERSION 3020
@@ -52,6 +54,8 @@ static void *cudalib;
static unsigned int cuda_api_version;
static int cuda_driver_version;
+static gdr_t gdrc_h;
+
/* NVIDIA GPU vendor */
#define NVIDIA_GPU_VENDOR_ID (0x10de)
@@ -144,6 +148,7 @@ struct mem_entry {
struct rte_gpu *dev;
CUcontext ctx;
cuda_ptr_key pkey;
+ gdr_mh_t mh;
enum mem_type mtype;
struct mem_entry *prev;
struct mem_entry *next;
@@ -943,6 +948,87 @@ cuda_wmb(struct rte_gpu *dev)
return 0;
}
+/*
+ * Expose (map into CPU address space) a GPU memory area previously
+ * allocated through this driver, using GDRCopy/gdrdrv.
+ *
+ * On success, *ptr_out holds the CPU virtual address of the mapping
+ * (also cached in the mem_entry as ptr_h for cuda_mem_unexpose).
+ * The whole original allocation is always mapped; a size different
+ * from the original only triggers a warning.
+ *
+ * Returns 0 on success, -rte_errno on failure (ENOTSUP when GDRCopy
+ * is not built/loaded, EPERM on lookup/type/pin errors, ENODEV on
+ * NULL device).
+ */
+static int
+cuda_mem_expose(struct rte_gpu *dev, size_t size, void *ptr_in, void **ptr_out)
+{
+	struct mem_entry *mem_item;
+	cuda_ptr_key hk;
+
+	if (dev == NULL)
+		return -ENODEV;
+
+	if (gdrc_h == NULL) {
+		rte_cuda_log(ERR, "GDRCopy not built or loaded. Can't expose GPU memory.");
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+
+	hk = get_hash_from_ptr(ptr_in);
+
+	mem_item = mem_list_find_item(hk);
+	if (mem_item == NULL) {
+		rte_cuda_log(ERR, "Memory address %p not found in driver memory.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	/* Only device (GPU_MEM) allocations can be pinned through gdrdrv. */
+	if (mem_item->mtype != GPU_MEM) {
+		rte_cuda_log(ERR, "Memory address %p is not GPU memory type.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	if (mem_item->size != size)
+		rte_cuda_log(WARNING,
+				"Can't expose memory area with size (%zd) different from original size (%zd).",
+				size, mem_item->size);
+
+	if (gdrcopy_pin(gdrc_h, &(mem_item->mh), (uint64_t)mem_item->ptr_d,
+			mem_item->size, &(mem_item->ptr_h))) {
+		rte_cuda_log(ERR, "Error exposing GPU memory address %p.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	*ptr_out = mem_item->ptr_h;
+
+	return 0;
+}
+
+/*
+ * Remove the CPU mapping of a GPU memory area previously exposed with
+ * cuda_mem_expose and unpin the underlying device buffer.
+ *
+ * Returns 0 on success, -rte_errno on failure (ENOTSUP when GDRCopy
+ * is not built/loaded, EPERM on lookup/unpin errors, ENODEV on NULL
+ * device).
+ */
+static int
+cuda_mem_unexpose(struct rte_gpu *dev, void *ptr_in)
+{
+	struct mem_entry *mem_item;
+	cuda_ptr_key hk;
+
+	if (dev == NULL)
+		return -ENODEV;
+
+	if (gdrc_h == NULL) {
+		rte_cuda_log(ERR, "GDRCopy not built or loaded. Can't unexpose GPU memory.");
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+
+	hk = get_hash_from_ptr(ptr_in);
+
+	mem_item = mem_list_find_item(hk);
+	if (mem_item == NULL) {
+		rte_cuda_log(ERR, "Memory address %p not found in driver memory.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	/*
+	 * gdr_unmap() releases the CPU mapping created by gdr_map(), so it
+	 * must receive the host address (ptr_h) stored by cuda_mem_expose,
+	 * not the device address.
+	 */
+	if (gdrcopy_unpin(gdrc_h, mem_item->mh, mem_item->ptr_h,
+			mem_item->size)) {
+		rte_cuda_log(ERR, "Error unexposing GPU memory address %p.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
static int
cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
@@ -1018,6 +1104,19 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
rte_errno = ENOTSUP;
return -rte_errno;
}
+
+	/* GDRCopy is optional: failure only disables mem_expose/mem_unexpose. */
+	gdrc_h = NULL;
+
+	#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+	if (gdrcopy_loader())
+		rte_cuda_log(ERR, "GDRCopy shared library not found.");
+	else if (gdrcopy_open(&gdrc_h))
+		rte_cuda_log(ERR, "GDRCopy handler can't be created. Is gdrdrv driver installed and loaded?");
+	#endif
}
/* Fill HW specific part of device structure */
@@ -1160,6 +1259,8 @@ cuda_gpu_probe(__rte_unused struct rte_pci_driver *pci_drv, struct rte_pci_devic
dev->ops.mem_free = cuda_mem_free;
dev->ops.mem_register = cuda_mem_register;
dev->ops.mem_unregister = cuda_mem_unregister;
+ dev->ops.mem_expose = cuda_mem_expose;
+ dev->ops.mem_unexpose = cuda_mem_unexpose;
dev->ops.wmb = cuda_wmb;
rte_gpu_complete_new(dev);
new file mode 100644
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include "gdrcopy.h"
+
+static void *gdrclib;
+
+static gdr_t (*sym_gdr_open)(void);
+static int (*sym_gdr_close)(gdr_t g);
+static int (*sym_gdr_pin_buffer)(gdr_t g, unsigned long addr, size_t size, uint64_t p2p_token, uint32_t va_space, gdr_mh_t *handle);
+static int (*sym_gdr_unpin_buffer)(gdr_t g, gdr_mh_t handle);
+static int (*sym_gdr_map)(gdr_t g, gdr_mh_t handle, void **va, size_t size);
+static int (*sym_gdr_unmap)(gdr_t g, gdr_mh_t handle, void *va, size_t size);
+
+/*
+ * Dynamically load libgdrapi.so and resolve every GDRCopy symbol used
+ * by this driver. GDRCOPY_PATH_L, if set, is prepended verbatim to the
+ * library name and must therefore include a trailing '/'.
+ *
+ * Returns 0 on success, -1 when the library or any symbol is missing;
+ * on failure the dlopen handle is released so nothing is leaked.
+ */
+int
+gdrcopy_loader(void)
+{
+	char gdrcopy_path[1024];
+	const char *path_prefix = getenv("GDRCOPY_PATH_L");
+
+	if (path_prefix == NULL)
+		snprintf(gdrcopy_path, sizeof(gdrcopy_path), "%s", "libgdrapi.so");
+	else
+		snprintf(gdrcopy_path, sizeof(gdrcopy_path), "%s%s", path_prefix, "libgdrapi.so");
+
+	gdrclib = dlopen(gdrcopy_path, RTLD_LAZY);
+	if (gdrclib == NULL) {
+		/* Never pass a NULL string to %s: print a placeholder instead. */
+		fprintf(stderr, "Failed to find GDRCopy library in %s (GDRCOPY_PATH_L=%s)\n",
+				gdrcopy_path, path_prefix ? path_prefix : "(unset)");
+		return -1;
+	}
+
+	sym_gdr_open = dlsym(gdrclib, "gdr_open");
+	sym_gdr_close = dlsym(gdrclib, "gdr_close");
+	sym_gdr_pin_buffer = dlsym(gdrclib, "gdr_pin_buffer");
+	sym_gdr_unpin_buffer = dlsym(gdrclib, "gdr_unpin_buffer");
+	sym_gdr_map = dlsym(gdrclib, "gdr_map");
+	sym_gdr_unmap = dlsym(gdrclib, "gdr_unmap");
+
+	if (sym_gdr_open == NULL || sym_gdr_close == NULL ||
+			sym_gdr_pin_buffer == NULL || sym_gdr_unpin_buffer == NULL ||
+			sym_gdr_map == NULL || sym_gdr_unmap == NULL) {
+		fprintf(stderr, "Failed to load GDRCopy symbols\n");
+		/* Don't leak the library handle on partial symbol resolution. */
+		dlclose(gdrclib);
+		gdrclib = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Create a GDRCopy handle through the dynamically loaded gdr_open().
+ * On success *g receives the handle and 0 is returned; -1 is returned
+ * when gdr_open() fails. In builds without GDRCopy support, *g is set
+ * to NULL and 0 is returned.
+ */
+int
+gdrcopy_open(gdr_t *g)
+{
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+	gdr_t handle = sym_gdr_open();
+
+	if (handle == NULL)
+		return -1;
+
+	*g = handle;
+#else
+	*g = NULL;
+#endif
+	return 0;
+}
+
+/*
+ * Close a GDRCopy handle obtained with gdrcopy_open().
+ * Always returns 0; a no-op in builds without GDRCopy support.
+ */
+int
+gdrcopy_close(__rte_unused gdr_t *g)
+{
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+	sym_gdr_close(*g);
+#endif
+	return 0;
+}
+
+/*
+ * Pin a device buffer through gdrdrv and map it into the CPU address
+ * space. On success, *mh holds the GDRCopy handle (needed later by
+ * gdrcopy_unpin) and *h_addr the CPU virtual address of the mapping.
+ *
+ * Returns -ENOTSUP when no GDRCopy handle is available, -1 on pin or
+ * map failure (a failed map unwinds the pin), 0 on success. In builds
+ * without GDRCopy support this is a no-op returning 0.
+ *
+ * NOTE(review): d_addr presumably carries a CUdeviceptr value; gdrdrv
+ * alignment/size requirements are assumed to be met by the caller —
+ * confirm against gdrapi.h documentation.
+ */
+int
+gdrcopy_pin(gdr_t g, __rte_unused gdr_mh_t *mh, uint64_t d_addr, size_t size, void **h_addr)
+{
+	if (g == NULL)
+		return -ENOTSUP;
+
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+	/* Pin the device buffer */
+	if (sym_gdr_pin_buffer(g, d_addr, size, 0, 0, mh) != 0) {
+		fprintf(stderr, "sym_gdr_pin_buffer\n");
+		return -1;
+	}
+
+	/* Map the buffer to user space */
+	if (sym_gdr_map(g, *mh, h_addr, size) != 0) {
+		fprintf(stderr, "sym_gdr_map\n");
+		/* Undo the pin so a failed map leaves no dangling state. */
+		sym_gdr_unpin_buffer(g, *mh);
+		return -1;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * Release a GDRCopy mapping: unmap the CPU virtual address created by
+ * gdrcopy_pin, then unpin the device buffer. cpu_addr must be the host
+ * address returned by gdr_map (via gdrcopy_pin), not the device address.
+ *
+ * Returns -ENOTSUP when no GDRCopy handle is available, -1 when the
+ * unpin fails (an unmap failure is only logged), 0 on success. In
+ * builds without GDRCopy support this is a no-op returning 0.
+ */
+int
+gdrcopy_unpin(gdr_t g, __rte_unused gdr_mh_t mh, void *cpu_addr, size_t size)
+{
+	if (g == NULL)
+		return -ENOTSUP;
+
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+	/* Unmap the buffer from user space */
+	if (sym_gdr_unmap(g, mh, cpu_addr, size) != 0)
+		fprintf(stderr, "sym_gdr_unmap\n");
+
+	/* Unpin the device buffer */
+	if (sym_gdr_unpin_buffer(g, mh) != 0) {
+		fprintf(stderr, "sym_gdr_unpin_buffer\n");
+		return -1;
+	}
+#endif
+	return 0;
+}
new file mode 100644
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#ifndef CUDA_GDRCOPY_H
+#define CUDA_GDRCOPY_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <dlfcn.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_errno.h>
+
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+	#include <gdrapi.h>
+#else
+	/*
+	 * Stand-in definitions when gdrapi.h is not available.
+	 * gdr_mh_t must be a complete type: it is embedded by value in
+	 * struct mem_entry and passed by value to gdrcopy_unpin(), so an
+	 * opaque forward declaration would not compile. Mirror the layout
+	 * used by gdrapi.h (a single 64-bit opaque handle).
+	 */
+	struct gdr;
+	typedef struct gdr *gdr_t;
+	struct gdr_mh_s {
+		uint64_t h;
+	};
+	typedef struct gdr_mh_s gdr_mh_t;
+#endif
+
+int gdrcopy_loader(void);
+int gdrcopy_open(gdr_t *g);
+int gdrcopy_close(gdr_t *g);
+int gdrcopy_pin(gdr_t g, gdr_mh_t *mh, uint64_t d_addr, size_t size, void **h_addr);
+int gdrcopy_unpin(gdr_t g, gdr_mh_t mh, void *cpu_addr, size_t size);
+
+#endif /* CUDA_GDRCOPY_H */
@@ -17,5 +17,9 @@ if not cc.has_header('cudaTypedefs.h')
subdir_done()
endif
+# GDRCopy support is optional: when gdrapi.h is present, define the
+# build macro that enables exposing GPU memory to the CPU via gdrdrv.
+if cc.has_header('gdrapi.h')
+	dpdk_conf.set('DRIVERS_GPU_CUDA_GDRCOPY_H', 1)
+endif
+
deps += ['gpudev', 'pci', 'bus_pci']
-sources = files('cuda.c')
+sources = files('cuda.c', 'gdrcopy.c')