@@ -4260,12 +4260,12 @@ acc100_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
!strcmp(drv->driver.name, RTE_STR(ACC100PF_DRIVER_NAME));
((struct acc_device *) dev->data->dev_private)->mmio_base =
- pci_dev->mem_resource[0].addr;
+ pci_dev->pci_mem[0].mem_res.addr;
rte_bbdev_log_debug("Init device %s [%s] @ vaddr %p paddr %#"PRIx64"",
drv->driver.name, dev->data->name,
- (void *)pci_dev->mem_resource[0].addr,
- pci_dev->mem_resource[0].phys_addr);
+ (void *)pci_dev->pci_mem[0].mem_res.addr,
+ pci_dev->pci_mem[0].mem_res.phys_addr);
}
static int acc100_pci_probe(struct rte_pci_driver *pci_drv,
@@ -3349,7 +3349,7 @@ vrb_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
dev->dequeue_fft_ops = vrb_dequeue_fft;
d->pf_device = !strcmp(drv->driver.name, RTE_STR(VRB_PF_DRIVER_NAME));
- d->mmio_base = pci_dev->mem_resource[0].addr;
+ d->mmio_base = pci_dev->pci_mem[0].mem_res.addr;
/* Device variant specific handling. */
if ((pci_dev->id.device_id == RTE_VRB1_PF_DEVICE_ID) ||
@@ -3367,8 +3367,8 @@ vrb_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
rte_bbdev_log_debug("Init device %s [%s] @ vaddr %p paddr %#"PRIx64"",
drv->driver.name, dev->data->name,
- (void *)pci_dev->mem_resource[0].addr,
- pci_dev->mem_resource[0].phys_addr);
+ (void *)pci_dev->pci_mem[0].mem_res.addr,
+ pci_dev->pci_mem[0].mem_res.phys_addr);
}
static int vrb_pci_probe(struct rte_pci_driver *pci_drv,
@@ -2149,13 +2149,13 @@ fpga_5gnr_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
!strcmp(drv->driver.name,
RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
((struct fpga_5gnr_fec_device *) dev->data->dev_private)->mmio_base =
- pci_dev->mem_resource[0].addr;
+ pci_dev->pci_mem[0].mem_res.addr;
rte_bbdev_log_debug(
"Init device %s [%s] @ virtaddr %p phyaddr %#"PRIx64,
drv->driver.name, dev->data->name,
- (void *)pci_dev->mem_resource[0].addr,
- pci_dev->mem_resource[0].phys_addr);
+ (void *)pci_dev->pci_mem[0].mem_res.addr,
+ pci_dev->pci_mem[0].mem_res.phys_addr);
}
static int
@@ -2326,13 +2326,13 @@ fpga_lte_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
!strcmp(drv->driver.name,
RTE_STR(FPGA_LTE_FEC_PF_DRIVER_NAME));
((struct fpga_lte_fec_device *) dev->data->dev_private)->mmio_base =
- pci_dev->mem_resource[0].addr;
+ pci_dev->pci_mem[0].mem_res.addr;
rte_bbdev_log_debug(
"Init device %s [%s] @ virtaddr %p phyaddr %#"PRIx64,
drv->driver.name, dev->data->name,
- (void *)pci_dev->mem_resource[0].addr,
- pci_dev->mem_resource[0].phys_addr);
+ (void *)pci_dev->pci_mem[0].mem_res.addr,
+ pci_dev->pci_mem[0].mem_res.phys_addr);
}
static int
@@ -186,17 +186,17 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
/* if matching map is found, then use it */
offset = res_idx * pagesz;
mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
- (size_t)dev->mem_resource[res_idx].len, 0);
+ (size_t)dev->pci_mem[res_idx].mem_res.len, 0);
close(fd);
if (mapaddr == NULL)
goto error;
- maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
- maps[map_idx].size = dev->mem_resource[res_idx].len;
+ maps[map_idx].phaddr = dev->pci_mem[res_idx].mem_res.phys_addr;
+ maps[map_idx].size = dev->pci_mem[res_idx].mem_res.len;
maps[map_idx].addr = mapaddr;
maps[map_idx].offset = offset;
strcpy(maps[map_idx].path, devname);
- dev->mem_resource[res_idx].addr = mapaddr;
+ dev->pci_mem[res_idx].mem_res.addr = mapaddr;
return 0;
@@ -493,10 +493,10 @@ int rte_pci_write_config(const struct rte_pci_device *dev,
int rte_pci_mmio_read(const struct rte_pci_device *dev, int bar,
void *buf, size_t len, off_t offset)
{
- if (bar >= PCI_MAX_RESOURCE || dev->mem_resource[bar].addr == NULL ||
- (uint64_t)offset + len > dev->mem_resource[bar].len)
+ if (bar >= PCI_MAX_RESOURCE || dev->pci_mem[bar].mem_res.addr == NULL ||
+ (uint64_t)offset + len > dev->pci_mem[bar].mem_res.len)
return -1;
- memcpy(buf, (uint8_t *)dev->mem_resource[bar].addr + offset, len);
+ memcpy(buf, (uint8_t *)dev->pci_mem[bar].mem_res.addr + offset, len);
return len;
}
@@ -504,10 +504,10 @@ int rte_pci_mmio_read(const struct rte_pci_device *dev, int bar,
int rte_pci_mmio_write(const struct rte_pci_device *dev, int bar,
const void *buf, size_t len, off_t offset)
{
- if (bar >= PCI_MAX_RESOURCE || dev->mem_resource[bar].addr == NULL ||
- (uint64_t)offset + len > dev->mem_resource[bar].len)
+ if (bar >= PCI_MAX_RESOURCE || dev->pci_mem[bar].mem_res.addr == NULL ||
+ (uint64_t)offset + len > dev->pci_mem[bar].mem_res.len)
return -1;
- memcpy((uint8_t *)dev->mem_resource[bar].addr + offset, buf, len);
+ memcpy((uint8_t *)dev->pci_mem[bar].mem_res.addr + offset, buf, len);
return len;
}
@@ -28,6 +28,27 @@ enum rte_pci_kernel_driver {
RTE_PCI_KDRV_NET_UIO, /* NetUIO for Windows */
};
+struct rte_mem_map_area {
+ void *addr;
+ uint64_t offset;
+ uint64_t size;
+};
+
+struct rte_sparse_mem_map {
+ uint64_t phys_addr;
+ uint64_t len;
+ uint32_t nr_maps;
+ struct rte_mem_map_area *areas;
+};
+
+struct rte_pci_mem_resource {
+ bool is_sparse;
+ union {
+ struct rte_mem_resource mem_res;
+ struct rte_sparse_mem_map sparse_mem;
+ };
+};
+
/**
* A structure describing a PCI device.
*/
@@ -36,8 +57,7 @@ struct rte_pci_device {
struct rte_device device; /**< Inherit core device */
struct rte_pci_addr addr; /**< PCI location. */
struct rte_pci_id id; /**< PCI ID. */
- struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE];
- /**< PCI Memory Resource */
+ struct rte_pci_mem_resource pci_mem[PCI_MAX_RESOURCE]; /**< PCI Memory Resource */
struct rte_intr_handle *intr_handle; /**< Interrupt handle */
struct rte_pci_driver *driver; /**< PCI driver used in probing */
uint16_t max_vfs; /**< sriov enable if not zero */
@@ -179,7 +179,7 @@ pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
return -1;
}
- for (i = 0; i<PCI_MAX_RESOURCE; i++) {
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
if (fgets(buf, sizeof(buf), f) == NULL) {
RTE_LOG(ERR, EAL,
@@ -191,10 +191,10 @@ pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
goto error;
if (flags & IORESOURCE_MEM) {
- dev->mem_resource[i].phys_addr = phys_addr;
- dev->mem_resource[i].len = end_addr - phys_addr + 1;
+ dev->pci_mem[i].mem_res.phys_addr = phys_addr;
+ dev->pci_mem[i].mem_res.len = end_addr - phys_addr + 1;
/* not mapped for now */
- dev->mem_resource[i].addr = NULL;
+ dev->pci_mem[i].mem_res.addr = NULL;
}
}
fclose(f);
@@ -347,9 +347,8 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
dev2->max_vfs = dev->max_vfs;
dev2->id = dev->id;
pci_common_set(dev2);
- memmove(dev2->mem_resource,
- dev->mem_resource,
- sizeof(dev->mem_resource));
+ memmove(dev2->pci_mem, dev->pci_mem,
+ sizeof(dev->pci_mem));
} else {
/**
* If device is plugged and driver is
@@ -59,10 +59,10 @@ int
pci_uio_mmio_read(const struct rte_pci_device *dev, int bar,
void *buf, size_t len, off_t offset)
{
- if (bar >= PCI_MAX_RESOURCE || dev->mem_resource[bar].addr == NULL ||
- (uint64_t)offset + len > dev->mem_resource[bar].len)
+ if (bar >= PCI_MAX_RESOURCE || dev->pci_mem[bar].mem_res.addr == NULL ||
+ (uint64_t)offset + len > dev->pci_mem[bar].mem_res.len)
return -1;
- memcpy(buf, (uint8_t *)dev->mem_resource[bar].addr + offset, len);
+ memcpy(buf, (uint8_t *)dev->pci_mem[bar].mem_res.addr + offset, len);
return len;
}
@@ -70,10 +70,10 @@ int
pci_uio_mmio_write(const struct rte_pci_device *dev, int bar,
const void *buf, size_t len, off_t offset)
{
- if (bar >= PCI_MAX_RESOURCE || dev->mem_resource[bar].addr == NULL ||
- (uint64_t)offset + len > dev->mem_resource[bar].len)
+ if (bar >= PCI_MAX_RESOURCE || dev->pci_mem[bar].mem_res.addr == NULL ||
+ (uint64_t)offset + len > dev->pci_mem[bar].mem_res.len)
return -1;
- memcpy((uint8_t *)dev->mem_resource[bar].addr + offset, buf, len);
+ memcpy((uint8_t *)dev->pci_mem[bar].mem_res.addr + offset, buf, len);
return len;
}
@@ -388,22 +388,22 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
pci_map_addr = pci_find_max_end_va();
mapaddr = pci_map_resource(pci_map_addr, fd, 0,
- (size_t)dev->mem_resource[res_idx].len, 0);
+ (size_t)dev->pci_mem[res_idx].mem_res.len, 0);
close(fd);
if (mapaddr == NULL)
goto error;
pci_map_addr = RTE_PTR_ADD(mapaddr,
- (size_t)dev->mem_resource[res_idx].len);
+ (size_t)dev->pci_mem[res_idx].mem_res.len);
pci_map_addr = RTE_PTR_ALIGN(pci_map_addr, sysconf(_SC_PAGE_SIZE));
- maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
- maps[map_idx].size = dev->mem_resource[res_idx].len;
+ maps[map_idx].phaddr = dev->pci_mem[res_idx].mem_res.phys_addr;
+ maps[map_idx].size = dev->pci_mem[res_idx].mem_res.len;
maps[map_idx].addr = mapaddr;
maps[map_idx].offset = 0;
strcpy(maps[map_idx].path, devname);
- dev->mem_resource[res_idx].addr = mapaddr;
+ dev->pci_mem[res_idx].mem_res.addr = mapaddr;
return 0;
@@ -463,7 +463,7 @@ pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
RTE_LOG(DEBUG, EAL, "%s(): PIO BAR %08lx detected\n", __func__, base);
} else if (flags & IORESOURCE_MEM) {
- base = (unsigned long)dev->mem_resource[bar].addr;
+ base = (unsigned long)dev->pci_mem[bar].mem_res.addr;
RTE_LOG(DEBUG, EAL, "%s(): MMIO BAR %08lx detected\n", __func__, base);
} else {
RTE_LOG(ERR, EAL, "%s(): unknown BAR type\n", __func__);
@@ -673,6 +673,82 @@ pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
return 0;
}
+static int
+pci_vfio_sparse_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
+		struct vfio_region_sparse_mmap_area *vfio_areas,
+		uint32_t nr_areas, int bar_index, int additional_flags,
+		int numa_node)
+{
+	struct pci_map *map = &vfio_res->maps[bar_index];
+	struct rte_mem_map_area *area;
+	struct vfio_region_sparse_mmap_area *sparse;
+	void *bar_addr;
+	uint32_t i, j;
+
+	if (map->size == 0) {
+		RTE_LOG(DEBUG, EAL, "Bar size is 0, skip BAR%d\n", bar_index);
+		return 0;
+	}
+
+	/* Publish nr_areas only after the empty-BAR check, so cleanup
+	 * never walks areas of a skipped BAR. */
+	map->nr_areas = nr_areas;
+	if (!map->nr_areas) {
+		RTE_LOG(DEBUG, EAL, "Skip bar %d with no sparse mmap areas\n",
+			bar_index);
+		map->areas = NULL;
+		return 0;
+	}
+
+	if (map->areas == NULL) {
+		map->areas = rte_zmalloc_socket(NULL,
+				sizeof(*map->areas) * nr_areas,
+				RTE_CACHE_LINE_SIZE, numa_node);
+		if (map->areas == NULL) {
+			RTE_LOG(ERR, EAL,
+				"Cannot alloc memory for sparse map areas\n");
+			return -1;
+		}
+	}
+
+	for (i = 0; i < map->nr_areas; i++) {
+		area = &map->areas[i];
+		sparse = &vfio_areas[i];
+
+		bar_addr = mmap(map->addr, sparse->size, 0, MAP_PRIVATE |
+				MAP_ANONYMOUS | additional_flags, -1, 0);
+		if (bar_addr != MAP_FAILED) {
+			area->addr = pci_map_resource(bar_addr, vfio_dev_fd,
+				map->offset + sparse->offset, sparse->size,
+				RTE_MAP_FORCE_ADDRESS);
+			if (area->addr == NULL) {
+				munmap(bar_addr, sparse->size);
+				RTE_LOG(ERR, EAL, "Failed to map pci BAR%d\n",
+					bar_index);
+				goto err_map;
+			}
+
+			area->offset = sparse->offset;
+			area->size = sparse->size;
+		} else {
+			RTE_LOG(ERR, EAL, "Failed to create inaccessible mapping for BAR%d\n",
+				bar_index);
+			goto err_map;
+		}
+	}
+
+	return 0;
+
+err_map:
+	for (j = 0; j < i; j++)
+		pci_unmap_resource(map->areas[j].addr, map->areas[j].size);
+	/* Reset so a later retry does not reuse the freed array. */
+	rte_free(map->areas);
+	map->areas = NULL;
+	map->nr_areas = 0;
+	return -1;
+}
+
/*
* region info may contain capability headers, so we need to keep reallocating
* the memory until we match allocated memory size with argsz.
@@ -789,6 +865,31 @@ pci_vfio_fill_regions(struct rte_pci_device *dev, int vfio_dev_fd,
return 0;
}
+static void
+clean_up_pci_resource(struct mapped_pci_resource *vfio_res)
+{
+	struct pci_map *map;
+	uint32_t i, j;
+
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+		map = &vfio_res->maps[i];
+		if (map->nr_areas > 0) {
+			for (j = 0; j < map->nr_areas; j++)
+				pci_unmap_resource(map->areas[j].addr,
+					map->areas[j].size);
+			/* Free this BAR's areas array, not only the last one. */
+			rte_free(map->areas);
+			map->areas = NULL;
+			map->nr_areas = 0;
+		} else {
+			/* MSI-X BAR mappings need no special handling here. */
+			if (map->addr)
+				pci_unmap_resource(map->addr, map->size);
+		}
+	}
+
+}
+
static int
pci_vfio_map_resource_primary(struct rte_pci_device *dev)
{
@@ -875,6 +976,8 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
for (i = 0; i < vfio_res->nb_maps; i++) {
void *bar_addr;
+ struct vfio_info_cap_header *hdr;
+ struct vfio_region_info_cap_sparse_mmap *sparse;
ret = pci_vfio_get_region_info(vfio_dev_fd, ®, i);
if (ret < 0) {
@@ -920,15 +1023,39 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
maps[i].size = reg->size;
maps[i].path = NULL; /* vfio doesn't have per-resource paths */
- ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
- if (ret < 0) {
- RTE_LOG(ERR, EAL, "%s mapping BAR%i failed: %s\n",
- pci_addr, i, strerror(errno));
- free(reg);
- goto err_vfio_res;
- }
+ hdr = pci_vfio_info_cap(reg, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
+
+ if (hdr != NULL) {
+ sparse = container_of(hdr,
+ struct vfio_region_info_cap_sparse_mmap,
+ header);
+
+ ret = pci_vfio_sparse_mmap_bar(vfio_dev_fd, vfio_res,
+ sparse->areas, sparse->nr_areas, i, 0,
+ dev->device.numa_node);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "%s sparse mapping BAR%i failed: %s\n",
+ pci_addr, i, strerror(errno));
+ free(reg);
+ goto err_vfio_res;
+ }
- dev->mem_resource[i].addr = maps[i].addr;
+ dev->pci_mem[i].is_sparse = true;
+ dev->pci_mem[i].sparse_mem.len = maps[i].size;
+ dev->pci_mem[i].sparse_mem.nr_maps = maps[i].nr_areas;
+ dev->pci_mem[i].sparse_mem.areas = maps[i].areas;
+ } else {
+ ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "%s mapping BAR%i failed: %s\n",
+ pci_addr, i, strerror(errno));
+ free(reg);
+ goto err_vfio_res;
+ }
+
+ dev->pci_mem[i].is_sparse = false;
+ dev->pci_mem[i].mem_res.addr = maps[i].addr;
+ }
free(reg);
}
@@ -949,6 +1076,7 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
return 0;
err_vfio_res:
+ clean_up_pci_resource(vfio_res);
rte_free(vfio_res);
err_vfio_dev_fd:
rte_vfio_release_device(rte_pci_get_sysfs_path(),
@@ -968,7 +1096,7 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
struct mapped_pci_res_list *vfio_res_list =
RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
- struct pci_map *maps;
+ struct pci_map *maps, *cur;
if (rte_intr_fd_set(dev->intr_handle, -1))
return -1;
@@ -1008,14 +1136,50 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
maps = vfio_res->maps;
for (i = 0; i < vfio_res->nb_maps; i++) {
- ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, MAP_FIXED);
- if (ret < 0) {
- RTE_LOG(ERR, EAL, "%s mapping BAR%i failed: %s\n",
- pci_addr, i, strerror(errno));
- goto err_vfio_dev_fd;
- }
+		cur = &maps[i];
+		if (cur->nr_areas > 1) {
+			struct vfio_region_sparse_mmap_area *areas;
+			uint32_t j;
+
+			areas = malloc(sizeof(*areas) * cur->nr_areas);
+			if (areas == NULL) {
+				RTE_LOG(ERR, EAL, "Failed to alloc vfio areas for %s\n",
+					pci_addr);
+				goto err_vfio_dev_fd;
+			}
+
+			/* 'j' here: shadowing the outer BAR index 'i' would
+			 * leave it equal to nr_areas after this loop. */
+			for (j = 0; j < cur->nr_areas; j++) {
+				areas[j].offset = cur->areas[j].offset;
+				areas[j].size = cur->areas[j].size;
+			}
+
+			ret = pci_vfio_sparse_mmap_bar(vfio_dev_fd, vfio_res,
+				areas, cur->nr_areas, i, MAP_FIXED,
+				dev->device.numa_node);
+			if (ret < 0) {
+				RTE_LOG(ERR, EAL, "%s sparse mapping BAR%i failed: %s\n",
+					pci_addr, i, strerror(errno));
+				free(areas);
+				goto err_vfio_dev_fd;
+			}
-		dev->mem_resource[i].addr = maps[i].addr;
+			dev->pci_mem[i].is_sparse = true;
+			dev->pci_mem[i].sparse_mem.len = cur->size;
+			dev->pci_mem[i].sparse_mem.nr_maps = cur->nr_areas;
+ } else {
+ ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res,
+ i, MAP_FIXED);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "%s mapping BAR%i failed: %s\n",
+ pci_addr, i, strerror(errno));
+ goto err_vfio_dev_fd;
+ }
+
+ dev->pci_mem[i].is_sparse = false;
+ dev->pci_mem[i].mem_res.addr = cur->addr;
+ }
}
/* we need save vfio_dev_fd, so it can be used during release */
@@ -1052,8 +1216,6 @@ find_and_unmap_vfio_resource(struct mapped_pci_res_list *vfio_res_list,
const char *pci_addr)
{
struct mapped_pci_resource *vfio_res = NULL;
- struct pci_map *maps;
- int i;
/* Get vfio_res */
TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
@@ -1062,25 +1224,13 @@ find_and_unmap_vfio_resource(struct mapped_pci_res_list *vfio_res_list,
break;
}
- if (vfio_res == NULL)
+ if (vfio_res == NULL)
return vfio_res;
RTE_LOG(INFO, EAL, "Releasing PCI mapped resource for %s\n",
pci_addr);
- maps = vfio_res->maps;
- for (i = 0; i < vfio_res->nb_maps; i++) {
-
- /*
- * We do not need to be aware of MSI-X table BAR mappings as
- * when mapping. Just using current maps array is enough
- */
- if (maps[i].addr) {
- RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
- pci_addr, maps[i].addr);
- pci_unmap_resource(maps[i].addr, maps[i].size);
- }
- }
+ clean_up_pci_resource(vfio_res);
return vfio_res;
}
@@ -482,11 +482,10 @@ pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id,
dev->id.device_id);
- for (i = 0; i != sizeof(dev->mem_resource) /
- sizeof(dev->mem_resource[0]); i++) {
+ for (i = 0; i != PCI_MAX_RESOURCE; i++) {
fprintf(f, " %16.16"PRIx64" %16.16"PRIx64"\n",
- dev->mem_resource[i].phys_addr,
- dev->mem_resource[i].len);
+ dev->pci_mem[i].mem_res.phys_addr,
+ dev->pci_mem[i].mem_res.len);
}
return 0;
}
@@ -582,20 +581,38 @@ pci_find_device_by_addr(const void *failure_addr)
{
struct rte_pci_device *pdev = NULL;
uint64_t check_point, start, end, len;
- int i;
+ struct rte_pci_mem_resource *pci_mem;
+ struct rte_mem_map_area *ar;
+ uint32_t i, j;
check_point = (uint64_t)(uintptr_t)failure_addr;
FOREACH_DEVICE_ON_PCIBUS(pdev) {
- for (i = 0; i != RTE_DIM(pdev->mem_resource); i++) {
- start = (uint64_t)(uintptr_t)pdev->mem_resource[i].addr;
- len = pdev->mem_resource[i].len;
- end = start + len;
- if (check_point >= start && check_point < end) {
- RTE_LOG(DEBUG, EAL, "Failure address %16.16"
- PRIx64" belongs to device %s!\n",
- check_point, pdev->device.name);
- return pdev;
+ for (i = 0; i != PCI_MAX_RESOURCE; i++) {
+ pci_mem = &pdev->pci_mem[i];
+ if (pci_mem->is_sparse) {
+ for (j = 0; j != pci_mem->sparse_mem.nr_maps; j++) {
+ ar = &pci_mem->sparse_mem.areas[j];
+ start = (uint64_t)(uintptr_t)ar->addr;
+ len = ar->size;
+ end = start + len;
+ if (check_point >= start && check_point < end) {
+ RTE_LOG(DEBUG, EAL, "Failure address %16.16"
+ PRIx64" belongs to device %s!\n",
+ check_point, pdev->device.name);
+ return pdev;
+ }
+ }
+ } else {
+ start = (uint64_t)(uintptr_t)pci_mem->mem_res.addr;
+ len = pci_mem->mem_res.len;
+ end = start + len;
+ if (check_point >= start && check_point < end) {
+ RTE_LOG(DEBUG, EAL, "Failure address %16.16"
+ PRIx64" belongs to device %s!\n",
+ check_point, pdev->device.name);
+ return pdev;
+ }
}
}
}
@@ -71,7 +71,8 @@ pci_uio_map_secondary(struct rte_pci_device *dev)
}
return -1;
}
- dev->mem_resource[i].addr = mapaddr;
+ dev->pci_mem[i].is_sparse = false;
+ dev->pci_mem[i].mem_res.addr = mapaddr;
}
return 0;
}
@@ -108,7 +109,8 @@ pci_uio_map_resource(struct rte_pci_device *dev)
/* Map all BARs */
for (i = 0; i != PCI_MAX_RESOURCE; i++) {
/* skip empty BAR */
- phaddr = dev->mem_resource[i].phys_addr;
+ dev->pci_mem[i].is_sparse = false;
+ phaddr = dev->pci_mem[i].mem_res.phys_addr;
if (phaddr == 0)
continue;
@@ -164,10 +166,10 @@ pci_uio_remap_resource(struct rte_pci_device *dev)
/* Remap all BARs */
for (i = 0; i != PCI_MAX_RESOURCE; i++) {
/* skip empty BAR */
- if (dev->mem_resource[i].phys_addr == 0)
+ if (dev->pci_mem[i].mem_res.phys_addr == 0)
continue;
- map_address = mmap(dev->mem_resource[i].addr,
- (size_t)dev->mem_resource[i].len,
+ map_address = mmap(dev->pci_mem[i].mem_res.addr,
+ (size_t)dev->pci_mem[i].mem_res.len,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (map_address == MAP_FAILED) {
@@ -121,6 +121,8 @@ struct pci_map {
uint64_t offset;
uint64_t size;
uint64_t phaddr;
+ uint32_t nr_areas;
+ struct rte_mem_map_area *areas;
};
struct pci_msix_table {
@@ -1151,8 +1151,8 @@ dev_init(struct dev *dev, struct plt_pci_device *pci_dev)
if (!dev_cache_line_size_valid())
return -EFAULT;
- bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
- bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
+ bar2 = (uintptr_t)pci_dev->pci_mem[2].mem_res.addr;
+ bar4 = (uintptr_t)pci_dev->pci_mem[4].mem_res.addr;
if (bar2 == 0 || bar4 == 0) {
plt_err("Failed to get PCI bars");
rc = -ENODEV;
@@ -152,7 +152,7 @@ roc_dpi_dev_init(struct roc_dpi *roc_dpi)
struct plt_pci_device *pci_dev = roc_dpi->pci_dev;
uint16_t vfid;
- roc_dpi->rbase = pci_dev->mem_resource[0].addr;
+ roc_dpi->rbase = pci_dev->pci_mem[0].mem_res.addr;
vfid = ((pci_dev->addr.devid & 0x1F) << 3) |
(pci_dev->addr.function & 0x7);
vfid -= 1;
@@ -100,9 +100,9 @@ roc_ml_addr_pa_to_offset(struct roc_ml *roc_ml, uint64_t phys_addr)
struct ml *ml = roc_ml_to_ml_priv(roc_ml);
if (roc_model_is_cn10ka())
- return phys_addr - ml->pci_dev->mem_resource[0].phys_addr;
+ return phys_addr - ml->pci_dev->pci_mem[0].mem_res.phys_addr;
else
- return phys_addr - ml->pci_dev->mem_resource[0].phys_addr - ML_MLAB_BLK_OFFSET;
+ return phys_addr - ml->pci_dev->pci_mem[0].mem_res.phys_addr - ML_MLAB_BLK_OFFSET;
}
uint64_t
@@ -111,9 +111,9 @@ roc_ml_addr_offset_to_pa(struct roc_ml *roc_ml, uint64_t offset)
struct ml *ml = roc_ml_to_ml_priv(roc_ml);
if (roc_model_is_cn10ka())
- return ml->pci_dev->mem_resource[0].phys_addr + offset;
+ return ml->pci_dev->pci_mem[0].mem_res.phys_addr + offset;
else
- return ml->pci_dev->mem_resource[0].phys_addr + ML_MLAB_BLK_OFFSET + offset;
+ return ml->pci_dev->pci_mem[0].mem_res.phys_addr + ML_MLAB_BLK_OFFSET + offset;
}
void
@@ -543,13 +543,14 @@ roc_ml_dev_init(struct roc_ml *roc_ml)
ml->pci_dev = pci_dev;
dev->roc_ml = roc_ml;
- ml->ml_reg_addr = ml->pci_dev->mem_resource[0].addr;
+ ml->ml_reg_addr = ml->pci_dev->pci_mem[0].mem_res.addr;
ml->ml_mlr_base = 0;
ml->ml_mlr_base_saved = false;
- plt_ml_dbg("ML: PCI Physical Address : 0x%016lx", ml->pci_dev->mem_resource[0].phys_addr);
+ plt_ml_dbg("ML: PCI Physical Address : 0x%016lx",
+ ml->pci_dev->pci_mem[0].mem_res.phys_addr);
plt_ml_dbg("ML: PCI Virtual Address : 0x%016lx",
- PLT_U64_CAST(ml->pci_dev->mem_resource[0].addr));
+ PLT_U64_CAST(ml->pci_dev->pci_mem[0].mem_res.addr));
plt_spinlock_init(&roc_ml->sp_spinlock);
plt_spinlock_init(&roc_ml->fp_spinlock);
@@ -589,11 +590,12 @@ roc_ml_blk_init(struct roc_bphy *roc_bphy, struct roc_ml *roc_ml)
plt_ml_dbg(
"MLAB: Physical Address : 0x%016lx",
- PLT_PTR_ADD_U64_CAST(ml->pci_dev->mem_resource[0].phys_addr, ML_MLAB_BLK_OFFSET));
+ PLT_PTR_ADD_U64_CAST(ml->pci_dev->pci_mem[0].mem_res.phys_addr,
+ ML_MLAB_BLK_OFFSET));
plt_ml_dbg("MLAB: Virtual Address : 0x%016lx",
- PLT_PTR_ADD_U64_CAST(ml->pci_dev->mem_resource[0].addr, ML_MLAB_BLK_OFFSET));
+ PLT_PTR_ADD_U64_CAST(ml->pci_dev->pci_mem[0].mem_res.addr, ML_MLAB_BLK_OFFSET));
- ml->ml_reg_addr = PLT_PTR_ADD(ml->pci_dev->mem_resource[0].addr, ML_MLAB_BLK_OFFSET);
+ ml->ml_reg_addr = PLT_PTR_ADD(ml->pci_dev->pci_mem[0].mem_res.addr, ML_MLAB_BLK_OFFSET);
ml->ml_mlr_base = 0;
ml->ml_mlr_base_saved = false;
@@ -214,7 +214,7 @@ qat_reset_ring_pairs_gen1(struct qat_pci_device *qat_pci_dev __rte_unused)
const struct rte_mem_resource *
qat_dev_get_transport_bar_gen1(struct rte_pci_device *pci_dev)
{
- return &pci_dev->mem_resource[0];
+ return &pci_dev->pci_mem[0].mem_res;
}
int
@@ -271,14 +271,14 @@ qat_reset_ring_pairs_gen4(struct qat_pci_device *qat_pci_dev)
static const struct rte_mem_resource *
qat_dev_get_transport_bar_gen4(struct rte_pci_device *pci_dev)
{
- return &pci_dev->mem_resource[0];
+ return &pci_dev->pci_mem[0].mem_res;
}
static int
qat_dev_get_misc_bar_gen4(struct rte_mem_resource **mem_resource,
struct rte_pci_device *pci_dev)
{
- *mem_resource = &pci_dev->mem_resource[2];
+ *mem_resource = &pci_dev->pci_mem[2].mem_res;
return 0;
}
@@ -77,7 +77,7 @@ sfc_efx_find_mem_bar(efsys_pci_config_t *configp, int bar_index,
result.esb_rid = bar_index;
result.esb_dev = dev;
- result.esb_base = dev->mem_resource[bar_index].addr;
+ result.esb_base = dev->pci_mem[bar_index].mem_res.addr;
*barp = result;
@@ -149,10 +149,10 @@ zipvf_create(struct rte_compressdev *compressdev)
void *vbar0;
uint64_t reg;
- if (pdev->mem_resource[0].phys_addr == 0ULL)
+ if (pdev->pci_mem[0].mem_res.phys_addr == 0ULL)
return -EIO;
- vbar0 = pdev->mem_resource[0].addr;
+ vbar0 = pdev->pci_mem[0].mem_res.addr;
if (!vbar0) {
ZIP_PMD_ERR("Failed to map BAR0 of %s", dev_name);
return -ENODEV;
@@ -67,7 +67,7 @@ ccp_read_hwrng(uint32_t *value)
struct ccp_device *dev;
TAILQ_FOREACH(dev, &ccp_list, next) {
- void *vaddr = (void *)(dev->pci->mem_resource[2].addr);
+ void *vaddr = (void *)(dev->pci->pci_mem[2].mem_res.addr);
while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
*value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
@@ -493,7 +493,7 @@ ccp_add_device(struct ccp_device *dev)
dev->id = ccp_dev_id++;
dev->qidx = 0;
- vaddr = (void *)(dev->pci->mem_resource[2].addr);
+ vaddr = (void *)(dev->pci->pci_mem[2].mem_res.addr);
if (dev->pci->id.device_id == AMD_PCI_CCP_5B) {
CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
@@ -400,7 +400,7 @@ cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
pci_dev = RTE_DEV_TO_PCI(dev->device);
- if (pci_dev->mem_resource[2].addr == NULL) {
+ if (pci_dev->pci_mem[2].mem_res.addr == NULL) {
plt_err("Invalid PCI mem address");
return -EIO;
}
@@ -35,7 +35,7 @@ ndev_init(struct nitrox_device *ndev, struct rte_pci_device *pdev)
enum nitrox_vf_mode vf_mode;
ndev->pdev = pdev;
- ndev->bar_addr = pdev->mem_resource[0].addr;
+ ndev->bar_addr = pdev->pci_mem[0].mem_res.addr;
vf_mode = vf_get_vf_config_mode(ndev->bar_addr);
ndev->nr_queues = vf_config_mode_to_nr_queues(vf_mode);
}
@@ -70,7 +70,7 @@ nitrox_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
int err;
/* Nitrox CSR space */
- if (!pdev->mem_resource[0].addr)
+ if (!pdev->pci_mem[0].mem_res.addr)
return -EINVAL;
ndev = ndev_allocate(pdev);
@@ -157,7 +157,7 @@ otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
pci_dev = RTE_DEV_TO_PCI(dev->device);
- if (pci_dev->mem_resource[0].addr == NULL) {
+ if (pci_dev->pci_mem[0].mem_res.addr == NULL) {
CPT_LOG_ERR("PCI mem address null");
return -EIO;
}
@@ -1004,7 +1004,7 @@ otx_cpt_dev_create(struct rte_cryptodev *c_dev)
char dev_name[32];
int ret;
- if (pdev->mem_resource[0].phys_addr == 0ULL)
+ if (pdev->pci_mem[0].mem_res.phys_addr == 0ULL)
return -EIO;
/* for secondary processes, we don't initialise any further as primary
@@ -1025,7 +1025,7 @@ otx_cpt_dev_create(struct rte_cryptodev *c_dev)
snprintf(dev_name, 32, "%02x:%02x.%x",
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
- reg_base = pdev->mem_resource[0].addr;
+ reg_base = pdev->pci_mem[0].mem_res.addr;
if (!reg_base) {
CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
ret = -ENODEV;
@@ -322,14 +322,14 @@ get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
return NULL;
}
- if (offset + length > dev->mem_resource[bar].len) {
+ if (offset + length > dev->pci_mem[bar].mem_res.len) {
VIRTIO_CRYPTO_INIT_LOG_ERR(
"invalid cap: overflows bar space: %u > %" PRIu64,
- offset + length, dev->mem_resource[bar].len);
+ offset + length, dev->pci_mem[bar].mem_res.len);
return NULL;
}
- base = dev->mem_resource[bar].addr;
+ base = dev->pci_mem[bar].mem_res.addr;
if (base == NULL) {
VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
return NULL;
@@ -637,7 +637,7 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct roc_dpi *rdpi = NULL;
int rc;
- if (!pci_dev->mem_resource[0].addr)
+ if (!pci_dev->pci_mem[0].mem_res.addr)
return -ENODEV;
rc = roc_plt_init();
@@ -894,7 +894,7 @@ hisi_dma_create(struct rte_pci_device *pci_dev, uint8_t queue_id,
hw->data = dev->data;
hw->revision = revision;
hw->reg_layout = hisi_dma_reg_layout(revision);
- hw->io_base = pci_dev->mem_resource[REG_PCI_BAR_INDEX].addr;
+ hw->io_base = pci_dev->pci_mem[REG_PCI_BAR_INDEX].mem_res.addr;
hw->queue_id = queue_id;
hw->sq_tail_reg = hisi_dma_queue_regaddr(hw,
HISI_DMA_QUEUE_SQ_TAIL_REG);
@@ -950,7 +950,7 @@ hisi_dma_probe(struct rte_pci_driver *pci_drv __rte_unused,
rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
- if (pci_dev->mem_resource[2].addr == NULL) {
+ if (pci_dev->pci_mem[2].mem_res.addr == NULL) {
HISI_DMA_LOG(ERR, "%s BAR2 is NULL!\n", name);
return -ENODEV;
}
@@ -961,7 +961,7 @@ hisi_dma_probe(struct rte_pci_driver *pci_drv __rte_unused,
HISI_DMA_LOG(DEBUG, "%s read PCI revision: 0x%x", name, revision);
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- hisi_dma_init_gbl(pci_dev->mem_resource[2].addr, revision);
+ hisi_dma_init_gbl(pci_dev->pci_mem[2].mem_res.addr, revision);
for (i = 0; i < HISI_DMA_MAX_HW_QUEUES; i++) {
ret = hisi_dma_create(pci_dev, i, revision);
@@ -188,12 +188,12 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
rte_spinlock_init(&pci->lk);
/* assign the bar registers, and then configure device */
- pci->regs = dev->mem_resource[0].addr;
+ pci->regs = dev->pci_mem[0].mem_res.addr;
grp_offset = (uint16_t)pci->regs->offsets[0];
pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
pci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
- pci->portals = dev->mem_resource[2].addr;
+ pci->portals = dev->pci_mem[2].mem_res.addr;
pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;
/* sanity check device status */
@@ -644,7 +644,7 @@ ioat_dmadev_create(const char *name, struct rte_pci_device *dev)
ioat = dmadev->data->dev_private;
ioat->dmadev = dmadev;
- ioat->regs = dev->mem_resource[0].addr;
+ ioat->regs = dev->pci_mem[0].mem_res.addr;
ioat->doorbell = &ioat->regs->dmacount;
ioat->qcfg.nb_desc = 0;
ioat->desc_ring = NULL;
@@ -170,32 +170,32 @@ dlb2_probe(struct rte_pci_device *pdev, const void *probe_args)
*/
/* BAR 0 */
- if (pdev->mem_resource[0].addr == NULL) {
+ if (pdev->pci_mem[0].mem_res.addr == NULL) {
DLB2_ERR(dlb2_dev, "probe: BAR 0 addr (csr_kva) is NULL\n");
ret = -EINVAL;
goto pci_mmap_bad_addr;
}
- dlb2_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
- dlb2_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
+ dlb2_dev->hw.func_kva = (void *)(uintptr_t)pdev->pci_mem[0].mem_res.addr;
+ dlb2_dev->hw.func_phys_addr = pdev->pci_mem[0].mem_res.phys_addr;
DLB2_INFO(dlb2_dev, "DLB2 FUNC VA=%p, PA=%p, len=%p\n",
(void *)dlb2_dev->hw.func_kva,
(void *)dlb2_dev->hw.func_phys_addr,
- (void *)(pdev->mem_resource[0].len));
+ (void *)(pdev->pci_mem[0].mem_res.len));
/* BAR 2 */
- if (pdev->mem_resource[2].addr == NULL) {
+ if (pdev->pci_mem[2].mem_res.addr == NULL) {
DLB2_ERR(dlb2_dev, "probe: BAR 2 addr (func_kva) is NULL\n");
ret = -EINVAL;
goto pci_mmap_bad_addr;
}
- dlb2_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
- dlb2_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
+ dlb2_dev->hw.csr_kva = (void *)(uintptr_t)pdev->pci_mem[2].mem_res.addr;
+ dlb2_dev->hw.csr_phys_addr = pdev->pci_mem[2].mem_res.phys_addr;
DLB2_INFO(dlb2_dev, "DLB2 CSR VA=%p, PA=%p, len=%p\n",
(void *)dlb2_dev->hw.csr_kva,
(void *)dlb2_dev->hw.csr_phys_addr,
- (void *)(pdev->mem_resource[2].len));
+ (void *)(pdev->pci_mem[2].mem_res.len));
dlb2_dev->pdev = pdev;
@@ -148,23 +148,23 @@ ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- if (pci_dev->mem_resource[0].addr == NULL ||
- pci_dev->mem_resource[2].addr == NULL ||
- pci_dev->mem_resource[4].addr == NULL) {
+ if (pci_dev->pci_mem[0].mem_res.addr == NULL ||
+ pci_dev->pci_mem[2].mem_res.addr == NULL ||
+ pci_dev->pci_mem[4].mem_res.addr == NULL) {
mbox_log_err("Empty bars %p %p %p",
- pci_dev->mem_resource[0].addr,
- pci_dev->mem_resource[2].addr,
- pci_dev->mem_resource[4].addr);
+ pci_dev->pci_mem[0].mem_res.addr,
+ pci_dev->pci_mem[2].mem_res.addr,
+ pci_dev->pci_mem[4].mem_res.addr);
return -ENODEV;
}
- if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) {
+ if (pci_dev->pci_mem[4].mem_res.len != SSOW_BAR4_LEN) {
mbox_log_err("Bar4 len mismatch %d != %d",
- SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len);
+ SSOW_BAR4_LEN, (int)pci_dev->pci_mem[4].mem_res.len);
return -EINVAL;
}
- id = pci_dev->mem_resource[4].addr;
+ id = pci_dev->pci_mem[4].mem_res.addr;
vfid = id->vfid;
if (vfid >= SSO_MAX_VHWS) {
mbox_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS);
@@ -173,9 +173,9 @@ ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
res = &sdev.hws[vfid];
res->vfid = vfid;
- res->bar0 = pci_dev->mem_resource[0].addr;
- res->bar2 = pci_dev->mem_resource[2].addr;
- res->bar4 = pci_dev->mem_resource[4].addr;
+ res->bar0 = pci_dev->pci_mem[0].mem_res.addr;
+ res->bar2 = pci_dev->pci_mem[2].mem_res.addr;
+ res->bar4 = pci_dev->pci_mem[4].mem_res.addr;
res->domain = id->domain;
sdev.total_ssowvfs++;
@@ -229,14 +229,14 @@ ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- if (pci_dev->mem_resource[0].addr == NULL ||
- pci_dev->mem_resource[2].addr == NULL) {
+ if (pci_dev->pci_mem[0].mem_res.addr == NULL ||
+ pci_dev->pci_mem[2].mem_res.addr == NULL) {
mbox_log_err("Empty bars %p %p",
- pci_dev->mem_resource[0].addr,
- pci_dev->mem_resource[2].addr);
+ pci_dev->pci_mem[0].mem_res.addr,
+ pci_dev->pci_mem[2].mem_res.addr);
return -ENODEV;
}
- idreg = pci_dev->mem_resource[0].addr;
+ idreg = pci_dev->pci_mem[0].mem_res.addr;
idreg += SSO_VHGRP_AQ_THR;
val = rte_read64(idreg);
@@ -250,8 +250,8 @@ ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
res = &sdev.grp[vfid];
res->vfid = vfid;
- res->bar0 = pci_dev->mem_resource[0].addr;
- res->bar2 = pci_dev->mem_resource[2].addr;
+ res->bar0 = pci_dev->pci_mem[0].mem_res.addr;
+ res->bar2 = pci_dev->pci_mem[2].mem_res.addr;
res->domain = val & 0xffff;
sdev.total_ssovfs++;
@@ -112,15 +112,15 @@ timvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- if (pci_dev->mem_resource[0].addr == NULL ||
- pci_dev->mem_resource[4].addr == NULL) {
+ if (pci_dev->pci_mem[0].mem_res.addr == NULL ||
+ pci_dev->pci_mem[4].mem_res.addr == NULL) {
timvf_log_err("Empty bars %p %p",
- pci_dev->mem_resource[0].addr,
- pci_dev->mem_resource[4].addr);
+ pci_dev->pci_mem[0].mem_res.addr,
+ pci_dev->pci_mem[4].mem_res.addr);
return -ENODEV;
}
- val = rte_read64((uint8_t *)pci_dev->mem_resource[0].addr +
+ val = rte_read64((uint8_t *)pci_dev->pci_mem[0].mem_res.addr +
0x100 /* TIM_VRINGX_BASE */);
vfid = (val >> 23) & 0xff;
if (vfid >= TIM_MAX_RINGS) {
@@ -130,16 +130,16 @@ timvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
res = &tdev.rings[tdev.total_timvfs];
res->vfid = vfid;
- res->bar0 = pci_dev->mem_resource[0].addr;
- res->bar2 = pci_dev->mem_resource[2].addr;
- res->bar4 = pci_dev->mem_resource[4].addr;
+ res->bar0 = pci_dev->pci_mem[0].mem_res.addr;
+ res->bar2 = pci_dev->pci_mem[2].mem_res.addr;
+ res->bar4 = pci_dev->pci_mem[4].mem_res.addr;
res->domain = (val >> 7) & 0xffff;
res->in_use = false;
tdev.total_timvfs++;
rte_wmb();
timvf_log_dbg("Domain=%d VFid=%d bar0 %p total_timvfs=%d", res->domain,
- res->vfid, pci_dev->mem_resource[0].addr,
+ res->vfid, pci_dev->pci_mem[0].mem_res.addr,
tdev.total_timvfs);
return 0;
}
@@ -360,7 +360,7 @@ skeleton_eventdev_init(struct rte_eventdev *eventdev)
pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
- skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
+ skel->reg_base = (uintptr_t)pci_dev->pci_mem[0].mem_res.addr;
if (!skel->reg_base) {
PMD_DRV_ERR("Failed to map BAR0");
ret = -ENODEV;
@@ -785,11 +785,11 @@ fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- if (pci_dev->mem_resource[0].addr == NULL) {
- fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
+ if (pci_dev->pci_mem[0].mem_res.addr == NULL) {
+ fpavf_log_err("Empty bars %p ", pci_dev->pci_mem[0].mem_res.addr);
return -ENODEV;
}
- idreg = pci_dev->mem_resource[0].addr;
+ idreg = pci_dev->pci_mem[0].mem_res.addr;
octeontx_fpavf_setup();
@@ -329,8 +329,8 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
- ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
- ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
+ ark->bar0 = (uint8_t *)pci_dev->pci_mem[0].mem_res.addr;
+ ark->a_bar = (uint8_t *)pci_dev->pci_mem[2].mem_res.addr;
ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE];
ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE];
@@ -384,7 +384,7 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
- hw->mmio = (void *)pci_dev->mem_resource[0].addr;
+ hw->mmio = (void *)pci_dev->pci_mem[0].mem_res.addr;
/* Hardware configuration - hardcode */
adapter->hw_cfg.is_lro = false;
@@ -368,8 +368,8 @@ avp_dev_translate_address(struct rte_eth_dev *eth_dev,
void *addr;
unsigned int i;
- addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
- resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
+ addr = pci_dev->pci_mem[RTE_AVP_PCI_MEMORY_BAR].mem_res.addr;
+ resource = &pci_dev->pci_mem[RTE_AVP_PCI_MEMMAP_BAR].mem_res;
info = (struct rte_avp_memmap_info *)resource->addr;
offset = 0;
@@ -421,7 +421,7 @@ avp_dev_check_regions(struct rte_eth_dev *eth_dev)
/* Dump resource info for debug */
for (i = 0; i < PCI_MAX_RESOURCE; i++) {
- resource = &pci_dev->mem_resource[i];
+ resource = &pci_dev->pci_mem[i].mem_res;
if ((resource->phys_addr == 0) || (resource->len == 0))
continue;
@@ -554,7 +554,7 @@ _avp_set_queue_counts(struct rte_eth_dev *eth_dev)
struct rte_avp_device_info *host_info;
void *addr;
- addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
+ addr = pci_dev->pci_mem[RTE_AVP_PCI_DEVICE_BAR].mem_res.addr;
host_info = (struct rte_avp_device_info *)addr;
/*
@@ -664,7 +664,7 @@ avp_dev_interrupt_handler(void *data)
{
struct rte_eth_dev *eth_dev = data;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ void *registers = pci_dev->pci_mem[RTE_AVP_PCI_MMIO_BAR].mem_res.addr;
uint32_t status, value;
int ret;
@@ -723,7 +723,7 @@ static int
avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ void *registers = pci_dev->pci_mem[RTE_AVP_PCI_MMIO_BAR].mem_res.addr;
int ret;
if (registers == NULL)
@@ -748,7 +748,7 @@ static int
avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ void *registers = pci_dev->pci_mem[RTE_AVP_PCI_MMIO_BAR].mem_res.addr;
int ret;
if (registers == NULL)
@@ -793,7 +793,7 @@ static int
avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ void *registers = pci_dev->pci_mem[RTE_AVP_PCI_MMIO_BAR].mem_res.addr;
uint32_t value;
if (registers == NULL)
@@ -824,7 +824,7 @@ avp_dev_create(struct rte_pci_device *pci_dev,
struct rte_mem_resource *resource;
unsigned int i;
- resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
+ resource = &pci_dev->pci_mem[RTE_AVP_PCI_DEVICE_BAR].mem_res;
if (resource->addr == NULL) {
PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
RTE_AVP_PCI_DEVICE_BAR);
@@ -1992,7 +1992,7 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
goto unlock;
}
- addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
+ addr = pci_dev->pci_mem[RTE_AVP_PCI_DEVICE_BAR].mem_res.addr;
host_info = (struct rte_avp_device_info *)addr;
/* Setup required number of queues */
@@ -2216,12 +2216,12 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
pdata->pci_dev = pci_dev;
pdata->xgmac_regs =
- (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
+ (void *)pci_dev->pci_mem[AXGBE_AXGMAC_BAR].mem_res.addr;
pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ AXGBE_MAC_PROP_OFFSET);
pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ AXGBE_I2C_CTRL_OFFSET);
- pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
+ pdata->xpcs_regs = (void *)pci_dev->pci_mem[AXGBE_XPCS_BAR].mem_res.addr;
/* version specific driver data*/
if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
@@ -656,12 +656,12 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
sc->flags = BNX2X_IS_VF_FLAG;
sc->pcie_func = pci_dev->addr.function;
- sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
+ sc->bar[BAR0].base_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
if (is_vf)
sc->bar[BAR1].base_addr = (void *)
- ((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
+ ((uintptr_t)pci_dev->pci_mem[0].mem_res.addr + PXP_VF_ADDR_DB_START);
else
- sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;
+ sc->bar[BAR1].base_addr = pci_dev->pci_mem[2].mem_res.addr;
assert(sc->bar[BAR0].base_addr);
assert(sc->bar[BAR1].base_addr);
@@ -4682,8 +4682,8 @@ static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev)
struct bnxt *bp = eth_dev->data->dev_private;
/* enable device (incl. PCI PM wakeup), and bus-mastering */
- bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
- bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
+ bp->bar0 = (void *)pci_dev->pci_mem[0].mem_res.addr;
+ bp->doorbell_base = (void *)pci_dev->pci_mem[2].mem_res.addr;
if (!bp->bar0 || !bp->doorbell_base) {
PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
return -ENODEV;
@@ -5932,8 +5932,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
PMD_DRV_LOG(INFO,
"Found %s device at mem %" PRIX64 ", node addr %pM\n",
DRV_MODULE_NAME,
- pci_dev->mem_resource[0].phys_addr,
- pci_dev->mem_resource[0].addr);
+ pci_dev->pci_mem[0].mem_res.phys_addr,
+ pci_dev->pci_mem[0].mem_res.addr);
return 0;
@@ -1172,8 +1172,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
struct idpf_hw *hw = &base->hw;
int ret = 0;
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
- hw->hw_addr_len = pci_dev->mem_resource[0].len;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
+ hw->hw_addr_len = pci_dev->pci_mem[0].mem_res.len;
hw->back = base;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
@@ -1729,7 +1729,7 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
return -1;
adapter->use_unpacked_mode = 1;
- adapter->regs = (void *)pci_dev->mem_resource[0].addr;
+ adapter->regs = (void *)pci_dev->pci_mem[0].mem_res.addr;
if (!adapter->regs) {
dev_err(adapter, "%s: cannot map device registers\n", __func__);
err = -ENOMEM;
@@ -2228,7 +2228,7 @@ int cxgbe_probe(struct adapter *adapter)
if (qpp > num_seg)
dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");
- adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
+ adapter->bar2 = (void *)adapter->pdev->pci_mem[2].mem_res.addr;
if (!adapter->bar2) {
dev_err(adapter, "cannot map device bar2 region\n");
err = -ENOMEM;
@@ -148,7 +148,7 @@ static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev)
return -1;
adapter->use_unpacked_mode = 1;
- adapter->regs = (void *)pci_dev->mem_resource[0].addr;
+ adapter->regs = (void *)pci_dev->pci_mem[0].mem_res.addr;
if (!adapter->regs) {
dev_err(adapter, "%s: cannot map device registers\n", __func__);
err = -ENOMEM;
@@ -184,7 +184,7 @@ int cxgbevf_probe(struct adapter *adapter)
return err;
if (!is_t4(adapter->params.chip)) {
- adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
+ adapter->bar2 = (void *)adapter->pdev->pci_mem[2].mem_res.addr;
if (!adapter->bar2) {
dev_err(adapter, "cannot map device bar2 region\n");
err = -ENOMEM;
@@ -265,13 +265,13 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
rte_eth_copy_pci_info(eth_dev, pci_dev);
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
hw->device_id = pci_dev->id.device_id;
adapter->stopped = 0;
/* For ICH8 support we'll need to map the flash memory BAR */
if (eth_em_dev_is_ich8(hw))
- hw->flash_address = (void *)pci_dev->mem_resource[1].addr;
+ hw->flash_address = (void *)pci_dev->pci_mem[1].mem_res.addr;
if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
em_hw_init(hw) != 0) {
@@ -743,7 +743,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
rte_eth_copy_pci_info(eth_dev, pci_dev);
- hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
igb_identify_hardware(eth_dev, pci_dev);
if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
@@ -938,7 +938,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
adapter->stopped = 0;
/* Initialize the shared code (base driver) */
@@ -2121,8 +2121,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
intr_handle = pci_dev->intr_handle;
- adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
- adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
+ adapter->regs = pci_dev->pci_mem[ENA_REGS_BAR].mem_res.addr;
+ adapter->dev_mem_base = pci_dev->pci_mem[ENA_MEM_BAR].mem_res.addr;
if (!adapter->regs) {
PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
@@ -883,7 +883,7 @@ enetc_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
/* Retrieving and storing the HW base address of device */
- hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw.reg = (void *)pci_dev->pci_mem[0].mem_res.addr;
hw->device_id = pci_dev->id.device_id;
error = enetc_hardware_init(hw);
@@ -1914,8 +1914,8 @@ int enic_probe(struct enic *enic)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
- enic->bar0.len = pdev->mem_resource[0].len;
+ enic->bar0.vaddr = (void *)pdev->pci_mem[0].mem_res.addr;
+ enic->bar0.len = pdev->pci_mem[0].mem_res.len;
/* Register vNIC device */
enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
@@ -3090,7 +3090,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
hw->subsystem_device_id = pdev->id.subsystem_device_id;
hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
hw->revision_id = 0;
- hw->hw_addr = (void *)pdev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pdev->pci_mem[0].mem_res.addr;
if (hw->hw_addr == NULL) {
PMD_INIT_LOG(ERR, "Bad mem resource."
" Try to refuse unused devices.");
@@ -777,13 +777,13 @@ gve_dev_init(struct rte_eth_dev *eth_dev)
pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
- reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr;
+ reg_bar = pci_dev->pci_mem[GVE_REG_BAR].mem_res.addr;
if (!reg_bar) {
PMD_DRV_LOG(ERR, "Failed to map pci bar!");
return -ENOMEM;
}
- db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr;
+ db_bar = pci_dev->pci_mem[GVE_DB_BAR].mem_res.addr;
if (!db_bar) {
PMD_DRV_LOG(ERR, "Failed to map doorbell bar!");
return -ENOMEM;
@@ -398,7 +398,7 @@ static int hinic_init_hwif(struct hinic_hwdev *hwdev, void *cfg_reg_base,
int err;
pci_dev = (struct rte_pci_device *)(hwdev->pcidev_hdl);
- db_bar_len = pci_dev->mem_resource[HINIC_DB_MEM_BAR].len;
+ db_bar_len = pci_dev->pci_mem[HINIC_DB_MEM_BAR].mem_res.len;
hwif = hwdev->hwif;
@@ -470,16 +470,16 @@ static void hinic_get_mmio(struct hinic_hwdev *hwdev, void **cfg_regs_base,
uint64_t bar0_phy_addr;
uint64_t pagesize = sysconf(_SC_PAGESIZE);
- *cfg_regs_base = pci_dev->mem_resource[HINIC_CFG_REGS_BAR].addr;
- *intr_base = pci_dev->mem_resource[HINIC_INTR_MSI_BAR].addr;
- *db_base = pci_dev->mem_resource[HINIC_DB_MEM_BAR].addr;
+ *cfg_regs_base = pci_dev->pci_mem[HINIC_CFG_REGS_BAR].mem_res.addr;
+ *intr_base = pci_dev->pci_mem[HINIC_INTR_MSI_BAR].mem_res.addr;
+ *db_base = pci_dev->pci_mem[HINIC_DB_MEM_BAR].mem_res.addr;
- bar0_size = pci_dev->mem_resource[HINIC_CFG_REGS_BAR].len;
- bar2_size = pci_dev->mem_resource[HINIC_INTR_MSI_BAR].len;
+ bar0_size = pci_dev->pci_mem[HINIC_CFG_REGS_BAR].mem_res.len;
+ bar2_size = pci_dev->pci_mem[HINIC_INTR_MSI_BAR].mem_res.len;
if (pagesize == PAGE_SIZE_64K && (bar0_size % pagesize != 0)) {
bar0_phy_addr =
- pci_dev->mem_resource[HINIC_CFG_REGS_BAR].phys_addr;
+ pci_dev->pci_mem[HINIC_CFG_REGS_BAR].mem_res.phys_addr;
if (bar0_phy_addr % pagesize != 0 &&
(bar0_size + bar2_size <= pagesize) &&
bar2_size >= bar0_size) {
@@ -4525,7 +4525,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
/* Get hardware io base address from pcie BAR2 IO space */
- hw->io_base = pci_dev->mem_resource[2].addr;
+ hw->io_base = pci_dev->pci_mem[2].mem_res.addr;
/* Firmware command queue initialize */
ret = hns3_cmd_init_queue(hw);
@@ -1414,7 +1414,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
/* Get hardware io base address from pcie BAR2 IO space */
- hw->io_base = pci_dev->mem_resource[2].addr;
+ hw->io_base = pci_dev->pci_mem[2].mem_res.addr;
/* Firmware command queue initialize */
ret = hns3_cmd_init_queue(hw);
@@ -2923,8 +2923,8 @@ hns3_tx_push_get_queue_tail_reg(struct rte_eth_dev *dev, uint16_t queue_id)
*
* The quick doorbell located at 64B offset in the TQP region.
*/
- return (char *)pci_dev->mem_resource[bar_id].addr +
- (pci_dev->mem_resource[bar_id].len >> 1) +
+ return (char *)pci_dev->pci_mem[bar_id].mem_res.addr +
+ (pci_dev->pci_mem[bar_id].mem_res.len >> 1) +
HNS3_TX_PUSH_TQP_REGION_SIZE * queue_id +
HNS3_TX_PUSH_QUICK_DOORBELL_OFFSET;
}
@@ -1449,7 +1449,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
pf->dev_data = dev->data;
hw->back = I40E_PF_TO_ADAPTER(pf);
- hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
+ hw->hw_addr = (uint8_t *)(pci_dev->pci_mem[0].mem_res.addr);
if (!hw->hw_addr) {
PMD_INIT_LOG(ERR,
"Hardware is not available, as address is NULL");
@@ -2605,7 +2605,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
hw->bus.bus_id = pci_dev->addr.bus;
hw->bus.device = pci_dev->addr.devid;
hw->bus.func = pci_dev->addr.function;
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
adapter->dev_data = eth_dev->data;
adapter->stopped = 1;
@@ -618,7 +618,7 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
hw->resetting = false;
- hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
+ hw->avf.hw_addr = pci_dev->pci_mem[0].mem_res.addr;
hw->avf.back = hw;
hw->avf.bus.bus_id = pci_dev->addr.bus;
@@ -2293,7 +2293,7 @@ ice_dev_init(struct rte_eth_dev *dev)
pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
pf->dev_data = dev->data;
hw->back = pf->adapter;
- hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (uint8_t *)pci_dev->pci_mem[0].mem_res.addr;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
@@ -1135,8 +1135,8 @@ idpf_adapter_ext_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *a
struct idpf_hw *hw = &base->hw;
int ret = 0;
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
- hw->hw_addr_len = pci_dev->mem_resource[0].len;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
+ hw->hw_addr_len = pci_dev->pci_mem[0].mem_res.len;
hw->back = base;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
@@ -1343,7 +1343,7 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
hw->back = pci_dev;
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
igc_identify_hardware(dev, pci_dev);
if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
@@ -234,7 +234,7 @@ eth_ionic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
bars.num_bars = 0;
for (i = 0; i < PCI_MAX_RESOURCE && i < IONIC_BARS_MAX; i++) {
- resource = &pci_dev->mem_resource[i];
+ resource = &pci_dev->pci_mem[i].mem_res;
if (resource->phys_addr == 0 || resource->len == 0)
continue;
@@ -1136,7 +1136,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
hw->allow_unsupported_sfp = 1;
/* Initialize the shared code (base driver) */
@@ -1634,7 +1634,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
/* initialize the vfta */
memset(shadow_vfta, 0, sizeof(*shadow_vfta));
@@ -2071,8 +2071,8 @@ lio_eth_dev_init(struct rte_eth_dev *eth_dev)
rte_eth_copy_pci_info(eth_dev, pdev);
- if (pdev->mem_resource[0].addr) {
- lio_dev->hw_addr = pdev->mem_resource[0].addr;
+ if (pdev->pci_mem[0].mem_res.addr) {
+ lio_dev->hw_addr = pdev->pci_mem[0].mem_res.addr;
} else {
PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
return -ENODEV;
@@ -543,7 +543,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function);
- hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
+ hw->ctrl_bar = (uint8_t *)pci_dev->pci_mem[0].mem_res.addr;
if (hw->ctrl_bar == NULL) {
PMD_DRV_LOG(ERR,
"hw->ctrl_bar is NULL. BAR0 not configured");
@@ -290,7 +290,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
+ hw->ctrl_bar = (uint8_t *)pci_dev->pci_mem[0].mem_res.addr;
if (hw->ctrl_bar == NULL) {
PMD_DRV_LOG(ERR,
"hw->ctrl_bar is NULL. BAR0 not configured");
@@ -351,9 +351,9 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
- hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
+ hw->tx_bar = (uint8_t *)pci_dev->pci_mem[2].mem_res.addr +
tx_bar_off;
- hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
+ hw->rx_bar = (uint8_t *)pci_dev->pci_mem[2].mem_res.addr +
rx_bar_off;
PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
@@ -790,7 +790,7 @@ nfp6000_set_barsz(struct rte_pci_device *dev, struct nfp_pcie_user *desc)
unsigned long tmp;
int i = 0;
- tmp = dev->mem_resource[0].len;
+ tmp = dev->pci_mem[0].mem_res.len;
while (tmp >>= 1)
i++;
@@ -836,7 +836,7 @@ nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev)
if (nfp6000_set_barsz(dev, desc) < 0)
goto error;
- desc->cfg = (char *)dev->mem_resource[0].addr;
+ desc->cfg = (char *)dev->pci_mem[0].mem_res.addr;
nfp_enable_bars(desc);
@@ -364,7 +364,7 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
/* Vendor and Device ID need to be set before init of shared code */
hw->back = pci_dev;
@@ -490,7 +490,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
}
rte_eth_random_addr(vf_mac_addr.addr_bytes);
rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
- otx_epvf->hw_addr = pdev->mem_resource[0].addr;
+ otx_epvf->hw_addr = pdev->pci_mem[0].mem_res.addr;
otx_epvf->pdev = pdev;
otx_epdev_init(otx_epvf);
@@ -195,13 +195,13 @@ pkivf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- if (pci_dev->mem_resource[0].addr == NULL) {
+ if (pci_dev->pci_mem[0].mem_res.addr == NULL) {
octeontx_log_err("PKI Empty bar[0] %p",
- pci_dev->mem_resource[0].addr);
+ pci_dev->pci_mem[0].mem_res.addr);
return -ENODEV;
}
- bar0 = pci_dev->mem_resource[0].addr;
+ bar0 = pci_dev->pci_mem[0].mem_res.addr;
val = octeontx_read64(bar0);
domain = val & 0xffff;
vfid = (val >> 16) & 0xffff;
@@ -586,15 +586,15 @@ pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- if (pci_dev->mem_resource[0].addr == NULL ||
- pci_dev->mem_resource[2].addr == NULL) {
+ if (pci_dev->pci_mem[0].mem_res.addr == NULL ||
+ pci_dev->pci_mem[2].mem_res.addr == NULL) {
octeontx_log_err("Empty bars %p %p",
- pci_dev->mem_resource[0].addr,
- pci_dev->mem_resource[2].addr);
+ pci_dev->pci_mem[0].mem_res.addr,
+ pci_dev->pci_mem[2].mem_res.addr);
return -ENODEV;
}
- bar0 = pci_dev->mem_resource[0].addr;
- bar2 = pci_dev->mem_resource[2].addr;
+ bar0 = pci_dev->pci_mem[0].mem_res.addr;
+ bar2 = pci_dev->pci_mem[2].mem_res.addr;
octeontx_pkovf_setup();
@@ -37,9 +37,9 @@ qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
{
- edev->regview = pci_dev->mem_resource[0].addr;
- edev->doorbells = pci_dev->mem_resource[2].addr;
- edev->db_size = pci_dev->mem_resource[2].len;
+ edev->regview = pci_dev->pci_mem[0].mem_res.addr;
+ edev->doorbells = pci_dev->pci_mem[2].mem_res.addr;
+ edev->db_size = pci_dev->pci_mem[2].mem_res.len;
edev->pci_dev = pci_dev;
}
@@ -773,7 +773,7 @@ sfc_mem_bar_init(struct sfc_adapter *sa, const efx_bar_region_t *mem_ebrp)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
efsys_bar_t *ebp = &sa->mem_bar;
struct rte_mem_resource *res =
- &pci_dev->mem_resource[mem_ebrp->ebr_index];
+ &pci_dev->pci_mem[mem_ebrp->ebr_index].mem_res;
SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
ebp->esb_rid = mem_ebrp->ebr_index;
@@ -2223,7 +2223,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function);
- nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
+ nic->reg_base = (uintptr_t)pci_dev->pci_mem[0].mem_res.addr;
if (!nic->reg_base) {
PMD_INIT_LOG(ERR, "Failed to map BAR0");
ret = -ENODEV;
@@ -594,7 +594,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
rte_eth_copy_pci_info(eth_dev, pci_dev);
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
@@ -211,7 +211,7 @@ eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev)
hw->vendor_id = pci_dev->id.vendor_id;
hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
- hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pci_dev->pci_mem[0].mem_res.addr;
/* initialize the vfta */
memset(shadow_vfta, 0, sizeof(*shadow_vfta));
@@ -603,14 +603,14 @@ get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
return NULL;
}
- if (offset + length > dev->mem_resource[bar].len) {
+ if (offset + length > dev->pci_mem[bar].mem_res.len) {
PMD_INIT_LOG(ERR,
"invalid cap: overflows bar space: %u > %" PRIu64,
- offset + length, dev->mem_resource[bar].len);
+ offset + length, dev->pci_mem[bar].mem_res.len);
return NULL;
}
- base = dev->mem_resource[bar].addr;
+ base = dev->pci_mem[bar].mem_res.addr;
if (base == NULL) {
PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
return NULL;
@@ -345,8 +345,8 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
- hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
- hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;
+ hw->hw_addr0 = (void *)pci_dev->pci_mem[0].mem_res.addr;
+ hw->hw_addr1 = (void *)pci_dev->pci_mem[1].mem_res.addr;
hw->num_rx_queues = 1;
hw->num_tx_queues = 1;
@@ -331,10 +331,10 @@ bphy_rawdev_probe(struct rte_pci_driver *pci_drv,
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- if (!pci_dev->mem_resource[0].addr) {
+ if (!pci_dev->pci_mem[0].mem_res.addr) {
plt_err("BARs have invalid values: BAR0 %p\n BAR2 %p",
- pci_dev->mem_resource[0].addr,
- pci_dev->mem_resource[2].addr);
+ pci_dev->pci_mem[0].mem_res.addr,
+ pci_dev->pci_mem[2].mem_res.addr);
return -ENODEV;
}
@@ -355,8 +355,8 @@ bphy_rawdev_probe(struct rte_pci_driver *pci_drv,
bphy_rawdev->driver_name = pci_dev->driver->driver.name;
bphy_dev = (struct bphy_device *)bphy_rawdev->dev_private;
- bphy_dev->mem.res0 = pci_dev->mem_resource[0];
- bphy_dev->mem.res2 = pci_dev->mem_resource[2];
+ bphy_dev->mem.res0 = pci_dev->pci_mem[0].mem_res;
+ bphy_dev->mem.res2 = pci_dev->pci_mem[2].mem_res;
bphy_dev->bphy.pci_dev = pci_dev;
ret = roc_bphy_dev_init(&bphy_dev->bphy);
@@ -302,7 +302,7 @@ cnxk_bphy_cgx_rawdev_probe(struct rte_pci_driver *pci_drv,
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- if (!pci_dev->mem_resource[0].addr)
+ if (!pci_dev->pci_mem[0].mem_res.addr)
return -ENODEV;
ret = roc_plt_init();
@@ -326,8 +326,8 @@ cnxk_bphy_cgx_rawdev_probe(struct rte_pci_driver *pci_drv,
}
rcgx = cgx->rcgx;
- rcgx->bar0_pa = pci_dev->mem_resource[0].phys_addr;
- rcgx->bar0_va = pci_dev->mem_resource[0].addr;
+ rcgx->bar0_pa = pci_dev->pci_mem[0].mem_res.phys_addr;
+ rcgx->bar0_va = pci_dev->pci_mem[0].mem_res.addr;
ret = roc_bphy_cgx_dev_init(rcgx);
if (ret)
goto out_free;
@@ -1524,7 +1524,7 @@ static void *n3000_afu_get_port_addr(struct afu_rawdev *dev)
if (!pci_dev)
return NULL;
- addr = (uint8_t *)pci_dev->mem_resource[0].addr;
+ addr = (uint8_t *)pci_dev->pci_mem[0].mem_res.addr;
val = rte_read64(addr + PORT_ATTR_REG(dev->port));
if (!PORT_IMPLEMENTED(val)) {
IFPGA_RAWDEV_PMD_INFO("FIU port %d is not implemented", dev->port);
@@ -1537,7 +1537,7 @@ static void *n3000_afu_get_port_addr(struct afu_rawdev *dev)
return NULL;
}
- addr = (uint8_t *)pci_dev->mem_resource[bar].addr + PORT_OFFSET(val);
+ addr = (uint8_t *)pci_dev->pci_mem[bar].mem_res.addr + PORT_OFFSET(val);
return addr;
}
@@ -1580,9 +1580,9 @@ ifpga_rawdev_create(struct rte_pci_device *pci_dev,
/* init opae_adapter_data_pci for device specific information */
for (i = 0; i < PCI_MAX_RESOURCE; i++) {
- data->region[i].phys_addr = pci_dev->mem_resource[i].phys_addr;
- data->region[i].len = pci_dev->mem_resource[i].len;
- data->region[i].addr = pci_dev->mem_resource[i].addr;
+ data->region[i].phys_addr = pci_dev->pci_mem[i].mem_res.phys_addr;
+ data->region[i].len = pci_dev->pci_mem[i].mem_res.len;
+ data->region[i].addr = pci_dev->pci_mem[i].mem_res.addr;
}
data->device_id = pci_dev->id.device_id;
data->vendor_id = pci_dev->id.vendor_id;
@@ -179,7 +179,7 @@ intel_ntb_dev_init(const struct rte_rawdev *dev)
return -EINVAL;
}
- hw->hw_addr = (char *)hw->pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (char *)hw->pci_dev->pci_mem[0].mem_res.addr;
if (is_gen3_ntb(hw))
ret = intel_ntb3_check_ppd(hw);
@@ -207,7 +207,7 @@ intel_ntb_dev_init(const struct rte_rawdev *dev)
for (i = 0; i < hw->mw_cnt; i++) {
bar = intel_ntb_bar[i];
- hw->mw_size[i] = hw->pci_dev->mem_resource[bar].len;
+ hw->mw_size[i] = hw->pci_dev->pci_mem[bar].mem_res.len;
}
/* Reserve the last 2 spad registers for users. */
@@ -238,7 +238,7 @@ intel_ntb_get_peer_mw_addr(const struct rte_rawdev *dev, int mw_idx)
bar = intel_ntb_bar[mw_idx];
- return hw->pci_dev->mem_resource[bar].addr;
+ return hw->pci_dev->pci_mem[bar].mem_res.addr;
}
static int
@@ -271,7 +271,7 @@ intel_ntb_mw_set_trans(const struct rte_rawdev *dev, int mw_idx,
/* Limit reg val should be EMBAR base address plus MW size. */
base = addr;
- limit = hw->pci_dev->mem_resource[bar].phys_addr + size;
+ limit = hw->pci_dev->pci_mem[bar].mem_res.phys_addr + size;
rte_write64(base, xlat_addr);
rte_write64(limit, limit_addr);
@@ -204,11 +204,11 @@ ifcvf_vfio_setup(struct ifcvf_internal *internal)
for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
i++) {
internal->hw.mem_resource[i].addr =
- internal->pdev->mem_resource[i].addr;
+ internal->pdev->pci_mem[i].mem_res.addr;
internal->hw.mem_resource[i].phys_addr =
- internal->pdev->mem_resource[i].phys_addr;
+ internal->pdev->pci_mem[i].mem_res.phys_addr;
internal->hw.mem_resource[i].len =
- internal->pdev->mem_resource[i].len;
+ internal->pdev->pci_mem[i].mem_res.len;
}
return 0;
@@ -192,7 +192,7 @@ sfc_vdpa_mem_bar_init(struct sfc_vdpa_adapter *sva,
struct rte_pci_device *pci_dev = sva->pdev;
efsys_bar_t *ebp = &sva->mem_bar;
struct rte_mem_resource *res =
- &pci_dev->mem_resource[mem_ebrp->ebr_index];
+ &pci_dev->pci_mem[mem_ebrp->ebr_index].mem_res;
SFC_BAR_LOCK_INIT(ebp, pci_dev->name);
ebp->esb_rid = mem_ebrp->ebr_index;
@@ -861,7 +861,7 @@ sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
*offset);
pci_dev = sfc_vdpa_adapter_by_dev_handle(dev)->pdev;
- doorbell = (uint8_t *)pci_dev->mem_resource[reg.index].addr + *offset;
+ doorbell = (uint8_t *)pci_dev->pci_mem[reg.index].mem_res.addr + *offset;
/*
* virtio-net driver in VM sends queue notifications before