From patchwork Sun Apr  8 20:18:06 2018
X-Patchwork-Submitter: "Burakov, Anatoly" <anatoly.burakov@intel.com>
X-Patchwork-Id: 37577
From: Anatoly Burakov <anatoly.burakov@intel.com>
To: dev@dpdk.org
Cc: keith.wiles@intel.com, jianfeng.tan@intel.com, andras.kovacs@ericsson.com,
	laszlo.vadkeri@ericsson.com, benjamin.walker@intel.com,
	bruce.richardson@intel.com, thomas@monjalon.net, konstantin.ananyev@intel.com,
	kuralamudhan.ramakrishnan@intel.com, louise.m.daly@intel.com,
	nelio.laranjeiro@6wind.com, yskoh@mellanox.com, pepperjo@japf.ch,
	jerin.jacob@caviumnetworks.com, hemant.agrawal@nxp.com, olivier.matz@6wind.com,
	shreyansh.jain@nxp.com, gowrishankar.m@linux.vnet.ibm.com
Date: Sun, 8 Apr 2018 21:18:06 +0100
Message-Id: <6127b67cb7aa7d381da43baaa6a7f0a7a61ee96a.1523218215.git.anatoly.burakov@intel.com>
Subject: [dpdk-dev] [PATCH v4 33/70] vfio/spapr: use memseg walk instead of iteration

Reduce dependency on internal details of EAL memory subsystem, and
simplify code.
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 lib/librte_eal/linuxapp/eal/eal_vfio.c | 108 +++++++++++++++++++--------------
 1 file changed, 63 insertions(+), 45 deletions(-)

diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index 2a34ae9..fb41e82 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -694,16 +694,69 @@ vfio_type1_dma_map(int vfio_container_fd)
 	return rte_memseg_walk(type1_map, &vfio_container_fd);
 }
 
+struct spapr_walk_param {
+	uint64_t window_size;
+	uint64_t hugepage_sz;
+};
 static int
-vfio_spapr_dma_map(int vfio_container_fd)
+spapr_window_size(const struct rte_memseg *ms, void *arg)
 {
-	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-	int i, ret;
+	struct spapr_walk_param *param = arg;
+	uint64_t max = ms->iova + ms->len;
+
+	if (max > param->window_size) {
+		param->hugepage_sz = ms->hugepage_sz;
+		param->window_size = max;
+	}
 
+	return 0;
+}
+
+static int
+spapr_map(const struct rte_memseg *ms, void *arg)
+{
+	struct vfio_iommu_type1_dma_map dma_map;
 	struct vfio_iommu_spapr_register_memory reg = {
 		.argsz = sizeof(reg),
 		.flags = 0
 	};
+	int *vfio_container_fd = arg;
+	int ret;
+
+	reg.vaddr = (uintptr_t) ms->addr;
+	reg.size = ms->len;
+	ret = ioctl(*vfio_container_fd,
+		VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
+	if (ret) {
+		RTE_LOG(ERR, EAL, "  cannot register vaddr for IOMMU, error %i (%s)\n",
+				errno, strerror(errno));
+		return -1;
+	}
+
+	memset(&dma_map, 0, sizeof(dma_map));
+	dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
+	dma_map.vaddr = ms->addr_64;
+	dma_map.size = ms->len;
+	dma_map.iova = ms->iova;
+	dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
+			VFIO_DMA_MAP_FLAG_WRITE;
+
+	ret = ioctl(*vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+
+	if (ret) {
+		RTE_LOG(ERR, EAL, "  cannot set up DMA remapping, error %i (%s)\n",
+				errno, strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+vfio_spapr_dma_map(int vfio_container_fd)
+{
+	struct spapr_walk_param param;
+	int ret;
 	struct vfio_iommu_spapr_tce_info info = {
 		.argsz = sizeof(info),
 	};
@@ -714,6 +767,8 @@ vfio_spapr_dma_map(int vfio_container_fd)
 		.argsz = sizeof(remove),
 	};
 
+	memset(&param, 0, sizeof(param));
+
 	/* query spapr iommu info */
 	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 	if (ret) {
@@ -732,17 +787,11 @@ vfio_spapr_dma_map(int vfio_container_fd)
 	}
 
 	/* create DMA window from 0 to max(phys_addr + len) */
-	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
-		if (ms[i].addr == NULL)
-			break;
-
-		create.window_size = RTE_MAX(create.window_size,
-				ms[i].iova + ms[i].len);
-	}
+	rte_memseg_walk(spapr_window_size, &param);
 
 	/* sPAPR requires window size to be a power of 2 */
-	create.window_size = rte_align64pow2(create.window_size);
-	create.page_shift = __builtin_ctzll(ms->hugepage_sz);
+	create.window_size = rte_align64pow2(param.window_size);
+	create.page_shift = __builtin_ctzll(param.hugepage_sz);
 	create.levels = 1;
 
 	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
@@ -758,39 +807,8 @@ vfio_spapr_dma_map(int vfio_container_fd)
 	}
 
 	/* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
-	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
-		struct vfio_iommu_type1_dma_map dma_map;
-
-		if (ms[i].addr == NULL)
-			break;
-
-		reg.vaddr = (uintptr_t) ms[i].addr;
-		reg.size = ms[i].len;
-		ret = ioctl(vfio_container_fd,
-			VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
-		if (ret) {
-			RTE_LOG(ERR, EAL, "  cannot register vaddr for IOMMU, "
-				"error %i (%s)\n", errno, strerror(errno));
-			return -1;
-		}
-
-		memset(&dma_map, 0, sizeof(dma_map));
-		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
-		dma_map.vaddr = ms[i].addr_64;
-		dma_map.size = ms[i].len;
-		dma_map.iova = ms[i].iova;
-		dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
-			VFIO_DMA_MAP_FLAG_WRITE;
-
-		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
-
-		if (ret) {
-			RTE_LOG(ERR, EAL, "  cannot set up DMA remapping, "
-				"error %i (%s)\n", errno, strerror(errno));
-			return -1;
-		}
-
-	}
+	if (rte_memseg_walk(spapr_map, &vfio_container_fd) < 0)
+		return -1;
 
 	return 0;
 }
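
For readers unfamiliar with the walk API, below is a minimal, self-contained
sketch of the rte_memseg_walk() callback pattern the patch adopts. It assumes
the callback signature used in this series (const struct rte_memseg *, void *);
the seg_totals struct and count_segments() callback are illustrative names
invented for the example, not part of DPDK or of this patch.

/*
 * Illustrative only -- not part of the patch. Per-segment logic lives in the
 * callback; shared state is passed through the void *arg pointer.
 */
#include <inttypes.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_memory.h>

struct seg_totals {
	uint64_t len;		/* total length of all memsegs visited */
	unsigned int count;	/* number of memsegs visited */
};

/* Called once per memseg; returning non-zero stops the walk early. */
static int
count_segments(const struct rte_memseg *ms, void *arg)
{
	struct seg_totals *totals = arg;

	totals->len += ms->len;
	totals->count++;
	return 0;
}

int
main(int argc, char **argv)
{
	struct seg_totals totals = { 0, 0 };

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Walk all memsegs instead of indexing RTE_MAX_MEMSEG entries. */
	rte_memseg_walk(count_segments, &totals);

	printf("%u segments, %" PRIu64 " bytes\n", totals.count, totals.len);
	return 0;
}

This is the same shape as spapr_window_size() and spapr_map() in the patch:
the walk hides the memseg layout, and the callbacks receive each segment plus
a caller-supplied argument (the window-size accumulator or the container fd).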