[RFC,v2,1/2] vhost: populate guest memory for DMA-accelerated vhost-user

Message ID 1572598450-245091-2-git-send-email-jiayu.hu@intel.com (mailing list archive)
State RFC, archived
Delegated to: Maxime Coquelin
Series Add a PMD for DMA-accelerated vhost-user

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Hu, Jiayu Nov. 1, 2019, 8:54 a.m. UTC
DMA engines, such as I/OAT, are efficient at moving large blocks of
data within memory. Offloading large copies on the vhost side to a
DMA engine can save precious CPU cycles and improve vhost
performance.

However, using a DMA engine requires the guest's memory to be
populated. This patch enables DMA-accelerated vhost-user to populate
the guest's memory.
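
For example, an application would opt in by passing the new
RTE_VHOST_USER_DMA_COPY flag when registering the vhost-user socket.
The sketch below is illustrative only: the socket path and the
vhost_device_ops callbacks are whatever the application already uses,
not something introduced by this patch.

#include <rte_vhost.h>

/* Illustrative only: "ops" is the application's usual
 * struct vhost_device_ops; "path" is the application's
 * existing vhost-user socket path.
 */
static int
register_dma_vhost_socket(const char *path,
			  const struct vhost_device_ops *ops)
{
	uint64_t flags = RTE_VHOST_USER_DMA_COPY;

	if (rte_vhost_driver_register(path, flags) < 0)
		return -1;

	if (rte_vhost_driver_callback_register(path, ops) < 0)
		return -1;

	/* With the flag set, vhost mmap()s the guest memory regions
	 * with MAP_POPULATE when VHOST_USER_SET_MEM_TABLE arrives.
	 */
	return rte_vhost_driver_start(path);
}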

Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
---
 lib/librte_vhost/rte_vhost.h  |  1 +
 lib/librte_vhost/socket.c     | 11 +++++++++++
 lib/librte_vhost/vhost.h      |  2 ++
 lib/librte_vhost/vhost_user.c |  3 ++-
 4 files changed, 16 insertions(+), 1 deletion(-)
  

Patch

diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 7b5dc87..7716939 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -34,6 +34,7 @@  extern "C" {
 #define RTE_VHOST_USER_EXTBUF_SUPPORT	(1ULL << 5)
 /* support only linear buffers (no chained mbufs) */
 #define RTE_VHOST_USER_LINEARBUF_SUPPORT	(1ULL << 6)
+#define RTE_VHOST_USER_DMA_COPY		(1ULL << 7)
 
 /** Protocol features. */
 #ifndef VHOST_USER_PROTOCOL_F_MQ
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index a34bc7f..9db6f6b 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -62,6 +62,8 @@  struct vhost_user_socket {
 	 */
 	int vdpa_dev_id;
 
+	bool dma_enabled;
+
 	struct vhost_device_ops const *notify_ops;
 };
 
@@ -240,6 +242,13 @@  vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 	if (vsocket->linearbuf)
 		vhost_enable_linearbuf(vid);
 
+	if (vsocket->dma_enabled) {
+		struct virtio_net *dev;
+
+		dev = get_device(vid);
+		dev->dma_enabled = true;
+	}
+
 	RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);
 
 	if (vsocket->notify_ops->new_connection) {
@@ -889,6 +898,8 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 		goto out_mutex;
 	}
 
+	vsocket->dma_enabled = flags & RTE_VHOST_USER_DMA_COPY;
+
 	/*
 	 * Set the supported features correctly for the builtin vhost-user
 	 * net driver.
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 9f11b28..b61a790 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -383,6 +383,8 @@  struct virtio_net {
 	 */
 	int			vdpa_dev_id;
 
+	bool			dma_enabled;
+
 	/* context data for the external message handlers */
 	void			*extern_data;
 	/* pre and post vhost user message handlers for the device */
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 2a9fa7c..12722b9 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -1067,7 +1067,8 @@  vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		}
 		mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
 
-		populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0;
+		populate = (dev->dequeue_zero_copy || dev->dma_enabled) ?
+			MAP_POPULATE : 0;
 		mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 				 MAP_SHARED | populate, fd, 0);
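
Note on the final hunk: MAP_POPULATE pre-faults the mapped guest
memory, so the pages are already resident when a copy is handed to the
DMA engine, which (unlike the CPU copy path) typically cannot service
a page fault on demand. Below is a minimal standalone sketch of the
same flag selection; the field names mirror the patch, but the helper
itself is illustrative and not part of the patch.

#define _GNU_SOURCE
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/* Illustrative helper: pre-fault the region when either dequeue
 * zero-copy or DMA copy is enabled, mirroring the change in
 * vhost_user_set_mem_table().
 */
static void *
map_guest_region(int fd, size_t mmap_size,
		 bool dequeue_zero_copy, bool dma_enabled)
{
	int populate = (dequeue_zero_copy || dma_enabled) ?
		MAP_POPULATE : 0;

	return mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED | populate, fd, 0);
}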