[dpdk-dev] lib/librte_vhost: code style fixes

Message ID: 1415162572-25941-1-git-send-email-huawei.xie@intel.com (mailing list archive)
State: Rejected, archived
Delegated to: Thomas Monjalon
Commit Message

Huawei Xie Nov. 5, 2014, 4:42 a.m. UTC
This patch fixes code style issues and refines some comments in the vhost library.


---
 lib/librte_vhost/eventfd_link/eventfd_link.c | 244 ++++++++++-----------
 lib/librte_vhost/eventfd_link/eventfd_link.h | 127 ++++++-----
 lib/librte_vhost/rte_virtio_net.h            |   3 +-
 lib/librte_vhost/vhost-net-cdev.c            | 187 +++++++++-------
 lib/librte_vhost/vhost_rxtx.c                |  13 +-
 lib/librte_vhost/virtio-net.c                | 317 +++++++++++++++++----------
 6 files changed, 494 insertions(+), 397 deletions(-)
  

Comments

Thomas Monjalon Nov. 5, 2014, 9:09 a.m. UTC | #1
Hi Huawei,

Please add a Signed-off-by to your patch.
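
For reference, the sign-off requested here is the Developer's Certificate of Origin trailer, added as the last line of the commit message. A minimal example, using the author's address from the Message ID above purely as an illustration:

    Signed-off-by: Huawei Xie <huawei.xie@intel.com>

git can add this trailer automatically with "git commit -s" (or "git commit --amend -s" for an already-committed patch); regenerating the patch with "git format-patch" then carries it into the resent version.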

2014-11-05 12:42, Huawei Xie:
> This patch fixes code style issues and refines some comments in vhost library.
> 
> 
> ---
>  lib/librte_vhost/eventfd_link/eventfd_link.c | 244 ++++++++++-----------
>  lib/librte_vhost/eventfd_link/eventfd_link.h | 127 ++++++-----
>  lib/librte_vhost/rte_virtio_net.h            |   3 +-
>  lib/librte_vhost/vhost-net-cdev.c            | 187 +++++++++-------
>  lib/librte_vhost/vhost_rxtx.c                |  13 +-
>  lib/librte_vhost/virtio-net.c                | 317 +++++++++++++++++----------
>  6 files changed, 494 insertions(+), 397 deletions(-)
> 
> diff --git a/lib/librte_vhost/eventfd_link/eventfd_link.c b/lib/librte_vhost/eventfd_link/eventfd_link.c
> index fc0653a..542ec2c 100644
> --- a/lib/librte_vhost/eventfd_link/eventfd_link.c
> +++ b/lib/librte_vhost/eventfd_link/eventfd_link.c
> @@ -1,26 +1,26 @@
>  /*-
> - *  * GPL LICENSE SUMMARY
> - *  *
> - *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> - *  *
> - *  *   This program is free software; you can redistribute it and/or modify
> - *  *   it under the terms of version 2 of the GNU General Public License as
> - *  *   published by the Free Software Foundation.
> - *  *
> - *  *   This program is distributed in the hope that it will be useful, but
> - *  *   WITHOUT ANY WARRANTY; without even the implied warranty of
> - *  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> - *  *   General Public License for more details.
> - *  *
> - *  *   You should have received a copy of the GNU General Public License
> - *  *   along with this program; if not, write to the Free Software
> - *  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
> - *  *   The full GNU General Public License is included in this distribution
> - *  *   in the file called LICENSE.GPL.
> - *  *
> - *  *   Contact Information:
> - *  *   Intel Corporation
> - *   */
> + * GPL LICENSE SUMMARY
> + *
> + *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> + *
> + *   This program is free software; you can redistribute it and/or modify
> + *   it under the terms of version 2 of the GNU General Public License as
> + *   published by the Free Software Foundation.
> + *
> + *   This program is distributed in the hope that it will be useful, but
> + *   WITHOUT ANY WARRANTY; without even the implied warranty of
> + *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + *   General Public License for more details.
> + *
> + *   You should have received a copy of the GNU General Public License
> + *   along with this program; if not, write to the Free Software
> + *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
> + *   The full GNU General Public License is included in this distribution
> + *   in the file called LICENSE.GPL.
> + *
> + *   Contact Information:
> + *   Intel Corporation
> + */
>  
>  #include <linux/eventfd.h>
>  #include <linux/miscdevice.h>
> @@ -42,15 +42,15 @@
>   * get_files_struct is copied from fs/file.c
>   */
>  struct files_struct *
> -get_files_struct (struct task_struct *task)
> +get_files_struct(struct task_struct *task)
>  {
>  	struct files_struct *files;
>  
> -	task_lock (task);
> +	task_lock(task);
>  	files = task->files;
>  	if (files)
> -		atomic_inc (&files->count);
> -	task_unlock (task);
> +		atomic_inc(&files->count);
> +	task_unlock(task);
>  
>  	return files;
>  }
> @@ -59,17 +59,15 @@ get_files_struct (struct task_struct *task)
>   * put_files_struct is extracted from fs/file.c
>   */
>  void
> -put_files_struct (struct files_struct *files)
> +put_files_struct(struct files_struct *files)
>  {
> -	if (atomic_dec_and_test (&files->count))
> -	{
> -		BUG ();
> -	}
> +	if (atomic_dec_and_test(&files->count))
> +		BUG();
>  }
>  
>  
>  static long
> -eventfd_link_ioctl (struct file *f, unsigned int ioctl, unsigned long arg)
> +eventfd_link_ioctl(struct file *f, unsigned int ioctl, unsigned long arg)
>  {
>  	void __user *argp = (void __user *) arg;
>  	struct task_struct *task_target = NULL;
> @@ -78,96 +76,88 @@ eventfd_link_ioctl (struct file *f, unsigned int ioctl, unsigned long arg)
>  	struct fdtable *fdt;
>  	struct eventfd_copy eventfd_copy;
>  
> -	switch (ioctl)
> -	{
> -		case EVENTFD_COPY:
> -			if (copy_from_user (&eventfd_copy, argp, sizeof (struct eventfd_copy)))
> -				return -EFAULT;
> -
> -			/*
> -			 * Find the task struct for the target pid
> -			 */
> -			task_target =
> -				pid_task (find_vpid (eventfd_copy.target_pid), PIDTYPE_PID);
> -			if (task_target == NULL)
> -			{
> -				printk (KERN_DEBUG "Failed to get mem ctx for target pid\n");
> -				return -EFAULT;
> -			}
> -
> -			files = get_files_struct (current);
> -			if (files == NULL)
> -			{
> -				printk (KERN_DEBUG "Failed to get files struct\n");
> -				return -EFAULT;
> -			}
> -
> -			rcu_read_lock ();
> -			file = fcheck_files (files, eventfd_copy.source_fd);
> -			if (file)
> -			{
> -				if (file->f_mode & FMODE_PATH
> -						|| !atomic_long_inc_not_zero (&file->f_count))
> -					file = NULL;
> -			}
> -			rcu_read_unlock ();
> -			put_files_struct (files);
> -
> -			if (file == NULL)
> -			{
> -				printk (KERN_DEBUG "Failed to get file from source pid\n");
> -				return 0;
> -			}
> -
> -			/*
> -			 * Release the existing eventfd in the source process
> -			 */
> -			spin_lock (&files->file_lock);
> -			filp_close (file, files);
> -			fdt = files_fdtable (files);
> -			fdt->fd[eventfd_copy.source_fd] = NULL;
> -			spin_unlock (&files->file_lock);
> -
> -			/*
> -			 * Find the file struct associated with the target fd.
> -			 */
> -
> -			files = get_files_struct (task_target);
> -			if (files == NULL)
> -			{
> -				printk (KERN_DEBUG "Failed to get files struct\n");
> -				return -EFAULT;
> -			}
> -
> -			rcu_read_lock ();
> -			file = fcheck_files (files, eventfd_copy.target_fd);
> -			if (file)
> -			{
> -				if (file->f_mode & FMODE_PATH
> -						|| !atomic_long_inc_not_zero (&file->f_count))
> +	switch (ioctl) {
> +	case EVENTFD_COPY:
> +		if (copy_from_user(&eventfd_copy, argp,
> +			sizeof(struct eventfd_copy)))
> +			return -EFAULT;
> +
> +		/*
> +		 * Find the task struct for the target pid
> +		 */
> +		task_target =
> +			pid_task(find_vpid(eventfd_copy.target_pid), PIDTYPE_PID);
> +		if (task_target == NULL) {
> +			printk(KERN_DEBUG "Failed to get mem ctx for target pid\n");
> +			return -EFAULT;
> +		}
> +
> +		files = get_files_struct(current);
> +		if (files == NULL) {
> +			printk(KERN_DEBUG "Failed to get files struct\n");
> +			return -EFAULT;
> +		}
> +
> +		rcu_read_lock();
> +		file = fcheck_files(files, eventfd_copy.source_fd);
> +		if (file) {
> +			if (file->f_mode & FMODE_PATH ||
> +				!atomic_long_inc_not_zero(&file->f_count))
> +				file = NULL;
> +		}
> +		rcu_read_unlock();
> +		put_files_struct(files);
> +
> +		if (file == NULL) {
> +			printk(KERN_DEBUG "Failed to get file from source pid\n");
> +			return 0;
> +		}
> +
> +		/*
> +		 * Release the existing eventfd in the source process
> +		 */
> +		spin_lock(&files->file_lock);
> +		filp_close(file, files);
> +		fdt = files_fdtable(files);
> +		fdt->fd[eventfd_copy.source_fd] = NULL;
> +		spin_unlock(&files->file_lock);
> +
> +		/*
> +		 * Find the file struct associated with the target fd.
> +		 */
> +
> +		files = get_files_struct(task_target);
> +		if (files == NULL) {
> +			printk(KERN_DEBUG "Failed to get files struct\n");
> +			return -EFAULT;
> +		}
> +
> +		rcu_read_lock();
> +		file = fcheck_files(files, eventfd_copy.target_fd);
> +		if (file) {
> +			if (file->f_mode & FMODE_PATH ||
> +				!atomic_long_inc_not_zero(&file->f_count))
>  					file = NULL;
> -			}
> -			rcu_read_unlock ();
> -			put_files_struct (files);
> -
> -			if (file == NULL)
> -			{
> -				printk (KERN_DEBUG "Failed to get file from target pid\n");
> -				return 0;
> -			}
> +		}
> +		rcu_read_unlock();
> +		put_files_struct(files);
>  
> +		if (file == NULL) {
> +			printk(KERN_DEBUG "Failed to get file from target pid\n");
> +			return 0;
> +		}
>  
> -			/*
> -			 * Install the file struct from the target process into the
> -			 * file desciptor of the source process,
> -			 */
> +		/*
> +		 * Install the file struct from the target process into the
> +		 * file desciptor of the source process,
> +		 */
>  
> -			fd_install (eventfd_copy.source_fd, file);
> +		fd_install(eventfd_copy.source_fd, file);
>  
> -			return 0;
> +		return 0;
>  
> -		default:
> -			return -ENOIOCTLCMD;
> +	default:
> +		return -ENOIOCTLCMD;
>  	}
>  }
>  
> @@ -183,23 +173,23 @@ static struct miscdevice eventfd_link_misc = {
>  };
>  
>  static int __init
> -eventfd_link_init (void)
> +eventfd_link_init(void)
>  {
> -	return misc_register (&eventfd_link_misc);
> +	return misc_register(&eventfd_link_misc);
>  }
>  
> -module_init (eventfd_link_init);
> +module_init(eventfd_link_init);
>  
>  static void __exit
> -eventfd_link_exit (void)
> +eventfd_link_exit(void)
>  {
> -	misc_deregister (&eventfd_link_misc);
> +	misc_deregister(&eventfd_link_misc);
>  }
>  
> -module_exit (eventfd_link_exit);
> +module_exit(eventfd_link_exit);
>  
> -MODULE_VERSION ("0.0.1");
> -MODULE_LICENSE ("GPL v2");
> -MODULE_AUTHOR ("Anthony Fee");
> -MODULE_DESCRIPTION ("Link eventfd");
> -MODULE_ALIAS ("devname:eventfd-link");
> +MODULE_VERSION("0.0.1");
> +MODULE_LICENSE("GPL v2");
> +MODULE_AUTHOR("Anthony Fee");
> +MODULE_DESCRIPTION("Link eventfd");
> +MODULE_ALIAS("devname:eventfd-link");
> diff --git a/lib/librte_vhost/eventfd_link/eventfd_link.h b/lib/librte_vhost/eventfd_link/eventfd_link.h
> index a32a8dd..ea619ec 100644
> --- a/lib/librte_vhost/eventfd_link/eventfd_link.h
> +++ b/lib/librte_vhost/eventfd_link/eventfd_link.h
> @@ -1,79 +1,76 @@
>  /*-
> - *  * This file is provided under a dual BSD/GPLv2 license.  When using or
> - *  *   redistributing this file, you may do so under either license.
> - *  *
> - *  *   GPL LICENSE SUMMARY
> - *  *
> - *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> - *  *
> - *  *   This program is free software; you can redistribute it and/or modify
> - *  *   it under the terms of version 2 of the GNU General Public License as
> - *  *   published by the Free Software Foundation.
> - *  *
> - *  *   This program is distributed in the hope that it will be useful, but
> - *  *   WITHOUT ANY WARRANTY; without even the implied warranty of
> - *  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> - *  *   General Public License for more details.
> - *  *
> - *  *   You should have received a copy of the GNU General Public License
> - *  *   along with this program; if not, write to the Free Software
> - *  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
> - *  *   The full GNU General Public License is included in this distribution
> - *  *   in the file called LICENSE.GPL.
> - *  *
> - *  *   Contact Information:
> - *  *   Intel Corporation
> - *  *
> - *  *   BSD LICENSE
> - *  *
> - *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> - *  *   All rights reserved.
> - *  *
> - *  *   Redistribution and use in source and binary forms, with or without
> - *  *   modification, are permitted provided that the following conditions
> - *  *   are met:
> - *  *
> - *  *     * Redistributions of source code must retain the above copyright
> - *  *       notice, this list of conditions and the following disclaimer.
> - *  *     * Redistributions in binary form must reproduce the above copyright
> - *  *       notice, this list of conditions and the following disclaimer in
> - *  *       the documentation and/or other materials provided with the
> - *  *       distribution.
> - *  *     * Neither the name of Intel Corporation nor the names of its
> - *  *       contributors may be used to endorse or promote products derived
> - *  *       from this software without specific prior written permission.
> - *  *
> - *  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> - *  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> - *  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> - *  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> - *  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> - *  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> - *  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> - *  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> - *  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> - *  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> - *  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> - *  *
> - *   */
> + *  This file is provided under a dual BSD/GPLv2 license.  When using or
> + *  redistributing this file, you may do so under either license.
> + *
> + * GPL LICENSE SUMMARY
> + *
> + *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> + *
> + *   This program is free software; you can redistribute it and/or modify
> + *   it under the terms of version 2 of the GNU General Public License as
> + *   published by the Free Software Foundation.
> + *
> + *   This program is distributed in the hope that it will be useful, but
> + *   WITHOUT ANY WARRANTY; without even the implied warranty of
> + *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + *   General Public License for more details.
> + *
> + *   You should have received a copy of the GNU General Public License
> + *   along with this program; if not, write to the Free Software
> + *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
> + *   The full GNU General Public License is included in this distribution
> + *   in the file called LICENSE.GPL.
> + *
> + *   Contact Information:
> + *   Intel Corporation
> + *
> + * BSD LICENSE
> + *
> + *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> + *   All rights reserved.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *   Redistributions of source code must retain the above copyright
> + *   notice, this list of conditions and the following disclaimer.
> + *   Redistributions in binary form must reproduce the above copyright
> + *   notice, this list of conditions and the following disclaimer in
> + *   the documentation and/or other materials provided with the
> + *   distribution.
> + *   Neither the name of Intel Corporation nor the names of its
> + *   contributors may be used to endorse or promote products derived
> + *   from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + *
> + */
>  
>  #ifndef _EVENTFD_LINK_H_
>  #define _EVENTFD_LINK_H_
>  
>  /*
> - *	ioctl to copy an fd entry in calling process to an fd in a target process
> + * ioctl to copy an fd entry in calling process to an fd in a target process
>   */
>  #define EVENTFD_COPY 1
>  
>  /*
> - *	arguements for the EVENTFD_COPY ioctl
> + * arguements for the EVENTFD_COPY ioctl
>   */
>  struct eventfd_copy {
> -	// fd in the target pid
> -    unsigned target_fd;
> -	// fd in the calling pid
> -    unsigned source_fd;
> -	// pid of the target pid
> -    pid_t target_pid;
> +	unsigned target_fd; /* fd in the target pid */
> +	unsigned source_fd; /* fd in the calling pid */
> +	pid_t target_pid; /* pid of the target pid */
>  };
>  #endif /* _EVENTFD_LINK_H_ */
> diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
> index b6548a1..00b1328 100644
> --- a/lib/librte_vhost/rte_virtio_net.h
> +++ b/lib/librte_vhost/rte_virtio_net.h
> @@ -90,8 +90,7 @@ struct vhost_virtqueue {
>  /**
>   * Device structure contains all configuration information relating to the device.
>   */
> -struct virtio_net
> -{
> +struct virtio_net {
>  	struct vhost_virtqueue	*virtqueue[VIRTIO_QNUM];	/**< Contains all virtqueue information. */
>  	struct virtio_memory	*mem;		/**< QEMU memory and memory region information. */
>  	uint64_t		features;	/**< Negotiated feature set. */
> diff --git a/lib/librte_vhost/vhost-net-cdev.c b/lib/librte_vhost/vhost-net-cdev.c
> index 91ff0d8..57c76cb 100644
> --- a/lib/librte_vhost/vhost-net-cdev.c
> +++ b/lib/librte_vhost/vhost-net-cdev.c
> @@ -46,21 +46,21 @@
>  
>  #include "vhost-net-cdev.h"
>  
> -#define FUSE_OPT_DUMMY		"\0\0"
> -#define FUSE_OPT_FORE		"-f\0\0"
> -#define FUSE_OPT_NOMULTI	"-s\0\0"
> +#define FUSE_OPT_DUMMY "\0\0"
> +#define FUSE_OPT_FORE  "-f\0\0"
> +#define FUSE_OPT_NOMULTI "-s\0\0"
>  
> -static const uint32_t	default_major = 231;
> -static const uint32_t	default_minor = 1;
> -static const char	cuse_device_name[]	= "/dev/cuse";
> -static const char	default_cdev[] = "vhost-net";
> +static const uint32_t default_major = 231;
> +static const uint32_t default_minor = 1;
> +static const char cuse_device_name[] = "/dev/cuse";
> +static const char default_cdev[] = "vhost-net";
>  
> -static struct fuse_session			*session;
> -static struct vhost_net_device_ops	const *ops;
> +static struct fuse_session *session;
> +static struct vhost_net_device_ops const *ops;
>  
>  /*
> - * Returns vhost_device_ctx from given fuse_req_t. The index is populated later when
> - * the device is added to the device linked list.
> + * Returns vhost_device_ctx from given fuse_req_t. The index is populated later
> + * when the device is added to the device linked list.
>   */
>  static struct vhost_device_ctx
>  fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)
> @@ -75,7 +75,8 @@ fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)
>  }
>  
>  /*
> - * When the device is created in QEMU it gets initialised here and added to the device linked list.
> + * When the device is created in QEMU it gets initialised here and
> + * added to the device linked list.
>   */
>  static void
>  vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
> @@ -91,7 +92,8 @@ vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
>  
>  	fi->fh = err;
>  
> -	RTE_LOG(INFO, VHOST_CONFIG, "(%"PRIu64") Device configuration started\n", fi->fh);
> +	RTE_LOG(INFO, VHOST_CONFIG,
> +		"(%"PRIu64") Device configuration started\n", fi->fh);
>  	fuse_reply_open(req, fi);
>  }
>  
> @@ -113,8 +115,8 @@ vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
>   * Boilerplate code for CUSE IOCTL
>   * Implicit arguments: ctx, req, result.
>   */
> -#define VHOST_IOCTL(func) do {			\
> -	result = (func)(ctx);			\
> +#define VHOST_IOCTL(func) do {	\
> +	result = (func)(ctx);	\
>  	fuse_reply_ioctl(req, result, NULL, 0);	\
>  } while (0)
>  
> @@ -122,57 +124,58 @@ vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
>   * Boilerplate IOCTL RETRY
>   * Implicit arguments: req.
>   */
> -#define VHOST_IOCTL_RETRY(size_r, size_w) do {		\
> -	struct iovec iov_r = { arg, (size_r) };		\
> -	struct iovec iov_w = { arg, (size_w) };		\
> -	fuse_reply_ioctl_retry(req, &iov_r, (size_r)?1:0, &iov_w, (size_w)?1:0);	\
> +#define VHOST_IOCTL_RETRY(size_r, size_w) do {	\
> +	struct iovec iov_r = { arg, (size_r) };	\
> +	struct iovec iov_w = { arg, (size_w) };	\
> +	fuse_reply_ioctl_retry(req, &iov_r,	\
> +		(size_r) ? 1 : 0, &iov_w, (size_w) ? 1 : 0);\
>  } while (0)
>  
>  /*
>   * Boilerplate code for CUSE Read IOCTL
>   * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
>   */
> -#define VHOST_IOCTL_R(type, var, func) do {		\
> -	if (!in_bufsz) {				\
> -		VHOST_IOCTL_RETRY(sizeof(type), 0);	\
> -	} else {					\
> -		(var) = *(const type*) in_buf;		\
> -		result = func(ctx, &(var));		\
> -		fuse_reply_ioctl(req, result, NULL, 0);	\
> -	}						\
> +#define VHOST_IOCTL_R(type, var, func) do {	\
> +	if (!in_bufsz) {	\
> +		VHOST_IOCTL_RETRY(sizeof(type), 0);\
> +	} else {	\
> +		(var) = *(const type*)in_buf;	\
> +		result = func(ctx, &(var));	\
> +		fuse_reply_ioctl(req, result, NULL, 0);\
> +	}	\
>  } while (0)
>  
>  /*
> - *	Boilerplate code for CUSE Write IOCTL
> + * Boilerplate code for CUSE Write IOCTL
>   * Implicit arguments: ctx, req, result, out_bufsz.
>   */
> -#define	VHOST_IOCTL_W(type, var, func) do {		\
> -	if (!out_bufsz) {				\
> -		VHOST_IOCTL_RETRY(0, sizeof(type));	\
> -	} else {					\
> -		result = (func)(ctx, &(var));		\
> -		fuse_reply_ioctl(req, result, &(var), sizeof(type));	\
> -	}								\
> +#define VHOST_IOCTL_W(type, var, func) do {	\
> +	if (!out_bufsz) {	\
> +		VHOST_IOCTL_RETRY(0, sizeof(type));\
> +	} else {	\
> +		result = (func)(ctx, &(var));\
> +		fuse_reply_ioctl(req, result, &(var), sizeof(type));\
> +	} \
>  } while (0)
>  
>  /*
>   * Boilerplate code for CUSE Read/Write IOCTL
>   * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
>   */
> -#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do {		\
> -	if (!in_bufsz) {						\
> -		VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2));	\
> -	} else {							\
> -		(var1) = *(const type1*) (in_buf);			\
> -		result = (func)(ctx, (var1), &(var2));			\
> -		fuse_reply_ioctl(req, result, &(var2), sizeof(type2));	\
> -	}								\
> +#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do {	\
> +	if (!in_bufsz) {	\
> +		VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2));\
> +	} else {	\
> +		(var1) = *(const type1*) (in_buf);	\
> +		result = (func)(ctx, (var1), &(var2));	\
> +		fuse_reply_ioctl(req, result, &(var2), sizeof(type2));\
> +	}	\
>  } while (0)
>  
>  /*
> - * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on
> - * the type of IOCTL a buffer is requested to read or to write. This
> - * request is handled by FUSE and the buffer is then given to CUSE.
> + * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on the type
> + * of IOCTL a buffer is requested to read or to write. This request is handled
> + * by FUSE and the buffer is then given to CUSE.
>   */
>  static void
>  vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
> @@ -189,33 +192,39 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
>  
>  	switch (cmd) {
>  	case VHOST_NET_SET_BACKEND:
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
>  		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
>  		break;
>  
>  	case VHOST_GET_FEATURES:
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
>  		VHOST_IOCTL_W(uint64_t, features, ops->get_features);
>  		break;
>  
>  	case VHOST_SET_FEATURES:
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
>  		VHOST_IOCTL_R(uint64_t, features, ops->set_features);
>  		break;
>  
>  	case VHOST_RESET_OWNER:
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
>  		VHOST_IOCTL(ops->reset_owner);
>  		break;
>  
>  	case VHOST_SET_OWNER:
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
>  		VHOST_IOCTL(ops->set_owner);
>  		break;
>  
>  	case VHOST_SET_MEM_TABLE:
>  		/*TODO fix race condition.*/
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
>  		static struct vhost_memory mem_temp;
>  
>  		switch (in_bufsz) {
> @@ -227,7 +236,9 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
>  			mem_temp = *(const struct vhost_memory *) in_buf;
>  
>  			if (mem_temp.nregions > 0) {
> -				VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) + (sizeof(struct vhost_memory_region) * mem_temp.nregions), 0);
> +				VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) +
> +					(sizeof(struct vhost_memory_region) *
> +						mem_temp.nregions), 0);
>  			} else {
>  				result = -1;
>  				fuse_reply_ioctl(req, result, NULL, 0);
> @@ -235,56 +246,70 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
>  			break;
>  
>  		default:
> -			result = ops->set_mem_table(ctx, in_buf, mem_temp.nregions);
> +			result = ops->set_mem_table(ctx,
> +					in_buf, mem_temp.nregions);
>  			if (result)
>  				fuse_reply_err(req, EINVAL);
>  			else
>  				fuse_reply_ioctl(req, result, NULL, 0);
> -
>  		}
> -
>  		break;
>  
>  	case VHOST_SET_VRING_NUM:
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
> -		VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
> +		VHOST_IOCTL_R(struct vhost_vring_state, state,
> +			ops->set_vring_num);
>  		break;
>  
>  	case VHOST_SET_VRING_BASE:
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
> -		VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
> +		VHOST_IOCTL_R(struct vhost_vring_state, state,
> +			ops->set_vring_base);
>  		break;
>  
>  	case VHOST_GET_VRING_BASE:
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
> -		VHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
> +		VHOST_IOCTL_RW(uint32_t, index,
> +			struct vhost_vring_state, state, ops->get_vring_base);
>  		break;
>  
>  	case VHOST_SET_VRING_ADDR:
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
> -		VHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
> +		VHOST_IOCTL_R(struct vhost_vring_addr, addr,
> +			ops->set_vring_addr);
>  		break;
>  
>  	case VHOST_SET_VRING_KICK:
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
> -		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
> +		VHOST_IOCTL_R(struct vhost_vring_file, file,
> +			ops->set_vring_kick);
>  		break;
>  
>  	case VHOST_SET_VRING_CALL:
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
> -		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
> +		VHOST_IOCTL_R(struct vhost_vring_file, file,
> +			ops->set_vring_call);
>  		break;
>  
>  	default:
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
>  		result = -1;
>  		fuse_reply_ioctl(req, result, NULL, 0);
>  	}
>  
>  	if (result < 0)
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
>  	else
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
>  }
>  
>  /*
> @@ -297,8 +322,8 @@ static const struct cuse_lowlevel_ops vhost_net_ops = {
>  };
>  
>  /*
> - * cuse_info is populated and used to register the cuse device. vhost_net_device_ops are
> - * also passed when the device is registered in main.c.
> + * cuse_info is populated and used to register the cuse device.
> + * vhost_net_device_ops are also passed when the device is registered in app.
>   */
>  int
>  rte_vhost_driver_register(const char *dev_name)
> @@ -314,20 +339,23 @@ rte_vhost_driver_register(const char *dev_name)
>  	char *fuse_argv[] = {fuse_opt_dummy, fuse_opt_fore, fuse_opt_nomulti};
>  
>  	if (access(cuse_device_name, R_OK | W_OK) < 0) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "Character device %s can't be accessed, maybe not exist\n", cuse_device_name);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"char device %s can't be accessed, maybe not exist\n",
> +			cuse_device_name);
>  		return -1;
>  	}
>  
>  	/*
> -	 * The device name is created. This is passed to QEMU so that it can register
> -	 * the device with our application.
> +	 * The device name is created. This is passed to QEMU so that it can
> +	 * register the device with our application.
>  	 */
>  	snprintf(device_name, PATH_MAX, "DEVNAME=%s", dev_name);
>  	snprintf(char_device_name, PATH_MAX, "/dev/%s", dev_name);
>  
>  	/* Check if device already exists. */
>  	if (access(char_device_name, F_OK) != -1) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "Character device %s already exists\n", char_device_name);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"char device %s already exists\n", char_device_name);
>  		return -1;
>  	}
>  
> @@ -341,7 +369,7 @@ rte_vhost_driver_register(const char *dev_name)
>  	ops = get_virtio_net_callbacks();
>  
>  	session = cuse_lowlevel_setup(3, fuse_argv,
> -				&cuse_info, &vhost_net_ops, 0, NULL);
> +			&cuse_info, &vhost_net_ops, 0, NULL);
>  	if (session == NULL)
>  		return -1;
>  
> @@ -349,7 +377,8 @@ rte_vhost_driver_register(const char *dev_name)
>  }
>  
>  /**
> - * The CUSE session is launched allowing the application to receive open, release and ioctl calls.
> + * The CUSE session is launched allowing the application to receive open,
> + * release and ioctl calls.
>   */
>  int
>  rte_vhost_driver_session_start(void)
> diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
> index 84ec0e8..ccfd82f 100644
> --- a/lib/librte_vhost/vhost_rxtx.c
> +++ b/lib/librte_vhost/vhost_rxtx.c
> @@ -45,7 +45,7 @@
>  /**
>   * This function adds buffers to the virtio devices RX virtqueue. Buffers can
>   * be received from the physical port or from another virtio device. A packet
> - * count is returned to indicate the number of packets that were succesfully
> + * count is returned to indicate the number of packets that are succesfully
>   * added to the RX queue. This function works when mergeable is disabled.
>   */
>  static inline uint32_t __attribute__((always_inline))
> @@ -76,7 +76,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
>  	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
>  
>  	/*
> -	 * As many data cores may want access to available buffers, 
> +	 * As many data cores may want access to available buffers,
>  	 * they need to be reserved.
>  	 */
>  	do {
> @@ -143,7 +143,8 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
>  		}
>  
>  		/* Update used ring with desc information */
> -		vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
> +		vq->used->ring[res_cur_idx & (vq->size - 1)].id =
> +							head[packet_success];
>  		vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
>  
>  		/* Copy mbuf data to buffer */
> @@ -389,10 +390,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
>  }
>  
>  /*
> - * This function adds buffers to the virtio devices RX virtqueue. Buffers can
> - * be received from the physical port or from another virtio device. A packet
> - * count is returned to indicate the number of packets that were succesfully
> - * added to the RX queue. This function works for mergeable RX.
> + * This function works for mergeable RX.
>   */
>  static inline uint32_t __attribute__((always_inline))
>  virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
> @@ -729,5 +727,4 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
>  	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
>  		eventfd_write((int)vq->kickfd, 1);
>  	return entry_success;
> -
>  }
> diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
> index 8015dd8..c07a11e 100644
> --- a/lib/librte_vhost/virtio-net.c
> +++ b/lib/librte_vhost/virtio-net.c
> @@ -52,27 +52,27 @@
>  #include "vhost-net-cdev.h"
>  #include "eventfd_link/eventfd_link.h"
>  
> -/**
> +/*
>   * Device linked list structure for configuration.
>   */
>  struct virtio_net_config_ll {
> -	struct virtio_net		dev;	/* Virtio device.*/
> -	struct virtio_net_config_ll	*next;	/* Next entry on linked list.*/
> +	struct virtio_net dev;			/* Virtio device.*/
> +	struct virtio_net_config_ll *next;	/* Next dev on linked list.*/
>  };
>  
>  const char eventfd_cdev[] = "/dev/eventfd-link";
>  
> -/* device ops to add/remove device to data core. */
> +/* device ops to add/remove device to/from data core. */
>  static struct virtio_net_device_ops const *notify_ops;
> -/* Root address of the linked list in the configuration core. */
> -static struct virtio_net_config_ll	*ll_root;
> +/* root address of the linked list of managed virtio devices */
> +static struct virtio_net_config_ll *ll_root;
>  
> -/* Features supported by this application. RX merge buffers are enabled by default. */
> +/* Features supported by this lib. */
>  #define VHOST_SUPPORTED_FEATURES (1ULL << VIRTIO_NET_F_MRG_RXBUF)
>  static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
>  
>  /* Line size for reading maps file. */
> -const uint32_t BUFSIZE = PATH_MAX;
> +static const uint32_t BUFSIZE = PATH_MAX;
>  
>  /* Size of prot char array in procmap. */
>  #define PROT_SZ 5
> @@ -82,19 +82,19 @@ const uint32_t BUFSIZE = PATH_MAX;
>  
>  /* Structure containing information gathered from maps file. */
>  struct procmap {
> -	uint64_t	va_start;	/* Start virtual address in file. */
> -	uint64_t	len;		/* Size of file. */
> -	uint64_t	pgoff;		/* Not used. */
> -	uint32_t	maj;		/* Not used. */
> -	uint32_t	min;		/* Not used. */
> -	uint32_t	ino;		/* Not used. */
> -	char		prot[PROT_SZ];	/* Not used. */
> -	char		fname[PATH_MAX];/* File name. */
> +	uint64_t va_start;	/* Start virtual address in file. */
> +	uint64_t len;		/* Size of file. */
> +	uint64_t pgoff;		/* Not used. */
> +	uint32_t maj;		/* Not used. */
> +	uint32_t min;		/* Not used. */
> +	uint32_t ino;		/* Not used. */
> +	char prot[PROT_SZ];	/* Not used. */
> +	char fname[PATH_MAX];	/* File name. */
>  };
>  
>  /*
> - * Converts QEMU virtual address to Vhost virtual address. This function is used
> - * to convert the ring addresses to our address space.
> + * Converts QEMU virtual address to Vhost virtual address. This function is
> + * used to convert the ring addresses to our address space.
>   */
>  static uint64_t
>  qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
> @@ -107,8 +107,8 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
>  	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
>  		region = &dev->mem->regions[regionidx];
>  		if ((qemu_va >= region->userspace_address) &&
> -				(qemu_va <= region->userspace_address +
> -				region->memory_size)) {
> +			(qemu_va <= region->userspace_address +
> +			region->memory_size)) {
>  			vhost_va = dev->mem->mapped_address + qemu_va -
>  					dev->mem->base_address;
>  			break;
> @@ -118,7 +118,8 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
>  }
>  
>  /*
> - * Locate the file containing QEMU's memory space and map it to our address space.
> + * Locate the file containing QEMU's memory space and
> + * map it to our address space.
>   */
>  static int
>  host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
> @@ -134,10 +135,10 @@ host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
>  	char procdir[PATH_MAX];
>  	char resolved_path[PATH_MAX];
>  	char *path = NULL;
> -	FILE		*fmap;
> -	void		*map;
> -	uint8_t		found = 0;
> -	char		line[BUFSIZE];
> +	FILE *fmap;
> +	void *map;
> +	uint8_t found = 0;
> +	char line[BUFSIZE];
>  	char dlm[] = "-   :   ";
>  	char *str, *sp, *in[PROCMAP_SZ];
>  	char *end = NULL;
> @@ -159,7 +160,7 @@ host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
>  	while (fgets(line, BUFSIZE, fmap) != 0) {
>  		str = line;
>  		errno = 0;
> -		/* Split line in to fields. */
> +		/* Split line into fields. */
>  		for (i = 0; i < PROCMAP_SZ; i++) {
>  			in[i] = strtok_r(str, &dlm[i], &sp);
>  			if ((in[i] == NULL) || (errno != 0)) {
> @@ -171,37 +172,43 @@ host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
>  
>  		/* Convert/Copy each field as needed. */
>  		procmap.va_start = strtoull(in[0], &end, 16);
> -		if ((in[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> +		if ((in[0] == '\0') || (end == NULL) || (*end != '\0') ||
> +			(errno != 0)) {
>  			fclose(fmap);
>  			return -1;
>  		}
>  
>  		procmap.len = strtoull(in[1], &end, 16);
> -		if ((in[1] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> +		if ((in[1] == '\0') || (end == NULL) || (*end != '\0') ||
> +			(errno != 0)) {
>  			fclose(fmap);
>  			return -1;
>  		}
>  
>  		procmap.pgoff = strtoull(in[3], &end, 16);
> -		if ((in[3] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> +		if ((in[3] == '\0') || (end == NULL) || (*end != '\0') ||
> +			(errno != 0)) {
>  			fclose(fmap);
>  			return -1;
>  		}
>  
>  		procmap.maj = strtoul(in[4], &end, 16);
> -		if ((in[4] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> +		if ((in[4] == '\0') || (end == NULL) || (*end != '\0') ||
> +			(errno != 0)) {
>  			fclose(fmap);
>  			return -1;
>  		}
>  
>  		procmap.min = strtoul(in[5], &end, 16);
> -		if ((in[5] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> +		if ((in[5] == '\0') || (end == NULL) || (*end != '\0') ||
> +			(errno != 0)) {
>  			fclose(fmap);
>  			return -1;
>  		}
>  
>  		procmap.ino = strtoul(in[6], &end, 16);
> -		if ((in[6] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> +		if ((in[6] == '\0') || (end == NULL) || (*end != '\0') ||
> +			(errno != 0)) {
>  			fclose(fmap);
>  			return -1;
>  		}
> @@ -218,16 +225,19 @@ host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
>  	fclose(fmap);
>  
>  	if (!found) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file in pid %d maps file\n", dev->device_fh, pid);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") Failed to find memory file in pid %d maps file\n",
> +			dev->device_fh, pid);
>  		return -1;
>  	}
>  
>  	/* Find the guest memory file among the process fds. */
>  	dp = opendir(procdir);
>  	if (dp == NULL) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory\n", dev->device_fh, pid);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") Cannot open pid %d process directory\n",
> +			dev->device_fh, pid);
>  		return -1;
> -
>  	}
>  
>  	found = 0;
> @@ -254,23 +264,29 @@ host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
>  	closedir(dp);
>  
>  	if (found == 0) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file for pid %d\n", dev->device_fh, pid);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") Failed to find memory file for pid %d\n",
> +			dev->device_fh, pid);
>  		return -1;
>  	}
>  	/* Open the shared memory file and map the memory into this process. */
>  	fd = open(memfile, O_RDWR);
>  
>  	if (fd == -1) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to open %s for pid %d\n", dev->device_fh, memfile, pid);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") Failed to open %s for pid %d\n",
> +			dev->device_fh, memfile, pid);
>  		return -1;
>  	}
>  
> -	map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE ,
> -			MAP_POPULATE|MAP_SHARED, fd, 0);
> +	map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE,
> +		MAP_POPULATE|MAP_SHARED, fd, 0);
>  	close(fd);
>  
>  	if (map == MAP_FAILED) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Error mapping the file %s for pid %d\n",  dev->device_fh, memfile, pid);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") Error mapping the file %s for pid %d\n",
> +			dev->device_fh, memfile, pid);
>  		return -1;
>  	}
>  
> @@ -278,8 +294,11 @@ host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
>  	mem->mapped_address = (uint64_t)(uintptr_t)map;
>  	mem->mapped_size = procmap.len;
>  
> -	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n", dev->device_fh,
> -		memfile, resolved_path, (long long unsigned)mem->mapped_size, map);
> +	LOG_DEBUG(VHOST_CONFIG,
> +		"(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n",
> +		dev->device_fh,
> +		memfile, resolved_path,
> +		(long long unsigned)mem->mapped_size, map);
>  
>  	return 0;
>  }
> @@ -303,7 +322,8 @@ get_config_ll_entry(struct vhost_device_ctx ctx)
>  }
>  
>  /*
> - * Searches the configuration core linked list and retrieves the device if it exists.
> + * Searches the configuration core linked list and
> + * retrieves the device if it exists.
>   */
>  static struct virtio_net *
>  get_device(struct vhost_device_ctx ctx)
> @@ -312,11 +332,11 @@ get_device(struct vhost_device_ctx ctx)
>  
>  	ll_dev = get_config_ll_entry(ctx);
>  
> -	/* If a matching entry is found in the linked list, return the device in that entry. */
>  	if (ll_dev)
>  		return &ll_dev->dev;
>  
> -	RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
> +	RTE_LOG(ERR, VHOST_CONFIG,
> +		"(%"PRIu64") Device not found in linked list.\n", ctx.fh);
>  	return NULL;
>  }
>  
> @@ -331,13 +351,18 @@ add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
>  	/* If ll_dev == NULL then this is the first device so go to else */
>  	if (ll_dev) {
>  		/* If the 1st device_fh != 0 then we insert our device here. */
> -		if (ll_dev->dev.device_fh != 0)	{
> +		if (ll_dev->dev.device_fh != 0) {
>  			new_ll_dev->dev.device_fh = 0;
>  			new_ll_dev->next = ll_dev;
>  			ll_root = new_ll_dev;
>  		} else {
> -			/* Increment through the ll until we find un unused device_fh. Insert the device at that entry*/
> -			while ((ll_dev->next != NULL) && (ll_dev->dev.device_fh == (ll_dev->next->dev.device_fh - 1)))
> +			/*
> +			 * Increment through the ll until we find un unused
> +			 * device_fh. Insert the device at that entry.
> +			 */
> +			while ((ll_dev->next != NULL) &&
> +				(ll_dev->dev.device_fh ==
> +					(ll_dev->next->dev.device_fh - 1)))
>  				ll_dev = ll_dev->next;
>  
>  			new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
> @@ -352,7 +377,8 @@ add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
>  }
>  
>  /*
> - * Unmap any memory, close any file descriptors and free any memory owned by a device.
> + * Unmap any memory, close any file descriptors and
> + * free any memory owned by a device.
>   */
>  static void
>  cleanup_device(struct virtio_net *dev)
> @@ -386,6 +412,7 @@ free_device(struct virtio_net_config_ll *ll_dev)
>  	free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
>  	free(ll_dev);
>  }
> +
>  /*
>   * Remove an entry from the device configuration linked list.
>   */
> @@ -423,7 +450,10 @@ init_device(struct virtio_net *dev)
>  {
>  	uint64_t vq_offset;
>  
> -	/* Virtqueues have already been malloced so we don't want to set them to NULL. */
> +	/*
> +	 * Virtqueues have already been malloced so
> +	 * we don't want to set them to NULL.
> +	 */
>  	vq_offset = offsetof(struct virtio_net, mem);
>  
>  	/* Set everything to 0. */
> @@ -491,8 +521,8 @@ new_device(struct vhost_device_ctx ctx)
>  }
>  
>  /*
> - * Function is called from the CUSE release function. This function will cleanup
> - * the device and remove it from device configuration linked list.
> + * Function is called from the CUSE release function. This function will
> + * cleanup the device and remove it from device configuration linked list.
>   */
>  static void
>  destroy_device(struct vhost_device_ctx ctx)
> @@ -503,15 +533,19 @@ destroy_device(struct vhost_device_ctx ctx)
>  	/* Find the linked list entry for the device to be removed. */
>  	ll_dev_cur_ctx = get_config_ll_entry(ctx);
>  	while (ll_dev_cur != NULL) {
> -		/* If the device is found or a device that doesn't exist is found then it is removed. */
> +		/*
> +		 * If the device is found or
> +		 * a device that doesn't exist is found then it is removed.
> +		 */
>  		if (ll_dev_cur == ll_dev_cur_ctx) {
>  			/*
> -			 * If the device is running on a data core then call the function to remove it from
> -			 * the data core.
> +			 * If the device is running on a data core then call
> +			 * the function to remove it from the data core.
>  			 */
>  			if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
>  				notify_ops->destroy_device(&(ll_dev_cur->dev));
> -			ll_dev_cur = rm_config_ll_entry(ll_dev_cur, ll_dev_last);
> +			ll_dev_cur = rm_config_ll_entry(ll_dev_cur,
> +					ll_dev_last);
>  		} else {
>  			ll_dev_last = ll_dev_cur;
>  			ll_dev_cur = ll_dev_cur->next;
> @@ -521,7 +555,8 @@ destroy_device(struct vhost_device_ctx ctx)
>  
>  /*
>   * Called from CUSE IOCTL: VHOST_SET_OWNER
> - * This function just returns success at the moment unless the device hasn't been initialised.
> + * This function just returns success at the moment unless
> + * the device hasn't been initialised.
>   */
>  static int
>  set_owner(struct vhost_device_ctx ctx)
> @@ -571,7 +606,7 @@ get_features(struct vhost_device_ctx ctx, uint64_t *pu)
>  
>  /*
>   * Called from CUSE IOCTL: VHOST_SET_FEATURES
> - * We receive the negotiated set of features supported by us and the virtio device.
> + * We receive the negotiated features supported by us and the virtio device.
>   */
>  static int
>  set_features(struct vhost_device_ctx ctx, uint64_t *pu)
> @@ -589,13 +624,17 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu)
>  
>  	/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
>  	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers enabled\n", dev->device_fh);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") Mergeable RX buffers enabled\n",
> +			dev->device_fh);
>  		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
>  			sizeof(struct virtio_net_hdr_mrg_rxbuf);
>  		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
>  			sizeof(struct virtio_net_hdr_mrg_rxbuf);
>  	} else {
> -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers disabled\n", dev->device_fh);
> +		LOG_DEBUG(VHOST_CONFIG,
> +			"(%"PRIu64") Mergeable RX buffers disabled\n",
> +			dev->device_fh);
>  		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
>  			sizeof(struct virtio_net_hdr);
>  		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
> @@ -607,8 +646,8 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu)
>  
>  /*
>   * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE
> - * This function creates and populates the memory structure for the device. This includes
> - * storing offsets used to translate buffer addresses.
> + * This function creates and populates the memory structure for the device.
> + * This includes storing offsets used to translate buffer addresses.
>   */
>  static int
>  set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
> @@ -634,7 +673,9 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
>  	mem = calloc(1, sizeof(struct virtio_memory) +
>  		(sizeof(struct virtio_memory_regions) * nregions));
>  	if (mem == NULL) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev->mem.\n", dev->device_fh);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") Failed to allocate memory for dev->mem.\n",
> +			dev->device_fh);
>  		return -1;
>  	}
>  
> @@ -656,15 +697,18 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
>  			mem_regions[regionidx].userspace_addr;
>  
>  		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
> -				regionidx, (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
> -				(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
> -				mem->regions[regionidx].memory_size);
> +			regionidx,
> +			(void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
> +			(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
> +			mem->regions[regionidx].memory_size);
>  
>  		/*set the base address mapping*/
>  		if (mem->regions[regionidx].guest_phys_address == 0x0) {
> -			mem->base_address = mem->regions[regionidx].userspace_address;
> +			mem->base_address =
> +				mem->regions[regionidx].userspace_address;
>  			/* Map VM memory file */
> -			if (host_memory_map(dev, mem, ctx.pid, mem->base_address) != 0) {
> +			if (host_memory_map(dev, mem, ctx.pid,
> +				mem->base_address) != 0) {
>  				free(mem);
>  				return -1;
>  			}
> @@ -678,27 +722,42 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
>  		return -1;
>  	}
>  
> -	/* Check if all of our regions have valid mappings. Usually one does not exist in the QEMU memory file. */
> +	/*
> +	 * Check if all of our regions have valid mappings.
> +	 * Usually one does not exist in the QEMU memory file.
> +	 */
>  	valid_regions = mem->nregions;
>  	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
> -		if ((mem->regions[regionidx].userspace_address < mem->base_address) ||
> -			(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size)))
> +		if ((mem->regions[regionidx].userspace_address <
> +			mem->base_address) ||
> +			(mem->regions[regionidx].userspace_address >
> +			(mem->base_address + mem->mapped_size)))
>  				valid_regions--;
>  	}
>  
> -	/* If a region does not have a valid mapping we rebuild our memory struct to contain only valid entries. */
> +	/*
> +	 * If a region does not have a valid mapping,
> +	 * we rebuild our memory struct to contain only valid entries.
> +	 */
>  	if (valid_regions != mem->nregions) {
>  		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\n",
>  			dev->device_fh);
>  
> -		/* Re-populate the memory structure with only valid regions. Invalid regions are over-written with memmove. */
> +		/*
> +		 * Re-populate the memory structure with only valid regions.
> +		 * Invalid regions are over-written with memmove.
> +		 */
>  		valid_regions = 0;
>  
>  		for (regionidx = mem->nregions; 0 != regionidx--;) {
> -			if ((mem->regions[regionidx].userspace_address < mem->base_address) ||
> -					(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size))) {
> -				memmove(&mem->regions[regionidx], &mem->regions[regionidx + 1],
> -					sizeof(struct virtio_memory_regions) * valid_regions);
> +			if ((mem->regions[regionidx].userspace_address <
> +				mem->base_address) ||
> +				(mem->regions[regionidx].userspace_address >
> +				(mem->base_address + mem->mapped_size))) {
> +				memmove(&mem->regions[regionidx],
> +					&mem->regions[regionidx + 1],
> +					sizeof(struct virtio_memory_regions) *
> +						valid_regions);
>  			} else {
>  				valid_regions++;
>  			}
> @@ -708,12 +767,16 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
>  	dev->mem = mem;
>  
>  	/*
> -	 * Calculate the address offset for each region. This offset is used to identify the vhost virtual address
> +	 * Calculate the address offset for each region.
> +	 * This offset is used to identify the vhost virtual address
>  	 * corresponding to a QEMU guest physical address.
>  	 */
>  	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
> -		dev->mem->regions[regionidx].address_offset = dev->mem->regions[regionidx].userspace_address - dev->mem->base_address
> -			+ dev->mem->mapped_address - dev->mem->regions[regionidx].guest_phys_address;
> +		dev->mem->regions[regionidx].address_offset =
> +			dev->mem->regions[regionidx].userspace_address -
> +				dev->mem->base_address +
> +				dev->mem->mapped_address -
> +				dev->mem->regions[regionidx].guest_phys_address;
>  
>  	}
>  	return 0;
> @@ -732,7 +795,7 @@ set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
>  	if (dev == NULL)
>  		return -1;
>  
> -	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> +	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
>  	dev->virtqueue[state->index]->size = state->num;
>  
>  	return 0;
> @@ -740,8 +803,8 @@ set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
>  
>  /*
>   * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
> - * The virtio device sends us the desc, used and avail ring addresses. This function
> - * then converts these to our address space.
> + * The virtio device sends us the desc, used and avail ring addresses.
> + * This function then converts these to our address space.
>   */
>  static int
>  set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
> @@ -753,31 +816,43 @@ set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
>  	if (dev == NULL)
>  		return -1;
>  
> -	/* addr->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> +	/* addr->index refers to the queue index. The txq 1, rxq is 0. */
>  	vq = dev->virtqueue[addr->index];
>  
>  	/* The addresses are converted from QEMU virtual to Vhost virtual. */
> -	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
> +	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
> +			addr->desc_user_addr);
>  	if (vq->desc == 0) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find descriptor ring address.\n", dev->device_fh);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") Failed to find desc ring address.\n",
> +			dev->device_fh);
>  		return -1;
>  	}
>  
> -	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
> +	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
> +			addr->avail_user_addr);
>  	if (vq->avail == 0) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find available ring address.\n", dev->device_fh);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") Failed to find avail ring address.\n",
> +			dev->device_fh);
>  		return -1;
>  	}
>  
> -	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
> +	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
> +			addr->used_user_addr);
>  	if (vq->used == 0) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find used ring address.\n", dev->device_fh);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") Failed to find used ring address.\n",
> +			dev->device_fh);
>  		return -1;
>  	}
>  
> -	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n", dev->device_fh, vq->desc);
> -	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n", dev->device_fh, vq->avail);
> -	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n", dev->device_fh, vq->used);
> +	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n",
> +			dev->device_fh, vq->desc);
> +	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n",
> +			dev->device_fh, vq->avail);
> +	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n",
> +			dev->device_fh, vq->used);
>  
>  	return 0;
>  }
> @@ -795,7 +870,7 @@ set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
>  	if (dev == NULL)
>  		return -1;
>  
> -	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> +	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
>  	dev->virtqueue[state->index]->last_used_idx = state->num;
>  	dev->virtqueue[state->index]->last_used_idx_res = state->num;
>  
> @@ -817,15 +892,15 @@ get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
>  		return -1;
>  
>  	state->index = index;
> -	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> +	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
>  	state->num = dev->virtqueue[state->index]->last_used_idx;
>  
>  	return 0;
>  }
>  
>  /*
> - * This function uses the eventfd_link kernel module to copy an eventfd file descriptor
> - * provided by QEMU in to our process space.
> + * This function uses the eventfd_link kernel module to copy an eventfd file
> + * descriptor provided by QEMU in to our process space.
>   */
>  static int
>  eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
> @@ -835,7 +910,9 @@ eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
>  	/* Open the character device to the kernel module. */
>  	eventfd_link = open(eventfd_cdev, O_RDWR);
>  	if (eventfd_link < 0) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") eventfd_link module is not loaded\n",  dev->device_fh);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") eventfd_link module is not loaded\n",
> +			dev->device_fh);
>  		return -1;
>  	}
>  
> @@ -844,18 +921,19 @@ eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
>  	close(eventfd_link);
>  
>  	if (ret < 0) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") EVENTFD_COPY ioctl failed\n",  dev->device_fh);
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"(%"PRIu64") EVENTFD_COPY ioctl failed\n",
> +			dev->device_fh);
>  		return -1;
>  	}
>  
> -
>  	return 0;
>  }
>  
>  /*
>   * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
> - * The virtio device sends an eventfd to interrupt the guest. This fd gets copied in
> - * to our process space.
> + * The virtio device sends an eventfd to interrupt the guest. This fd gets
> + * copied into our process space.
>   */
>  static int
>  set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> @@ -868,7 +946,7 @@ set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
>  	if (dev == NULL)
>  		return -1;
>  
> -	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> +	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
>  	vq = dev->virtqueue[file->index];
>  
>  	if (vq->kickfd)
> @@ -888,8 +966,8 @@ set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
>  
>  /*
>   * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
> - * The virtio device sends an eventfd that it can use to notify us. This fd gets copied in
> - * to our process space.
> + * The virtio device sends an eventfd that it can use to notify us.
> + * This fd gets copied into our process space.
>   */
>  static int
>  set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> @@ -902,7 +980,7 @@ set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
>  	if (dev == NULL)
>  		return -1;
>  
> -	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> +	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
>  	vq = dev->virtqueue[file->index];
>  
>  	if (vq->callfd)
> @@ -922,10 +1000,12 @@ set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
>  
>  /*
>   * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
> - * To complete device initialisation when the virtio driver is loaded we are provided with a
> - * valid fd for a tap device (not used by us). If this happens then we can add the device to a
> - * data core. When the virtio driver is removed we get fd=-1. At that point we remove the device
> - * from the data core. The device will still exist in the device configuration linked list.
> + * To complete device initialisation when the virtio driver is loaded,
> + * we are provided with a valid fd for a tap device (not used by us).
> + * If this happens then we can add the device to a data core.
> + * When the virtio driver is removed we get fd=-1.
> + * At that point we remove the device from the data core.
> + * The device will still exist in the device configuration linked list.
>   */
>  static int
>  set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> @@ -936,10 +1016,13 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
>  	if (dev == NULL)
>  		return -1;
>  
> -	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> +	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
>  	dev->virtqueue[file->index]->backend = file->fd;
>  
> -	/* If the device isn't already running and both backend fds are set we add the device. */
> +	/*
> +	 * If the device isn't already running and both backend fds are set,
> +	 * we add the device.
> +	 */
>  	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
>  		if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
>  			((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED))
> @@ -952,8 +1035,8 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
>  }
>  
>  /*
> - * Function pointers are set for the device operations to allow CUSE to call functions
> - * when an IOCTL, device_add or device_release is received.
> + * Function pointers are set for the device operations to allow CUSE to call
> + * functions when an IOCTL, device_add or device_release is received.
>   */
>  static const struct vhost_net_device_ops vhost_device_ops = {
>  	.new_device = new_device,
> @@ -991,11 +1074,13 @@ int rte_vhost_enable_guest_notification(struct virtio_net *dev,
>  	uint16_t queue_id, int enable)
>  {
>  	if (enable) {
> -		RTE_LOG(ERR, VHOST_CONFIG, "guest notification isn't supported.\n");
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"guest notification isn't supported.\n");
>  		return -1;
>  	}
>  
> -	dev->virtqueue[queue_id]->used->flags = enable ? 0 : VRING_USED_F_NO_NOTIFY;
> +	dev->virtqueue[queue_id]->used->flags =
> +		enable ? 0 : VRING_USED_F_NO_NOTIFY;
>  	return 0;
>  }
>  
>
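
For reference, the QEMU-virtual-address to vhost-virtual-address translation that set_vring_addr() relies on above boils down to a per-region offset lookup. Below is a minimal sketch of that conversion; the struct layout is reduced to the fields visible in the diff (guest_phys_address, userspace_address, memory_size, base_address, mapped_address), and every other name in it is illustrative only, not taken from the library.

#include <stdint.h>

struct region_sketch {
	uint64_t guest_phys_address;	/* guest physical start of the region */
	uint64_t userspace_address;	/* QEMU virtual start of the region */
	uint64_t memory_size;		/* length of the region */
};

struct memory_sketch {
	uint64_t base_address;		/* QEMU VA of the region starting at GPA 0 */
	uint64_t mapped_address;	/* where this process mapped the guest memory file */
	uint32_t nregions;
	struct region_sketch regions[8];
};

/* Returns 0 when the QEMU VA is not covered by any region. */
static uint64_t
qva_to_vva_sketch(const struct memory_sketch *mem, uint64_t qemu_va)
{
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		const struct region_sketch *r = &mem->regions[i];

		if (qemu_va >= r->userspace_address &&
		    qemu_va <= r->userspace_address + r->memory_size)
			/* same arithmetic as the diff: mmap base plus the
			 * offset of the QEMU VA from QEMU's base address */
			return mem->mapped_address + qemu_va - mem->base_address;
	}
	return 0;
}

The per-region address_offset computed near the top of this hunk folds the same mapped_address - base_address shift into a single constant, but keyed on guest physical addresses rather than QEMU virtual ones.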
  
Huawei Xie Nov. 5, 2014, 5:07 p.m. UTC | #2
:(.
Resend is done. Please drop this patch.

> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> Sent: Wednesday, November 05, 2014 2:10 AM
> To: Xie, Huawei
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] lib/librte_vhost: code style fixes
> 
> Hi Huawei,
> 
> Please set a Signed-off in your patch.
> 
> 2014-11-05 12:42, Huawei Xie:
> > This patch fixes code style issues and refines some comments in vhost library.
> >
> >
> > ---
> >  lib/librte_vhost/eventfd_link/eventfd_link.c | 244 ++++++++++-----------
> >  lib/librte_vhost/eventfd_link/eventfd_link.h | 127 ++++++-----
> >  lib/librte_vhost/rte_virtio_net.h            |   3 +-
> >  lib/librte_vhost/vhost-net-cdev.c            | 187 +++++++++-------
> >  lib/librte_vhost/vhost_rxtx.c                |  13 +-
> >  lib/librte_vhost/virtio-net.c                | 317 +++++++++++++++++----------
> >  6 files changed, 494 insertions(+), 397 deletions(-)
> >
> > diff --git a/lib/librte_vhost/eventfd_link/eventfd_link.c b/lib/librte_vhost/eventfd_link/eventfd_link.c
> > index fc0653a..542ec2c 100644
> > --- a/lib/librte_vhost/eventfd_link/eventfd_link.c
> > +++ b/lib/librte_vhost/eventfd_link/eventfd_link.c
> > @@ -1,26 +1,26 @@
> >  /*-
> > - *  * GPL LICENSE SUMMARY
> > - *  *
> > - *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> > - *  *
> > - *  *   This program is free software; you can redistribute it and/or modify
> > - *  *   it under the terms of version 2 of the GNU General Public License as
> > - *  *   published by the Free Software Foundation.
> > - *  *
> > - *  *   This program is distributed in the hope that it will be useful, but
> > - *  *   WITHOUT ANY WARRANTY; without even the implied warranty of
> > - *  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> GNU
> > - *  *   General Public License for more details.
> > - *  *
> > - *  *   You should have received a copy of the GNU General Public License
> > - *  *   along with this program; if not, write to the Free Software
> > - *  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301
> USA.
> > - *  *   The full GNU General Public License is included in this distribution
> > - *  *   in the file called LICENSE.GPL.
> > - *  *
> > - *  *   Contact Information:
> > - *  *   Intel Corporation
> > - *   */
> > + * GPL LICENSE SUMMARY
> > + *
> > + *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> > + *
> > + *   This program is free software; you can redistribute it and/or modify
> > + *   it under the terms of version 2 of the GNU General Public License as
> > + *   published by the Free Software Foundation.
> > + *
> > + *   This program is distributed in the hope that it will be useful, but
> > + *   WITHOUT ANY WARRANTY; without even the implied warranty of
> > + *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> GNU
> > + *   General Public License for more details.
> > + *
> > + *   You should have received a copy of the GNU General Public License
> > + *   along with this program; if not, write to the Free Software
> > + *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
> > + *   The full GNU General Public License is included in this distribution
> > + *   in the file called LICENSE.GPL.
> > + *
> > + *   Contact Information:
> > + *   Intel Corporation
> > + */
> >
> >  #include <linux/eventfd.h>
> >  #include <linux/miscdevice.h>
> > @@ -42,15 +42,15 @@
> >   * get_files_struct is copied from fs/file.c
> >   */
> >  struct files_struct *
> > -get_files_struct (struct task_struct *task)
> > +get_files_struct(struct task_struct *task)
> >  {
> >  	struct files_struct *files;
> >
> > -	task_lock (task);
> > +	task_lock(task);
> >  	files = task->files;
> >  	if (files)
> > -		atomic_inc (&files->count);
> > -	task_unlock (task);
> > +		atomic_inc(&files->count);
> > +	task_unlock(task);
> >
> >  	return files;
> >  }
> > @@ -59,17 +59,15 @@ get_files_struct (struct task_struct *task)
> >   * put_files_struct is extracted from fs/file.c
> >   */
> >  void
> > -put_files_struct (struct files_struct *files)
> > +put_files_struct(struct files_struct *files)
> >  {
> > -	if (atomic_dec_and_test (&files->count))
> > -	{
> > -		BUG ();
> > -	}
> > +	if (atomic_dec_and_test(&files->count))
> > +		BUG();
> >  }
> >
> >
> >  static long
> > -eventfd_link_ioctl (struct file *f, unsigned int ioctl, unsigned long arg)
> > +eventfd_link_ioctl(struct file *f, unsigned int ioctl, unsigned long arg)
> >  {
> >  	void __user *argp = (void __user *) arg;
> >  	struct task_struct *task_target = NULL;
> > @@ -78,96 +76,88 @@ eventfd_link_ioctl (struct file *f, unsigned int ioctl,
> unsigned long arg)
> >  	struct fdtable *fdt;
> >  	struct eventfd_copy eventfd_copy;
> >
> > -	switch (ioctl)
> > -	{
> > -		case EVENTFD_COPY:
> > -			if (copy_from_user (&eventfd_copy, argp, sizeof (struct eventfd_copy)))
> > -				return -EFAULT;
> > -
> > -			/*
> > -			 * Find the task struct for the target pid
> > -			 */
> > -			task_target =
> > -				pid_task (find_vpid (eventfd_copy.target_pid), PIDTYPE_PID);
> > -			if (task_target == NULL)
> > -			{
> > -				printk (KERN_DEBUG "Failed to get mem ctx for target pid\n");
> > -				return -EFAULT;
> > -			}
> > -
> > -			files = get_files_struct (current);
> > -			if (files == NULL)
> > -			{
> > -				printk (KERN_DEBUG "Failed to get files struct\n");
> > -				return -EFAULT;
> > -			}
> > -
> > -			rcu_read_lock ();
> > -			file = fcheck_files (files, eventfd_copy.source_fd);
> > -			if (file)
> > -			{
> > -				if (file->f_mode & FMODE_PATH
> > -						|| !atomic_long_inc_not_zero (&file->f_count))
> > -					file = NULL;
> > -			}
> > -			rcu_read_unlock ();
> > -			put_files_struct (files);
> > -
> > -			if (file == NULL)
> > -			{
> > -				printk (KERN_DEBUG "Failed to get file from source pid\n");
> > -				return 0;
> > -			}
> > -
> > -			/*
> > -			 * Release the existing eventfd in the source process
> > -			 */
> > -			spin_lock (&files->file_lock);
> > -			filp_close (file, files);
> > -			fdt = files_fdtable (files);
> > -			fdt->fd[eventfd_copy.source_fd] = NULL;
> > -			spin_unlock (&files->file_lock);
> > -
> > -			/*
> > -			 * Find the file struct associated with the target fd.
> > -			 */
> > -
> > -			files = get_files_struct (task_target);
> > -			if (files == NULL)
> > -			{
> > -				printk (KERN_DEBUG "Failed to get files struct\n");
> > -				return -EFAULT;
> > -			}
> > -
> > -			rcu_read_lock ();
> > -			file = fcheck_files (files, eventfd_copy.target_fd);
> > -			if (file)
> > -			{
> > -				if (file->f_mode & FMODE_PATH
> > -						|| !atomic_long_inc_not_zero (&file->f_count))
> > +	switch (ioctl) {
> > +	case EVENTFD_COPY:
> > +		if (copy_from_user(&eventfd_copy, argp,
> > +			sizeof(struct eventfd_copy)))
> > +			return -EFAULT;
> > +
> > +		/*
> > +		 * Find the task struct for the target pid
> > +		 */
> > +		task_target =
> > +			pid_task(find_vpid(eventfd_copy.target_pid), PIDTYPE_PID);
> > +		if (task_target == NULL) {
> > +			printk(KERN_DEBUG "Failed to get mem ctx for target pid\n");
> > +			return -EFAULT;
> > +		}
> > +
> > +		files = get_files_struct(current);
> > +		if (files == NULL) {
> > +			printk(KERN_DEBUG "Failed to get files struct\n");
> > +			return -EFAULT;
> > +		}
> > +
> > +		rcu_read_lock();
> > +		file = fcheck_files(files, eventfd_copy.source_fd);
> > +		if (file) {
> > +			if (file->f_mode & FMODE_PATH ||
> > +				!atomic_long_inc_not_zero(&file->f_count))
> > +				file = NULL;
> > +		}
> > +		rcu_read_unlock();
> > +		put_files_struct(files);
> > +
> > +		if (file == NULL) {
> > +			printk(KERN_DEBUG "Failed to get file from source pid\n");
> > +			return 0;
> > +		}
> > +
> > +		/*
> > +		 * Release the existing eventfd in the source process
> > +		 */
> > +		spin_lock(&files->file_lock);
> > +		filp_close(file, files);
> > +		fdt = files_fdtable(files);
> > +		fdt->fd[eventfd_copy.source_fd] = NULL;
> > +		spin_unlock(&files->file_lock);
> > +
> > +		/*
> > +		 * Find the file struct associated with the target fd.
> > +		 */
> > +
> > +		files = get_files_struct(task_target);
> > +		if (files == NULL) {
> > +			printk(KERN_DEBUG "Failed to get files struct\n");
> > +			return -EFAULT;
> > +		}
> > +
> > +		rcu_read_lock();
> > +		file = fcheck_files(files, eventfd_copy.target_fd);
> > +		if (file) {
> > +			if (file->f_mode & FMODE_PATH ||
> > +				!atomic_long_inc_not_zero(&file->f_count))
> >  					file = NULL;
> > -			}
> > -			rcu_read_unlock ();
> > -			put_files_struct (files);
> > -
> > -			if (file == NULL)
> > -			{
> > -				printk (KERN_DEBUG "Failed to get file from target pid\n");
> > -				return 0;
> > -			}
> > +		}
> > +		rcu_read_unlock();
> > +		put_files_struct(files);
> >
> > +		if (file == NULL) {
> > +			printk(KERN_DEBUG "Failed to get file from target pid\n");
> > +			return 0;
> > +		}
> >
> > -			/*
> > -			 * Install the file struct from the target process into the
> > -			 * file desciptor of the source process,
> > -			 */
> > +		/*
> > +		 * Install the file struct from the target process into the
> > +		 * file descriptor of the source process,
> > +		 */
> >
> > -			fd_install (eventfd_copy.source_fd, file);
> > +		fd_install(eventfd_copy.source_fd, file);
> >
> > -			return 0;
> > +		return 0;
> >
> > -		default:
> > -			return -ENOIOCTLCMD;
> > +	default:
> > +		return -ENOIOCTLCMD;
> >  	}
> >  }
> >
> > @@ -183,23 +173,23 @@ static struct miscdevice eventfd_link_misc = {
> >  };
> >
> >  static int __init
> > -eventfd_link_init (void)
> > +eventfd_link_init(void)
> >  {
> > -	return misc_register (&eventfd_link_misc);
> > +	return misc_register(&eventfd_link_misc);
> >  }
> >
> > -module_init (eventfd_link_init);
> > +module_init(eventfd_link_init);
> >
> >  static void __exit
> > -eventfd_link_exit (void)
> > +eventfd_link_exit(void)
> >  {
> > -	misc_deregister (&eventfd_link_misc);
> > +	misc_deregister(&eventfd_link_misc);
> >  }
> >
> > -module_exit (eventfd_link_exit);
> > +module_exit(eventfd_link_exit);
> >
> > -MODULE_VERSION ("0.0.1");
> > -MODULE_LICENSE ("GPL v2");
> > -MODULE_AUTHOR ("Anthony Fee");
> > -MODULE_DESCRIPTION ("Link eventfd");
> > -MODULE_ALIAS ("devname:eventfd-link");
> > +MODULE_VERSION("0.0.1");
> > +MODULE_LICENSE("GPL v2");
> > +MODULE_AUTHOR("Anthony Fee");
> > +MODULE_DESCRIPTION("Link eventfd");
> > +MODULE_ALIAS("devname:eventfd-link");
> > diff --git a/lib/librte_vhost/eventfd_link/eventfd_link.h b/lib/librte_vhost/eventfd_link/eventfd_link.h
> > index a32a8dd..ea619ec 100644
> > --- a/lib/librte_vhost/eventfd_link/eventfd_link.h
> > +++ b/lib/librte_vhost/eventfd_link/eventfd_link.h
> > @@ -1,79 +1,76 @@
> >  /*-
> > - *  * This file is provided under a dual BSD/GPLv2 license.  When using or
> > - *  *   redistributing this file, you may do so under either license.
> > - *  *
> > - *  *   GPL LICENSE SUMMARY
> > - *  *
> > - *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> > - *  *
> > - *  *   This program is free software; you can redistribute it and/or modify
> > - *  *   it under the terms of version 2 of the GNU General Public License as
> > - *  *   published by the Free Software Foundation.
> > - *  *
> > - *  *   This program is distributed in the hope that it will be useful, but
> > - *  *   WITHOUT ANY WARRANTY; without even the implied warranty of
> > - *  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> GNU
> > - *  *   General Public License for more details.
> > - *  *
> > - *  *   You should have received a copy of the GNU General Public License
> > - *  *   along with this program; if not, write to the Free Software
> > - *  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301
> USA.
> > - *  *   The full GNU General Public License is included in this distribution
> > - *  *   in the file called LICENSE.GPL.
> > - *  *
> > - *  *   Contact Information:
> > - *  *   Intel Corporation
> > - *  *
> > - *  *   BSD LICENSE
> > - *  *
> > - *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> > - *  *   All rights reserved.
> > - *  *
> > - *  *   Redistribution and use in source and binary forms, with or without
> > - *  *   modification, are permitted provided that the following conditions
> > - *  *   are met:
> > - *  *
> > - *  *     * Redistributions of source code must retain the above copyright
> > - *  *       notice, this list of conditions and the following disclaimer.
> > - *  *     * Redistributions in binary form must reproduce the above copyright
> > - *  *       notice, this list of conditions and the following disclaimer in
> > - *  *       the documentation and/or other materials provided with the
> > - *  *       distribution.
> > - *  *     * Neither the name of Intel Corporation nor the names of its
> > - *  *       contributors may be used to endorse or promote products derived
> > - *  *       from this software without specific prior written permission.
> > - *  *
> > - *  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> > - *  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> > - *  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> > - *  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> > - *  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> > - *  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
> BUT NOT
> > - *  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
> LOSS OF USE,
> > - *  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
> AND ON ANY
> > - *  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
> TORT
> > - *  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
> OF THE USE
> > - *  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.
> > - *  *
> > - *   */
> > + *  This file is provided under a dual BSD/GPLv2 license.  When using or
> > + *  redistributing this file, you may do so under either license.
> > + *
> > + * GPL LICENSE SUMMARY
> > + *
> > + *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> > + *
> > + *   This program is free software; you can redistribute it and/or modify
> > + *   it under the terms of version 2 of the GNU General Public License as
> > + *   published by the Free Software Foundation.
> > + *
> > + *   This program is distributed in the hope that it will be useful, but
> > + *   WITHOUT ANY WARRANTY; without even the implied warranty of
> > + *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> GNU
> > + *   General Public License for more details.
> > + *
> > + *   You should have received a copy of the GNU General Public License
> > + *   along with this program; if not, write to the Free Software
> > + *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
> > + *   The full GNU General Public License is included in this distribution
> > + *   in the file called LICENSE.GPL.
> > + *
> > + *   Contact Information:
> > + *   Intel Corporation
> > + *
> > + * BSD LICENSE
> > + *
> > + *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> > + *   All rights reserved.
> > + *
> > + *   Redistribution and use in source and binary forms, with or without
> > + *   modification, are permitted provided that the following conditions
> > + *   are met:
> > + *
> > + *   Redistributions of source code must retain the above copyright
> > + *   notice, this list of conditions and the following disclaimer.
> > + *   Redistributions in binary form must reproduce the above copyright
> > + *   notice, this list of conditions and the following disclaimer in
> > + *   the documentation and/or other materials provided with the
> > + *   distribution.
> > + *   Neither the name of Intel Corporation nor the names of its
> > + *   contributors may be used to endorse or promote products derived
> > + *   from this software without specific prior written permission.
> > + *
> > + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> > + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> > + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> > + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> > + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> > + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
> NOT
> > + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
> OF USE,
> > + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
> AND ON ANY
> > + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
> TORT
> > + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> THE USE
> > + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.
> > + *
> > + */
> >
> >  #ifndef _EVENTFD_LINK_H_
> >  #define _EVENTFD_LINK_H_
> >
> >  /*
> > - *	ioctl to copy an fd entry in calling process to an fd in a target process
> > + * ioctl to copy an fd entry in calling process to an fd in a target process
> >   */
> >  #define EVENTFD_COPY 1
> >
> >  /*
> > - *	arguements for the EVENTFD_COPY ioctl
> > + * arguments for the EVENTFD_COPY ioctl
> >   */
> >  struct eventfd_copy {
> > -	// fd in the target pid
> > -    unsigned target_fd;
> > -	// fd in the calling pid
> > -    unsigned source_fd;
> > -	// pid of the target pid
> > -    pid_t target_pid;
> > +	unsigned target_fd; /* fd in the target pid */
> > +	unsigned source_fd; /* fd in the calling pid */
> > +	pid_t target_pid; /* pid of the target pid */
> >  };
> >  #endif /* _EVENTFD_LINK_H_ */
> > diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
> > index b6548a1..00b1328 100644
> > --- a/lib/librte_vhost/rte_virtio_net.h
> > +++ b/lib/librte_vhost/rte_virtio_net.h
> > @@ -90,8 +90,7 @@ struct vhost_virtqueue {
> >  /**
> >   * Device structure contains all configuration information relating to the
> device.
> >   */
> > -struct virtio_net
> > -{
> > +struct virtio_net {
> >  	struct vhost_virtqueue	*virtqueue[VIRTIO_QNUM];	/**< Contains all virtqueue information. */
> >  	struct virtio_memory	*mem;		/**< QEMU memory and memory region information. */
> >  	uint64_t		features;	/**< Negotiated feature set. */
> > diff --git a/lib/librte_vhost/vhost-net-cdev.c b/lib/librte_vhost/vhost-net-cdev.c
> > index 91ff0d8..57c76cb 100644
> > --- a/lib/librte_vhost/vhost-net-cdev.c
> > +++ b/lib/librte_vhost/vhost-net-cdev.c
> > @@ -46,21 +46,21 @@
> >
> >  #include "vhost-net-cdev.h"
> >
> > -#define FUSE_OPT_DUMMY		"\0\0"
> > -#define FUSE_OPT_FORE		"-f\0\0"
> > -#define FUSE_OPT_NOMULTI	"-s\0\0"
> > +#define FUSE_OPT_DUMMY "\0\0"
> > +#define FUSE_OPT_FORE  "-f\0\0"
> > +#define FUSE_OPT_NOMULTI "-s\0\0"
> >
> > -static const uint32_t	default_major = 231;
> > -static const uint32_t	default_minor = 1;
> > -static const char	cuse_device_name[]	= "/dev/cuse";
> > -static const char	default_cdev[] = "vhost-net";
> > +static const uint32_t default_major = 231;
> > +static const uint32_t default_minor = 1;
> > +static const char cuse_device_name[] = "/dev/cuse";
> > +static const char default_cdev[] = "vhost-net";
> >
> > -static struct fuse_session			*session;
> > -static struct vhost_net_device_ops	const *ops;
> > +static struct fuse_session *session;
> > +static struct vhost_net_device_ops const *ops;
> >
> >  /*
> > - * Returns vhost_device_ctx from given fuse_req_t. The index is populated
> later when
> > - * the device is added to the device linked list.
> > + * Returns vhost_device_ctx from given fuse_req_t. The index is populated
> later
> > + * when the device is added to the device linked list.
> >   */
> >  static struct vhost_device_ctx
> >  fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)
> > @@ -75,7 +75,8 @@ fuse_req_to_vhost_ctx(fuse_req_t req, struct
> fuse_file_info *fi)
> >  }
> >
> >  /*
> > - * When the device is created in QEMU it gets initialised here and added to the device linked list.
> > + * When the device is created in QEMU it gets initialised here and
> > + * added to the device linked list.
> >   */
> >  static void
> >  vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
> > @@ -91,7 +92,8 @@ vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
> >
> >  	fi->fh = err;
> >
> > -	RTE_LOG(INFO, VHOST_CONFIG, "(%"PRIu64") Device configuration started\n", fi->fh);
> > +	RTE_LOG(INFO, VHOST_CONFIG,
> > +		"(%"PRIu64") Device configuration started\n", fi->fh);
> >  	fuse_reply_open(req, fi);
> >  }
> >
> > @@ -113,8 +115,8 @@ vhost_net_release(fuse_req_t req, struct
> fuse_file_info *fi)
> >   * Boilerplate code for CUSE IOCTL
> >   * Implicit arguments: ctx, req, result.
> >   */
> > -#define VHOST_IOCTL(func) do {			\
> > -	result = (func)(ctx);			\
> > +#define VHOST_IOCTL(func) do {	\
> > +	result = (func)(ctx);	\
> >  	fuse_reply_ioctl(req, result, NULL, 0);	\
> >  } while (0)
> >
> > @@ -122,57 +124,58 @@ vhost_net_release(fuse_req_t req, struct
> fuse_file_info *fi)
> >   * Boilerplate IOCTL RETRY
> >   * Implicit arguments: req.
> >   */
> > -#define VHOST_IOCTL_RETRY(size_r, size_w) do {		\
> > -	struct iovec iov_r = { arg, (size_r) };		\
> > -	struct iovec iov_w = { arg, (size_w) };		\
> > -	fuse_reply_ioctl_retry(req, &iov_r, (size_r)?1:0, &iov_w, (size_w)?1:0);	\
> > +#define VHOST_IOCTL_RETRY(size_r, size_w) do {	\
> > +	struct iovec iov_r = { arg, (size_r) };	\
> > +	struct iovec iov_w = { arg, (size_w) };	\
> > +	fuse_reply_ioctl_retry(req, &iov_r,	\
> > +		(size_r) ? 1 : 0, &iov_w, (size_w) ? 1 : 0);\
> >  } while (0)
> >
> >  /*
> >   * Boilerplate code for CUSE Read IOCTL
> >   * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
> >   */
> > -#define VHOST_IOCTL_R(type, var, func) do {		\
> > -	if (!in_bufsz) {				\
> > -		VHOST_IOCTL_RETRY(sizeof(type), 0);	\
> > -	} else {					\
> > -		(var) = *(const type*) in_buf;		\
> > -		result = func(ctx, &(var));		\
> > -		fuse_reply_ioctl(req, result, NULL, 0);	\
> > -	}						\
> > +#define VHOST_IOCTL_R(type, var, func) do {	\
> > +	if (!in_bufsz) {	\
> > +		VHOST_IOCTL_RETRY(sizeof(type), 0);\
> > +	} else {	\
> > +		(var) = *(const type*)in_buf;	\
> > +		result = func(ctx, &(var));	\
> > +		fuse_reply_ioctl(req, result, NULL, 0);\
> > +	}	\
> >  } while (0)
> >
> >  /*
> > - *	Boilerplate code for CUSE Write IOCTL
> > + * Boilerplate code for CUSE Write IOCTL
> >   * Implicit arguments: ctx, req, result, out_bufsz.
> >   */
> > -#define	VHOST_IOCTL_W(type, var, func) do {		\
> > -	if (!out_bufsz) {				\
> > -		VHOST_IOCTL_RETRY(0, sizeof(type));	\
> > -	} else {					\
> > -		result = (func)(ctx, &(var));		\
> > -		fuse_reply_ioctl(req, result, &(var), sizeof(type));	\
> > -	}								\
> > +#define VHOST_IOCTL_W(type, var, func) do {	\
> > +	if (!out_bufsz) {	\
> > +		VHOST_IOCTL_RETRY(0, sizeof(type));\
> > +	} else {	\
> > +		result = (func)(ctx, &(var));\
> > +		fuse_reply_ioctl(req, result, &(var), sizeof(type));\
> > +	} \
> >  } while (0)
> >
> >  /*
> >   * Boilerplate code for CUSE Read/Write IOCTL
> >   * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
> >   */
> > -#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do {		\
> > -	if (!in_bufsz) {						\
> > -		VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2));	\
> > -	} else {							\
> > -		(var1) = *(const type1*) (in_buf);			\
> > -		result = (func)(ctx, (var1), &(var2));			\
> > -		fuse_reply_ioctl(req, result, &(var2), sizeof(type2));	\
> > -	}								\
> > +#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do {	\
> > +	if (!in_bufsz) {	\
> > +		VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2));\
> > +	} else {	\
> > +		(var1) = *(const type1*) (in_buf);	\
> > +		result = (func)(ctx, (var1), &(var2));	\
> > +		fuse_reply_ioctl(req, result, &(var2), sizeof(type2));\
> > +	}	\
> >  } while (0)
> >
> >  /*
> > - * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on
> > - * the type of IOCTL a buffer is requested to read or to write. This
> > - * request is handled by FUSE and the buffer is then given to CUSE.
> > + * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on the
> type
> > + * of IOCTL a buffer is requested to read or to write. This request is handled
> > + * by FUSE and the buffer is then given to CUSE.
> >   */
> >  static void
> >  vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
> > @@ -189,33 +192,39 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
> >
> >  	switch (cmd) {
> >  	case VHOST_NET_SET_BACKEND:
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
> >  		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
> >  		break;
> >
> >  	case VHOST_GET_FEATURES:
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
> >  		VHOST_IOCTL_W(uint64_t, features, ops->get_features);
> >  		break;
> >
> >  	case VHOST_SET_FEATURES:
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
> >  		VHOST_IOCTL_R(uint64_t, features, ops->set_features);
> >  		break;
> >
> >  	case VHOST_RESET_OWNER:
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
> >  		VHOST_IOCTL(ops->reset_owner);
> >  		break;
> >
> >  	case VHOST_SET_OWNER:
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
> >  		VHOST_IOCTL(ops->set_owner);
> >  		break;
> >
> >  	case VHOST_SET_MEM_TABLE:
> >  		/*TODO fix race condition.*/
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
> >  		static struct vhost_memory mem_temp;
> >
> >  		switch (in_bufsz) {
> > @@ -227,7 +236,9 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
> >  			mem_temp = *(const struct vhost_memory *) in_buf;
> >
> >  			if (mem_temp.nregions > 0) {
> > -				VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) + (sizeof(struct vhost_memory_region) * mem_temp.nregions), 0);
> > +				VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) +
> > +					(sizeof(struct vhost_memory_region) *
> > +						mem_temp.nregions), 0);
> >  			} else {
> >  				result = -1;
> >  				fuse_reply_ioctl(req, result, NULL, 0);
> > @@ -235,56 +246,70 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
> >  			break;
> >
> >  		default:
> > -			result = ops->set_mem_table(ctx, in_buf, mem_temp.nregions);
> > +			result = ops->set_mem_table(ctx,
> > +					in_buf, mem_temp.nregions);
> >  			if (result)
> >  				fuse_reply_err(req, EINVAL);
> >  			else
> >  				fuse_reply_ioctl(req, result, NULL, 0);
> > -
> >  		}
> > -
> >  		break;
> >
> >  	case VHOST_SET_VRING_NUM:
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
> > -		VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
> > +		VHOST_IOCTL_R(struct vhost_vring_state, state,
> > +			ops->set_vring_num);
> >  		break;
> >
> >  	case VHOST_SET_VRING_BASE:
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
> > -		VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
> > +		VHOST_IOCTL_R(struct vhost_vring_state, state,
> > +			ops->set_vring_base);
> >  		break;
> >
> >  	case VHOST_GET_VRING_BASE:
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
> > -		VHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
> > +		VHOST_IOCTL_RW(uint32_t, index,
> > +			struct vhost_vring_state, state, ops->get_vring_base);
> >  		break;
> >
> >  	case VHOST_SET_VRING_ADDR:
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
> > -		VHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
> > +		VHOST_IOCTL_R(struct vhost_vring_addr, addr,
> > +			ops->set_vring_addr);
> >  		break;
> >
> >  	case VHOST_SET_VRING_KICK:
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
> > -		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
> > +		VHOST_IOCTL_R(struct vhost_vring_file, file,
> > +			ops->set_vring_kick);
> >  		break;
> >
> >  	case VHOST_SET_VRING_CALL:
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
> > -		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
> > +		VHOST_IOCTL_R(struct vhost_vring_file, file,
> > +			ops->set_vring_call);
> >  		break;
> >
> >  	default:
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
> >  		result = -1;
> >  		fuse_reply_ioctl(req, result, NULL, 0);
> >  	}
> >
> >  	if (result < 0)
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
> >  	else
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
> >  }
> >
> >  /*
> > @@ -297,8 +322,8 @@ static const struct cuse_lowlevel_ops vhost_net_ops =
> {
> >  };
> >
> >  /*
> > - * cuse_info is populated and used to register the cuse device. vhost_net_device_ops are
> > - * also passed when the device is registered in main.c.
> > + * cuse_info is populated and used to register the cuse device.
> > + * vhost_net_device_ops are also passed when the device is registered in app.
> >   */
> >  int
> >  rte_vhost_driver_register(const char *dev_name)
> > @@ -314,20 +339,23 @@ rte_vhost_driver_register(const char *dev_name)
> >  	char *fuse_argv[] = {fuse_opt_dummy, fuse_opt_fore, fuse_opt_nomulti};
> >
> >  	if (access(cuse_device_name, R_OK | W_OK) < 0) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "Character device %s can't be accessed, maybe not exist\n", cuse_device_name);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"char device %s can't be accessed, maybe not exist\n",
> > +			cuse_device_name);
> >  		return -1;
> >  	}
> >
> >  	/*
> > -	 * The device name is created. This is passed to QEMU so that it can register
> > -	 * the device with our application.
> > +	 * The device name is created. This is passed to QEMU so that it can
> > +	 * register the device with our application.
> >  	 */
> >  	snprintf(device_name, PATH_MAX, "DEVNAME=%s", dev_name);
> >  	snprintf(char_device_name, PATH_MAX, "/dev/%s", dev_name);
> >
> >  	/* Check if device already exists. */
> >  	if (access(char_device_name, F_OK) != -1) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "Character device %s already exists\n", char_device_name);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"char device %s already exists\n", char_device_name);
> >  		return -1;
> >  	}
> >
> > @@ -341,7 +369,7 @@ rte_vhost_driver_register(const char *dev_name)
> >  	ops = get_virtio_net_callbacks();
> >
> >  	session = cuse_lowlevel_setup(3, fuse_argv,
> > -				&cuse_info, &vhost_net_ops, 0, NULL);
> > +			&cuse_info, &vhost_net_ops, 0, NULL);
> >  	if (session == NULL)
> >  		return -1;
> >
> > @@ -349,7 +377,8 @@ rte_vhost_driver_register(const char *dev_name)
> >  }
> >
> >  /**
> > - * The CUSE session is launched allowing the application to receive open, release and ioctl calls.
> > + * The CUSE session is launched allowing the application to receive open,
> > + * release and ioctl calls.
> >   */
> >  int
> >  rte_vhost_driver_session_start(void)
> > diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
> > index 84ec0e8..ccfd82f 100644
> > --- a/lib/librte_vhost/vhost_rxtx.c
> > +++ b/lib/librte_vhost/vhost_rxtx.c
> > @@ -45,7 +45,7 @@
> >  /**
> >   * This function adds buffers to the virtio devices RX virtqueue. Buffers can
> >   * be received from the physical port or from another virtio device. A packet
> > - * count is returned to indicate the number of packets that were succesfully
> > + * count is returned to indicate the number of packets that are successfully
> >   * added to the RX queue. This function works when mergeable is disabled.
> >   */
> >  static inline uint32_t __attribute__((always_inline))
> > @@ -76,7 +76,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
> >  	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
> >
> >  	/*
> > -	 * As many data cores may want access to available buffers,
> > +	 * As many data cores may want access to available buffers,
> >  	 * they need to be reserved.
> >  	 */
> >  	do {
> > @@ -143,7 +143,8 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
> >  		}
> >
> >  		/* Update used ring with desc information */
> > -		vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
> > +		vq->used->ring[res_cur_idx & (vq->size - 1)].id =
> > +							head[packet_success];
> >  		vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
> >
> >  		/* Copy mbuf data to buffer */
> > @@ -389,10 +390,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev,
> uint16_t res_base_idx,
> >  }
> >
> >  /*
> > - * This function adds buffers to the virtio devices RX virtqueue. Buffers can
> > - * be received from the physical port or from another virtio device. A packet
> > - * count is returned to indicate the number of packets that were succesfully
> > - * added to the RX queue. This function works for mergeable RX.
> > + * This function works for mergeable RX.
> >   */
> >  static inline uint32_t __attribute__((always_inline))
> >  virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
> > @@ -729,5 +727,4 @@ rte_vhost_dequeue_burst(struct virtio_net *dev,
> uint16_t queue_id,
> >  	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
> >  		eventfd_write((int)vq->kickfd, 1);
> >  	return entry_success;
> > -
> >  }
> > diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
> > index 8015dd8..c07a11e 100644
> > --- a/lib/librte_vhost/virtio-net.c
> > +++ b/lib/librte_vhost/virtio-net.c
> > @@ -52,27 +52,27 @@
> >  #include "vhost-net-cdev.h"
> >  #include "eventfd_link/eventfd_link.h"
> >
> > -/**
> > +/*
> >   * Device linked list structure for configuration.
> >   */
> >  struct virtio_net_config_ll {
> > -	struct virtio_net		dev;	/* Virtio device.*/
> > -	struct virtio_net_config_ll	*next;	/* Next entry on linked list.*/
> > +	struct virtio_net dev;			/* Virtio device.*/
> > +	struct virtio_net_config_ll *next;	/* Next dev on linked list.*/
> >  };
> >
> >  const char eventfd_cdev[] = "/dev/eventfd-link";
> >
> > -/* device ops to add/remove device to data core. */
> > +/* device ops to add/remove device to/from data core. */
> >  static struct virtio_net_device_ops const *notify_ops;
> > -/* Root address of the linked list in the configuration core. */
> > -static struct virtio_net_config_ll	*ll_root;
> > +/* root address of the linked list of managed virtio devices */
> > +static struct virtio_net_config_ll *ll_root;
> >
> > -/* Features supported by this application. RX merge buffers are enabled by default. */
> > +/* Features supported by this lib. */
> >  #define VHOST_SUPPORTED_FEATURES (1ULL << VIRTIO_NET_F_MRG_RXBUF)
> >  static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
> >
> >  /* Line size for reading maps file. */
> > -const uint32_t BUFSIZE = PATH_MAX;
> > +static const uint32_t BUFSIZE = PATH_MAX;
> >
> >  /* Size of prot char array in procmap. */
> >  #define PROT_SZ 5
> > @@ -82,19 +82,19 @@ const uint32_t BUFSIZE = PATH_MAX;
> >
> >  /* Structure containing information gathered from maps file. */
> >  struct procmap {
> > -	uint64_t	va_start;	/* Start virtual address in file. */
> > -	uint64_t	len;		/* Size of file. */
> > -	uint64_t	pgoff;		/* Not used. */
> > -	uint32_t	maj;		/* Not used. */
> > -	uint32_t	min;		/* Not used. */
> > -	uint32_t	ino;		/* Not used. */
> > -	char		prot[PROT_SZ];	/* Not used. */
> > -	char		fname[PATH_MAX];/* File name. */
> > +	uint64_t va_start;	/* Start virtual address in file. */
> > +	uint64_t len;		/* Size of file. */
> > +	uint64_t pgoff;		/* Not used. */
> > +	uint32_t maj;		/* Not used. */
> > +	uint32_t min;		/* Not used. */
> > +	uint32_t ino;		/* Not used. */
> > +	char prot[PROT_SZ];	/* Not used. */
> > +	char fname[PATH_MAX];	/* File name. */
> >  };
> >
> >  /*
> > - * Converts QEMU virtual address to Vhost virtual address. This function is used
> > - * to convert the ring addresses to our address space.
> > + * Converts QEMU virtual address to Vhost virtual address. This function is
> > + * used to convert the ring addresses to our address space.
> >   */
> >  static uint64_t
> >  qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
> > @@ -107,8 +107,8 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
> >  	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
> >  		region = &dev->mem->regions[regionidx];
> >  		if ((qemu_va >= region->userspace_address) &&
> > -				(qemu_va <= region->userspace_address +
> > -				region->memory_size)) {
> > +			(qemu_va <= region->userspace_address +
> > +			region->memory_size)) {
> >  			vhost_va = dev->mem->mapped_address + qemu_va -
> >  					dev->mem->base_address;
> >  			break;
> > @@ -118,7 +118,8 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
> >  }
> >
> >  /*
> > - * Locate the file containing QEMU's memory space and map it to our address space.
> > + * Locate the file containing QEMU's memory space and
> > + * map it to our address space.
> >   */
> >  static int
> >  host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
> > @@ -134,10 +135,10 @@ host_memory_map(struct virtio_net *dev, struct
> virtio_memory *mem,
> >  	char procdir[PATH_MAX];
> >  	char resolved_path[PATH_MAX];
> >  	char *path = NULL;
> > -	FILE		*fmap;
> > -	void		*map;
> > -	uint8_t		found = 0;
> > -	char		line[BUFSIZE];
> > +	FILE *fmap;
> > +	void *map;
> > +	uint8_t found = 0;
> > +	char line[BUFSIZE];
> >  	char dlm[] = "-   :   ";
> >  	char *str, *sp, *in[PROCMAP_SZ];
> >  	char *end = NULL;
> > @@ -159,7 +160,7 @@ host_memory_map(struct virtio_net *dev, struct
> virtio_memory *mem,
> >  	while (fgets(line, BUFSIZE, fmap) != 0) {
> >  		str = line;
> >  		errno = 0;
> > -		/* Split line in to fields. */
> > +		/* Split line into fields. */
> >  		for (i = 0; i < PROCMAP_SZ; i++) {
> >  			in[i] = strtok_r(str, &dlm[i], &sp);
> >  			if ((in[i] == NULL) || (errno != 0)) {
> > @@ -171,37 +172,43 @@ host_memory_map(struct virtio_net *dev, struct
> virtio_memory *mem,
> >
> >  		/* Convert/Copy each field as needed. */
> >  		procmap.va_start = strtoull(in[0], &end, 16);
> > -		if ((in[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> > +		if ((in[0] == '\0') || (end == NULL) || (*end != '\0') ||
> > +			(errno != 0)) {
> >  			fclose(fmap);
> >  			return -1;
> >  		}
> >
> >  		procmap.len = strtoull(in[1], &end, 16);
> > -		if ((in[1] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> > +		if ((in[1] == '\0') || (end == NULL) || (*end != '\0') ||
> > +			(errno != 0)) {
> >  			fclose(fmap);
> >  			return -1;
> >  		}
> >
> >  		procmap.pgoff = strtoull(in[3], &end, 16);
> > -		if ((in[3] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> > +		if ((in[3] == '\0') || (end == NULL) || (*end != '\0') ||
> > +			(errno != 0)) {
> >  			fclose(fmap);
> >  			return -1;
> >  		}
> >
> >  		procmap.maj = strtoul(in[4], &end, 16);
> > -		if ((in[4] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> > +		if ((in[4] == '\0') || (end == NULL) || (*end != '\0') ||
> > +			(errno != 0)) {
> >  			fclose(fmap);
> >  			return -1;
> >  		}
> >
> >  		procmap.min = strtoul(in[5], &end, 16);
> > -		if ((in[5] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> > +		if ((in[5] == '\0') || (end == NULL) || (*end != '\0') ||
> > +			(errno != 0)) {
> >  			fclose(fmap);
> >  			return -1;
> >  		}
> >
> >  		procmap.ino = strtoul(in[6], &end, 16);
> > -		if ((in[6] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
> > +		if ((in[6] == '\0') || (end == NULL) || (*end != '\0') ||
> > +			(errno != 0)) {
> >  			fclose(fmap);
> >  			return -1;
> >  		}
> > @@ -218,16 +225,19 @@ host_memory_map(struct virtio_net *dev, struct
> virtio_memory *mem,
> >  	fclose(fmap);
> >
> >  	if (!found) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file in pid %d maps file\n", dev->device_fh, pid);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") Failed to find memory file in pid %d maps file\n",
> > +			dev->device_fh, pid);
> >  		return -1;
> >  	}
> >
> >  	/* Find the guest memory file among the process fds. */
> >  	dp = opendir(procdir);
> >  	if (dp == NULL) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory\n", dev->device_fh, pid);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") Cannot open pid %d process directory\n",
> > +			dev->device_fh, pid);
> >  		return -1;
> > -
> >  	}
> >
> >  	found = 0;
> > @@ -254,23 +264,29 @@ host_memory_map(struct virtio_net *dev, struct
> virtio_memory *mem,
> >  	closedir(dp);
> >
> >  	if (found == 0) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file for pid %d\n", dev->device_fh, pid);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") Failed to find memory file for pid %d\n",
> > +			dev->device_fh, pid);
> >  		return -1;
> >  	}
> >  	/* Open the shared memory file and map the memory into this process. */
> >  	fd = open(memfile, O_RDWR);
> >
> >  	if (fd == -1) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to open %s for pid %d\n", dev->device_fh, memfile, pid);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") Failed to open %s for pid %d\n",
> > +			dev->device_fh, memfile, pid);
> >  		return -1;
> >  	}
> >
> > -	map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE ,
> > -			MAP_POPULATE|MAP_SHARED, fd, 0);
> > +	map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE,
> > +		MAP_POPULATE|MAP_SHARED, fd, 0);
> >  	close(fd);
> >
> >  	if (map == MAP_FAILED) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Error mapping the file %s for pid %d\n",  dev->device_fh, memfile, pid);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") Error mapping the file %s for pid %d\n",
> > +			dev->device_fh, memfile, pid);
> >  		return -1;
> >  	}
> >
> > @@ -278,8 +294,11 @@ host_memory_map(struct virtio_net *dev, struct
> virtio_memory *mem,
> >  	mem->mapped_address = (uint64_t)(uintptr_t)map;
> >  	mem->mapped_size = procmap.len;
> >
> > -	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n", dev->device_fh,
> > -		memfile, resolved_path, (long long unsigned)mem->mapped_size, map);
> > +	LOG_DEBUG(VHOST_CONFIG,
> > +		"(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n",
> > +		dev->device_fh,
> > +		memfile, resolved_path,
> > +		(long long unsigned)mem->mapped_size, map);
> >
> >  	return 0;
> >  }
> > @@ -303,7 +322,8 @@ get_config_ll_entry(struct vhost_device_ctx ctx)
> >  }
> >
> >  /*
> > - * Searches the configuration core linked list and retrieves the device if it exists.
> > + * Searches the configuration core linked list and
> > + * retrieves the device if it exists.
> >   */
> >  static struct virtio_net *
> >  get_device(struct vhost_device_ctx ctx)
> > @@ -312,11 +332,11 @@ get_device(struct vhost_device_ctx ctx)
> >
> >  	ll_dev = get_config_ll_entry(ctx);
> >
> > -	/* If a matching entry is found in the linked list, return the device in that entry. */
> >  	if (ll_dev)
> >  		return &ll_dev->dev;
> >
> > -	RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
> > +	RTE_LOG(ERR, VHOST_CONFIG,
> > +		"(%"PRIu64") Device not found in linked list.\n", ctx.fh);
> >  	return NULL;
> >  }
> >
> > @@ -331,13 +351,18 @@ add_config_ll_entry(struct virtio_net_config_ll
> *new_ll_dev)
> >  	/* If ll_dev == NULL then this is the first device so go to else */
> >  	if (ll_dev) {
> >  		/* If the 1st device_fh != 0 then we insert our device here. */
> > -		if (ll_dev->dev.device_fh != 0)	{
> > +		if (ll_dev->dev.device_fh != 0) {
> >  			new_ll_dev->dev.device_fh = 0;
> >  			new_ll_dev->next = ll_dev;
> >  			ll_root = new_ll_dev;
> >  		} else {
> > -			/* Increment through the ll until we find un unused device_fh. Insert the device at that entry*/
> > -			while ((ll_dev->next != NULL) && (ll_dev->dev.device_fh == (ll_dev->next->dev.device_fh - 1)))
> > +			/*
> > +			 * Increment through the ll until we find an unused
> > +			 * device_fh. Insert the device at that entry.
> > +			 */
> > +			while ((ll_dev->next != NULL) &&
> > +				(ll_dev->dev.device_fh ==
> > +					(ll_dev->next->dev.device_fh - 1)))
> >  				ll_dev = ll_dev->next;
> >
> >  			new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
> > @@ -352,7 +377,8 @@ add_config_ll_entry(struct virtio_net_config_ll
> *new_ll_dev)
> >  }
> >
> >  /*
> > - * Unmap any memory, close any file descriptors and free any memory owned by a device.
> > + * Unmap any memory, close any file descriptors and
> > + * free any memory owned by a device.
> >   */
> >  static void
> >  cleanup_device(struct virtio_net *dev)
> > @@ -386,6 +412,7 @@ free_device(struct virtio_net_config_ll *ll_dev)
> >  	free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
> >  	free(ll_dev);
> >  }
> > +
> >  /*
> >   * Remove an entry from the device configuration linked list.
> >   */
> > @@ -423,7 +450,10 @@ init_device(struct virtio_net *dev)
> >  {
> >  	uint64_t vq_offset;
> >
> > -	/* Virtqueues have already been malloced so we don't want to set them to NULL. */
> > +	/*
> > +	 * Virtqueues have already been malloced so
> > +	 * we don't want to set them to NULL.
> > +	 */
> >  	vq_offset = offsetof(struct virtio_net, mem);
> >
> >  	/* Set everything to 0. */
> > @@ -491,8 +521,8 @@ new_device(struct vhost_device_ctx ctx)
> >  }
> >
> >  /*
> > - * Function is called from the CUSE release function. This function will cleanup
> > - * the device and remove it from device configuration linked list.
> > + * Function is called from the CUSE release function. This function will
> > + * cleanup the device and remove it from device configuration linked list.
> >   */
> >  static void
> >  destroy_device(struct vhost_device_ctx ctx)
> > @@ -503,15 +533,19 @@ destroy_device(struct vhost_device_ctx ctx)
> >  	/* Find the linked list entry for the device to be removed. */
> >  	ll_dev_cur_ctx = get_config_ll_entry(ctx);
> >  	while (ll_dev_cur != NULL) {
> > -		/* If the device is found or a device that doesn't exist is found then it is removed. */
> > +		/*
> > +		 * If the device is found or
> > +		 * a device that doesn't exist is found then it is removed.
> > +		 */
> >  		if (ll_dev_cur == ll_dev_cur_ctx) {
> >  			/*
> > -			 * If the device is running on a data core then call the function to remove it from
> > -			 * the data core.
> > +			 * If the device is running on a data core then call
> > +			 * the function to remove it from the data core.
> >  			 */
> >  			if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
> >  				notify_ops->destroy_device(&(ll_dev_cur-
> >dev));
> > -			ll_dev_cur = rm_config_ll_entry(ll_dev_cur, ll_dev_last);
> > +			ll_dev_cur = rm_config_ll_entry(ll_dev_cur,
> > +					ll_dev_last);
> >  		} else {
> >  			ll_dev_last = ll_dev_cur;
> >  			ll_dev_cur = ll_dev_cur->next;
> > @@ -521,7 +555,8 @@ destroy_device(struct vhost_device_ctx ctx)
> >
> >  /*
> >   * Called from CUSE IOCTL: VHOST_SET_OWNER
> > - * This function just returns success at the moment unless the device hasn't been initialised.
> > + * This function just returns success at the moment unless
> > + * the device hasn't been initialised.
> >   */
> >  static int
> >  set_owner(struct vhost_device_ctx ctx)
> > @@ -571,7 +606,7 @@ get_features(struct vhost_device_ctx ctx, uint64_t *pu)
> >
> >  /*
> >   * Called from CUSE IOCTL: VHOST_SET_FEATURES
> > - * We receive the negotiated set of features supported by us and the virtio device.
> > + * We receive the negotiated features supported by us and the virtio device.
> >   */
> >  static int
> >  set_features(struct vhost_device_ctx ctx, uint64_t *pu)
> > @@ -589,13 +624,17 @@ set_features(struct vhost_device_ctx ctx, uint64_t
> *pu)
> >
> >  	/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
> >  	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers enabled\n", dev->device_fh);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") Mergeable RX buffers enabled\n",
> > +			dev->device_fh);
> >  		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
> >  			sizeof(struct virtio_net_hdr_mrg_rxbuf);
> >  		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
> >  			sizeof(struct virtio_net_hdr_mrg_rxbuf);
> >  	} else {
> > -		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers disabled\n", dev->device_fh);
> > +		LOG_DEBUG(VHOST_CONFIG,
> > +			"(%"PRIu64") Mergeable RX buffers disabled\n",
> > +			dev->device_fh);
> >  		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
> >  			sizeof(struct virtio_net_hdr);
> >  		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
> > @@ -607,8 +646,8 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu)
> >
> >  /*
> >   * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE
> > - * This function creates and populates the memory structure for the device. This includes
> > - * storing offsets used to translate buffer addresses.
> > + * This function creates and populates the memory structure for the device.
> > + * This includes storing offsets used to translate buffer addresses.
> >   */
> >  static int
> >  set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
> > @@ -634,7 +673,9 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
> >  	mem = calloc(1, sizeof(struct virtio_memory) +
> >  		(sizeof(struct virtio_memory_regions) * nregions));
> >  	if (mem == NULL) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev->mem.\n", dev->device_fh);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") Failed to allocate memory for dev->mem.\n",
> > +			dev->device_fh);
> >  		return -1;
> >  	}
> >
> > @@ -656,15 +697,18 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
> >  			mem_regions[regionidx].userspace_addr;
> >
> >  		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
> > -				regionidx, (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
> > -				(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
> > -				mem->regions[regionidx].memory_size);
> > +			regionidx,
> > +			(void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
> > +			(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
> > +			mem->regions[regionidx].memory_size);
> >
> >  		/*set the base address mapping*/
> >  		if (mem->regions[regionidx].guest_phys_address == 0x0) {
> > -			mem->base_address = mem->regions[regionidx].userspace_address;
> > +			mem->base_address =
> > +				mem->regions[regionidx].userspace_address;
> >  			/* Map VM memory file */
> > -			if (host_memory_map(dev, mem, ctx.pid, mem->base_address) != 0) {
> > +			if (host_memory_map(dev, mem, ctx.pid,
> > +				mem->base_address) != 0) {
> >  				free(mem);
> >  				return -1;
> >  			}
> > @@ -678,27 +722,42 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
> >  		return -1;
> >  	}
> >
> > -	/* Check if all of our regions have valid mappings. Usually one does not exist in the QEMU memory file. */
> > +	/*
> > +	 * Check if all of our regions have valid mappings.
> > +	 * Usually one does not exist in the QEMU memory file.
> > +	 */
> >  	valid_regions = mem->nregions;
> >  	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
> > -		if ((mem->regions[regionidx].userspace_address < mem->base_address) ||
> > -			(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size)))
> > +		if ((mem->regions[regionidx].userspace_address <
> > +			mem->base_address) ||
> > +			(mem->regions[regionidx].userspace_address >
> > +			(mem->base_address + mem->mapped_size)))
> >  				valid_regions--;
> >  	}
> >
> > -	/* If a region does not have a valid mapping we rebuild our memory struct to contain only valid entries. */
> > +	/*
> > +	 * If a region does not have a valid mapping,
> > +	 * we rebuild our memory struct to contain only valid entries.
> > +	 */
> >  	if (valid_regions != mem->nregions) {
> >  		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\n",
> >  			dev->device_fh);
> >
> > -		/* Re-populate the memory structure with only valid regions. Invalid regions are over-written with memmove. */
> > +		/*
> > +		 * Re-populate the memory structure with only valid regions.
> > +		 * Invalid regions are over-written with memmove.
> > +		 */
> >  		valid_regions = 0;
> >
> >  		for (regionidx = mem->nregions; 0 != regionidx--;) {
> > -			if ((mem->regions[regionidx].userspace_address < mem->base_address) ||
> > -					(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size))) {
> > -				memmove(&mem->regions[regionidx], &mem->regions[regionidx + 1],
> > -					sizeof(struct virtio_memory_regions) * valid_regions);
> > +			if ((mem->regions[regionidx].userspace_address <
> > +				mem->base_address) ||
> > +				(mem->regions[regionidx].userspace_address >
> > +				(mem->base_address + mem->mapped_size))) {
> > +				memmove(&mem->regions[regionidx],
> > +					&mem->regions[regionidx + 1],
> > +					sizeof(struct virtio_memory_regions) *
> > +						valid_regions);
> >  			} else {
> >  				valid_regions++;
> >  			}
> > @@ -708,12 +767,16 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
> >  	dev->mem = mem;
> >
> >  	/*
> > -	 * Calculate the address offset for each region. This offset is used to identify the vhost virtual address
> > +	 * Calculate the address offset for each region.
> > +	 * This offset is used to identify the vhost virtual address
> >  	 * corresponding to a QEMU guest physical address.
> >  	 */
> >  	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
> > -		dev->mem->regions[regionidx].address_offset = dev->mem->regions[regionidx].userspace_address - dev->mem->base_address
> > -			+ dev->mem->mapped_address - dev->mem->regions[regionidx].guest_phys_address;
> > +		dev->mem->regions[regionidx].address_offset =
> > +			dev->mem->regions[regionidx].userspace_address -
> > +				dev->mem->base_address +
> > +				dev->mem->mapped_address -
> > +				dev->mem->regions[regionidx].guest_phys_address;
> >
> >  	}
> >  	return 0;
> > @@ -732,7 +795,7 @@ set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
> >  	if (dev == NULL)
> >  		return -1;
> >
> > -	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> > +	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
> >  	dev->virtqueue[state->index]->size = state->num;
> >
> >  	return 0;
> > @@ -740,8 +803,8 @@ set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
> >
> >  /*
> >   * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
> > - * The virtio device sends us the desc, used and avail ring addresses. This function
> > - * then converts these to our address space.
> > + * The virtio device sends us the desc, used and avail ring addresses.
> > + * This function then converts these to our address space.
> >   */
> >  static int
> >  set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
> > @@ -753,31 +816,43 @@ set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
> >  	if (dev == NULL)
> >  		return -1;
> >
> > -	/* addr->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> > +	/* addr->index refers to the queue index. The txq 1, rxq is 0. */
> >  	vq = dev->virtqueue[addr->index];
> >
> >  	/* The addresses are converted from QEMU virtual to Vhost virtual. */
> > -	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
> > +	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
> > +			addr->desc_user_addr);
> >  	if (vq->desc == 0) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find descriptor ring address.\n", dev->device_fh);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") Failed to find desc ring address.\n",
> > +			dev->device_fh);
> >  		return -1;
> >  	}
> >
> > -	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
> > +	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
> > +			addr->avail_user_addr);
> >  	if (vq->avail == 0) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find available ring address.\n", dev->device_fh);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") Failed to find avail ring address.\n",
> > +			dev->device_fh);
> >  		return -1;
> >  	}
> >
> > -	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
> > +	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
> > +			addr->used_user_addr);
> >  	if (vq->used == 0) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find used ring address.\n", dev->device_fh);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") Failed to find used ring address.\n",
> > +			dev->device_fh);
> >  		return -1;
> >  	}
> >
> > -	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n", dev->device_fh, vq->desc);
> > -	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n", dev->device_fh, vq->avail);
> > -	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n", dev->device_fh, vq->used);
> > +	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n",
> > +			dev->device_fh, vq->desc);
> > +	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n",
> > +			dev->device_fh, vq->avail);
> > +	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n",
> > +			dev->device_fh, vq->used);
> >
> >  	return 0;
> >  }
> > @@ -795,7 +870,7 @@ set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
> >  	if (dev == NULL)
> >  		return -1;
> >
> > -	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> > +	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
> >  	dev->virtqueue[state->index]->last_used_idx = state->num;
> >  	dev->virtqueue[state->index]->last_used_idx_res = state->num;
> >
> > @@ -817,15 +892,15 @@ get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
> >  		return -1;
> >
> >  	state->index = index;
> > -	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> > +	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
> >  	state->num = dev->virtqueue[state->index]->last_used_idx;
> >
> >  	return 0;
> >  }
> >
> >  /*
> > - * This function uses the eventfd_link kernel module to copy an eventfd file descriptor
> > - * provided by QEMU in to our process space.
> > + * This function uses the eventfd_link kernel module to copy an eventfd file
> > + * descriptor provided by QEMU in to our process space.
> >   */
> >  static int
> >  eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
> > @@ -835,7 +910,9 @@ eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
> >  	/* Open the character device to the kernel module. */
> >  	eventfd_link = open(eventfd_cdev, O_RDWR);
> >  	if (eventfd_link < 0) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") eventfd_link module is not loaded\n",  dev->device_fh);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") eventfd_link module is not loaded\n",
> > +			dev->device_fh);
> >  		return -1;
> >  	}
> >
> > @@ -844,18 +921,19 @@ eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
> >  	close(eventfd_link);
> >
> >  	if (ret < 0) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") EVENTFD_COPY ioctl failed\n",  dev->device_fh);
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"(%"PRIu64") EVENTFD_COPY ioctl failed\n",
> > +			dev->device_fh);
> >  		return -1;
> >  	}
> >
> > -
> >  	return 0;
> >  }
> >
> >  /*
> >   * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
> > - * The virtio device sends an eventfd to interrupt the guest. This fd gets copied in
> > - * to our process space.
> > + * The virtio device sends an eventfd to interrupt the guest. This fd gets
> > + * copied into our process space.
> >   */
> >  static int
> >  set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> > @@ -868,7 +946,7 @@ set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> >  	if (dev == NULL)
> >  		return -1;
> >
> > -	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> > +	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
> >  	vq = dev->virtqueue[file->index];
> >
> >  	if (vq->kickfd)
> > @@ -888,8 +966,8 @@ set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> >
> >  /*
> >   * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
> > - * The virtio device sends an eventfd that it can use to notify us. This fd gets copied in
> > - * to our process space.
> > + * The virtio device sends an eventfd that it can use to notify us.
> > + * This fd gets copied into our process space.
> >   */
> >  static int
> >  set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> > @@ -902,7 +980,7 @@ set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> >  	if (dev == NULL)
> >  		return -1;
> >
> > -	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> > +	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
> >  	vq = dev->virtqueue[file->index];
> >
> >  	if (vq->callfd)
> > @@ -922,10 +1000,12 @@ set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> >
> >  /*
> >   * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
> > - * To complete device initialisation when the virtio driver is loaded we are provided with a
> > - * valid fd for a tap device (not used by us). If this happens then we can add the device to a
> > - * data core. When the virtio driver is removed we get fd=-1. At that point we remove the device
> > - * from the data core. The device will still exist in the device configuration linked list.
> > + * To complete device initialisation when the virtio driver is loaded,
> > + * we are provided with a valid fd for a tap device (not used by us).
> > + * If this happens then we can add the device to a data core.
> > + * When the virtio driver is removed we get fd=-1.
> > + * At that point we remove the device from the data core.
> > + * The device will still exist in the device configuration linked list.
> >   */
> >  static int
> >  set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> > @@ -936,10 +1016,13 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> >  	if (dev == NULL)
> >  		return -1;
> >
> > -	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
> > +	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
> >  	dev->virtqueue[file->index]->backend = file->fd;
> >
> > -	/* If the device isn't already running and both backend fds are set we add the device. */
> > +	/*
> > +	 * If the device isn't already running and both backend fds are set,
> > +	 * we add the device.
> > +	 */
> >  	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
> >  		if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
> >  			((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED))
> > @@ -952,8 +1035,8 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
> >  }
> >
> >  /*
> > - * Function pointers are set for the device operations to allow CUSE to call functions
> > - * when an IOCTL, device_add or device_release is received.
> > + * Function pointers are set for the device operations to allow CUSE to call
> > + * functions when an IOCTL, device_add or device_release is received.
> >   */
> >  static const struct vhost_net_device_ops vhost_device_ops = {
> >  	.new_device = new_device,
> > @@ -991,11 +1074,13 @@ int rte_vhost_enable_guest_notification(struct virtio_net *dev,
> >  	uint16_t queue_id, int enable)
> >  {
> >  	if (enable) {
> > -		RTE_LOG(ERR, VHOST_CONFIG, "guest notification isn't supported.\n");
> > +		RTE_LOG(ERR, VHOST_CONFIG,
> > +			"guest notification isn't supported.\n");
> >  		return -1;
> >  	}
> >
> > -	dev->virtqueue[queue_id]->used->flags = enable ? 0 : VRING_USED_F_NO_NOTIFY;
> > +	dev->virtqueue[queue_id]->used->flags =
> > +		enable ? 0 : VRING_USED_F_NO_NOTIFY;
> >  	return 0;
> >  }
> >
> >
> 
> 
> --
> Thomas
  

Patch

diff --git a/lib/librte_vhost/eventfd_link/eventfd_link.c b/lib/librte_vhost/eventfd_link/eventfd_link.c
index fc0653a..542ec2c 100644
--- a/lib/librte_vhost/eventfd_link/eventfd_link.c
+++ b/lib/librte_vhost/eventfd_link/eventfd_link.c
@@ -1,26 +1,26 @@ 
 /*-
- *  * GPL LICENSE SUMMARY
- *  *
- *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *  *
- *  *   This program is free software; you can redistribute it and/or modify
- *  *   it under the terms of version 2 of the GNU General Public License as
- *  *   published by the Free Software Foundation.
- *  *
- *  *   This program is distributed in the hope that it will be useful, but
- *  *   WITHOUT ANY WARRANTY; without even the implied warranty of
- *  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  *   General Public License for more details.
- *  *
- *  *   You should have received a copy of the GNU General Public License
- *  *   along with this program; if not, write to the Free Software
- *  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *  *   The full GNU General Public License is included in this distribution
- *  *   in the file called LICENSE.GPL.
- *  *
- *  *   Contact Information:
- *  *   Intel Corporation
- *   */
+ * GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *   The full GNU General Public License is included in this distribution
+ *   in the file called LICENSE.GPL.
+ *
+ *   Contact Information:
+ *   Intel Corporation
+ */
 
 #include <linux/eventfd.h>
 #include <linux/miscdevice.h>
@@ -42,15 +42,15 @@ 
  * get_files_struct is copied from fs/file.c
  */
 struct files_struct *
-get_files_struct (struct task_struct *task)
+get_files_struct(struct task_struct *task)
 {
 	struct files_struct *files;
 
-	task_lock (task);
+	task_lock(task);
 	files = task->files;
 	if (files)
-		atomic_inc (&files->count);
-	task_unlock (task);
+		atomic_inc(&files->count);
+	task_unlock(task);
 
 	return files;
 }
@@ -59,17 +59,15 @@  get_files_struct (struct task_struct *task)
  * put_files_struct is extracted from fs/file.c
  */
 void
-put_files_struct (struct files_struct *files)
+put_files_struct(struct files_struct *files)
 {
-	if (atomic_dec_and_test (&files->count))
-	{
-		BUG ();
-	}
+	if (atomic_dec_and_test(&files->count))
+		BUG();
 }
 
 
 static long
-eventfd_link_ioctl (struct file *f, unsigned int ioctl, unsigned long arg)
+eventfd_link_ioctl(struct file *f, unsigned int ioctl, unsigned long arg)
 {
 	void __user *argp = (void __user *) arg;
 	struct task_struct *task_target = NULL;
@@ -78,96 +76,88 @@  eventfd_link_ioctl (struct file *f, unsigned int ioctl, unsigned long arg)
 	struct fdtable *fdt;
 	struct eventfd_copy eventfd_copy;
 
-	switch (ioctl)
-	{
-		case EVENTFD_COPY:
-			if (copy_from_user (&eventfd_copy, argp, sizeof (struct eventfd_copy)))
-				return -EFAULT;
-
-			/*
-			 * Find the task struct for the target pid
-			 */
-			task_target =
-				pid_task (find_vpid (eventfd_copy.target_pid), PIDTYPE_PID);
-			if (task_target == NULL)
-			{
-				printk (KERN_DEBUG "Failed to get mem ctx for target pid\n");
-				return -EFAULT;
-			}
-
-			files = get_files_struct (current);
-			if (files == NULL)
-			{
-				printk (KERN_DEBUG "Failed to get files struct\n");
-				return -EFAULT;
-			}
-
-			rcu_read_lock ();
-			file = fcheck_files (files, eventfd_copy.source_fd);
-			if (file)
-			{
-				if (file->f_mode & FMODE_PATH
-						|| !atomic_long_inc_not_zero (&file->f_count))
-					file = NULL;
-			}
-			rcu_read_unlock ();
-			put_files_struct (files);
-
-			if (file == NULL)
-			{
-				printk (KERN_DEBUG "Failed to get file from source pid\n");
-				return 0;
-			}
-
-			/*
-			 * Release the existing eventfd in the source process
-			 */
-			spin_lock (&files->file_lock);
-			filp_close (file, files);
-			fdt = files_fdtable (files);
-			fdt->fd[eventfd_copy.source_fd] = NULL;
-			spin_unlock (&files->file_lock);
-
-			/*
-			 * Find the file struct associated with the target fd.
-			 */
-
-			files = get_files_struct (task_target);
-			if (files == NULL)
-			{
-				printk (KERN_DEBUG "Failed to get files struct\n");
-				return -EFAULT;
-			}
-
-			rcu_read_lock ();
-			file = fcheck_files (files, eventfd_copy.target_fd);
-			if (file)
-			{
-				if (file->f_mode & FMODE_PATH
-						|| !atomic_long_inc_not_zero (&file->f_count))
+	switch (ioctl) {
+	case EVENTFD_COPY:
+		if (copy_from_user(&eventfd_copy, argp,
+			sizeof(struct eventfd_copy)))
+			return -EFAULT;
+
+		/*
+		 * Find the task struct for the target pid
+		 */
+		task_target =
+			pid_task(find_vpid(eventfd_copy.target_pid), PIDTYPE_PID);
+		if (task_target == NULL) {
+			printk(KERN_DEBUG "Failed to get mem ctx for target pid\n");
+			return -EFAULT;
+		}
+
+		files = get_files_struct(current);
+		if (files == NULL) {
+			printk(KERN_DEBUG "Failed to get files struct\n");
+			return -EFAULT;
+		}
+
+		rcu_read_lock();
+		file = fcheck_files(files, eventfd_copy.source_fd);
+		if (file) {
+			if (file->f_mode & FMODE_PATH ||
+				!atomic_long_inc_not_zero(&file->f_count))
+				file = NULL;
+		}
+		rcu_read_unlock();
+		put_files_struct(files);
+
+		if (file == NULL) {
+			printk(KERN_DEBUG "Failed to get file from source pid\n");
+			return 0;
+		}
+
+		/*
+		 * Release the existing eventfd in the source process
+		 */
+		spin_lock(&files->file_lock);
+		filp_close(file, files);
+		fdt = files_fdtable(files);
+		fdt->fd[eventfd_copy.source_fd] = NULL;
+		spin_unlock(&files->file_lock);
+
+		/*
+		 * Find the file struct associated with the target fd.
+		 */
+
+		files = get_files_struct(task_target);
+		if (files == NULL) {
+			printk(KERN_DEBUG "Failed to get files struct\n");
+			return -EFAULT;
+		}
+
+		rcu_read_lock();
+		file = fcheck_files(files, eventfd_copy.target_fd);
+		if (file) {
+			if (file->f_mode & FMODE_PATH ||
+				!atomic_long_inc_not_zero(&file->f_count))
 					file = NULL;
-			}
-			rcu_read_unlock ();
-			put_files_struct (files);
-
-			if (file == NULL)
-			{
-				printk (KERN_DEBUG "Failed to get file from target pid\n");
-				return 0;
-			}
+		}
+		rcu_read_unlock();
+		put_files_struct(files);
 
+		if (file == NULL) {
+			printk(KERN_DEBUG "Failed to get file from target pid\n");
+			return 0;
+		}
 
-			/*
-			 * Install the file struct from the target process into the
-			 * file desciptor of the source process,
-			 */
+		/*
+		 * Install the file struct from the target process into the
+		 * file descriptor of the source process,
+		 */
 
-			fd_install (eventfd_copy.source_fd, file);
+		fd_install(eventfd_copy.source_fd, file);
 
-			return 0;
+		return 0;
 
-		default:
-			return -ENOIOCTLCMD;
+	default:
+		return -ENOIOCTLCMD;
 	}
 }
 
@@ -183,23 +173,23 @@  static struct miscdevice eventfd_link_misc = {
 };
 
 static int __init
-eventfd_link_init (void)
+eventfd_link_init(void)
 {
-	return misc_register (&eventfd_link_misc);
+	return misc_register(&eventfd_link_misc);
 }
 
-module_init (eventfd_link_init);
+module_init(eventfd_link_init);
 
 static void __exit
-eventfd_link_exit (void)
+eventfd_link_exit(void)
 {
-	misc_deregister (&eventfd_link_misc);
+	misc_deregister(&eventfd_link_misc);
 }
 
-module_exit (eventfd_link_exit);
+module_exit(eventfd_link_exit);
 
-MODULE_VERSION ("0.0.1");
-MODULE_LICENSE ("GPL v2");
-MODULE_AUTHOR ("Anthony Fee");
-MODULE_DESCRIPTION ("Link eventfd");
-MODULE_ALIAS ("devname:eventfd-link");
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anthony Fee");
+MODULE_DESCRIPTION("Link eventfd");
+MODULE_ALIAS("devname:eventfd-link");
diff --git a/lib/librte_vhost/eventfd_link/eventfd_link.h b/lib/librte_vhost/eventfd_link/eventfd_link.h
index a32a8dd..ea619ec 100644
--- a/lib/librte_vhost/eventfd_link/eventfd_link.h
+++ b/lib/librte_vhost/eventfd_link/eventfd_link.h
@@ -1,79 +1,76 @@ 
 /*-
- *  * This file is provided under a dual BSD/GPLv2 license.  When using or
- *  *   redistributing this file, you may do so under either license.
- *  *
- *  *   GPL LICENSE SUMMARY
- *  *
- *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *  *
- *  *   This program is free software; you can redistribute it and/or modify
- *  *   it under the terms of version 2 of the GNU General Public License as
- *  *   published by the Free Software Foundation.
- *  *
- *  *   This program is distributed in the hope that it will be useful, but
- *  *   WITHOUT ANY WARRANTY; without even the implied warranty of
- *  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  *   General Public License for more details.
- *  *
- *  *   You should have received a copy of the GNU General Public License
- *  *   along with this program; if not, write to the Free Software
- *  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *  *   The full GNU General Public License is included in this distribution
- *  *   in the file called LICENSE.GPL.
- *  *
- *  *   Contact Information:
- *  *   Intel Corporation
- *  *
- *  *   BSD LICENSE
- *  *
- *  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *  *   All rights reserved.
- *  *
- *  *   Redistribution and use in source and binary forms, with or without
- *  *   modification, are permitted provided that the following conditions
- *  *   are met:
- *  *
- *  *     * Redistributions of source code must retain the above copyright
- *  *       notice, this list of conditions and the following disclaimer.
- *  *     * Redistributions in binary form must reproduce the above copyright
- *  *       notice, this list of conditions and the following disclaimer in
- *  *       the documentation and/or other materials provided with the
- *  *       distribution.
- *  *     * Neither the name of Intel Corporation nor the names of its
- *  *       contributors may be used to endorse or promote products derived
- *  *       from this software without specific prior written permission.
- *  *
- *  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *  *
- *   */
+ *  This file is provided under a dual BSD/GPLv2 license.  When using or
+ *  redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *   The full GNU General Public License is included in this distribution
+ *   in the file called LICENSE.GPL.
+ *
+ *   Contact Information:
+ *   Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *   Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *   Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ *   Neither the name of Intel Corporation nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
 
 #ifndef _EVENTFD_LINK_H_
 #define _EVENTFD_LINK_H_
 
 /*
- *	ioctl to copy an fd entry in calling process to an fd in a target process
+ * ioctl to copy an fd entry in calling process to an fd in a target process
  */
 #define EVENTFD_COPY 1
 
 /*
- *	arguements for the EVENTFD_COPY ioctl
+ * arguments for the EVENTFD_COPY ioctl
  */
 struct eventfd_copy {
-	// fd in the target pid
-    unsigned target_fd;
-	// fd in the calling pid
-    unsigned source_fd;
-	// pid of the target pid
-    pid_t target_pid;
+	unsigned target_fd; /* fd in the target pid */
+	unsigned source_fd; /* fd in the calling pid */
+	pid_t target_pid; /* pid of the target pid */
 };
 #endif /* _EVENTFD_LINK_H_ */
diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index b6548a1..00b1328 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -90,8 +90,7 @@  struct vhost_virtqueue {
 /**
  * Device structure contains all configuration information relating to the device.
  */
-struct virtio_net
-{
+struct virtio_net {
 	struct vhost_virtqueue	*virtqueue[VIRTIO_QNUM];	/**< Contains all virtqueue information. */
 	struct virtio_memory	*mem;		/**< QEMU memory and memory region information. */
 	uint64_t		features;	/**< Negotiated feature set. */
diff --git a/lib/librte_vhost/vhost-net-cdev.c b/lib/librte_vhost/vhost-net-cdev.c
index 91ff0d8..57c76cb 100644
--- a/lib/librte_vhost/vhost-net-cdev.c
+++ b/lib/librte_vhost/vhost-net-cdev.c
@@ -46,21 +46,21 @@ 
 
 #include "vhost-net-cdev.h"
 
-#define FUSE_OPT_DUMMY		"\0\0"
-#define FUSE_OPT_FORE		"-f\0\0"
-#define FUSE_OPT_NOMULTI	"-s\0\0"
+#define FUSE_OPT_DUMMY "\0\0"
+#define FUSE_OPT_FORE  "-f\0\0"
+#define FUSE_OPT_NOMULTI "-s\0\0"
 
-static const uint32_t	default_major = 231;
-static const uint32_t	default_minor = 1;
-static const char	cuse_device_name[]	= "/dev/cuse";
-static const char	default_cdev[] = "vhost-net";
+static const uint32_t default_major = 231;
+static const uint32_t default_minor = 1;
+static const char cuse_device_name[] = "/dev/cuse";
+static const char default_cdev[] = "vhost-net";
 
-static struct fuse_session			*session;
-static struct vhost_net_device_ops	const *ops;
+static struct fuse_session *session;
+static struct vhost_net_device_ops const *ops;
 
 /*
- * Returns vhost_device_ctx from given fuse_req_t. The index is populated later when
- * the device is added to the device linked list.
+ * Returns vhost_device_ctx from given fuse_req_t. The index is populated later
+ * when the device is added to the device linked list.
  */
 static struct vhost_device_ctx
 fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)
@@ -75,7 +75,8 @@  fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)
 }
 
 /*
- * When the device is created in QEMU it gets initialised here and added to the device linked list.
+ * When the device is created in QEMU it gets initialised here and
+ * added to the device linked list.
  */
 static void
 vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
@@ -91,7 +92,8 @@  vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
 
 	fi->fh = err;
 
-	RTE_LOG(INFO, VHOST_CONFIG, "(%"PRIu64") Device configuration started\n", fi->fh);
+	RTE_LOG(INFO, VHOST_CONFIG,
+		"(%"PRIu64") Device configuration started\n", fi->fh);
 	fuse_reply_open(req, fi);
 }
 
@@ -113,8 +115,8 @@  vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
  * Boilerplate code for CUSE IOCTL
  * Implicit arguments: ctx, req, result.
  */
-#define VHOST_IOCTL(func) do {			\
-	result = (func)(ctx);			\
+#define VHOST_IOCTL(func) do {	\
+	result = (func)(ctx);	\
 	fuse_reply_ioctl(req, result, NULL, 0);	\
 } while (0)
 
@@ -122,57 +124,58 @@  vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
  * Boilerplate IOCTL RETRY
  * Implicit arguments: req.
  */
-#define VHOST_IOCTL_RETRY(size_r, size_w) do {		\
-	struct iovec iov_r = { arg, (size_r) };		\
-	struct iovec iov_w = { arg, (size_w) };		\
-	fuse_reply_ioctl_retry(req, &iov_r, (size_r)?1:0, &iov_w, (size_w)?1:0);	\
+#define VHOST_IOCTL_RETRY(size_r, size_w) do {	\
+	struct iovec iov_r = { arg, (size_r) };	\
+	struct iovec iov_w = { arg, (size_w) };	\
+	fuse_reply_ioctl_retry(req, &iov_r,	\
+		(size_r) ? 1 : 0, &iov_w, (size_w) ? 1 : 0);\
 } while (0)
 
 /*
  * Boilerplate code for CUSE Read IOCTL
  * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
  */
-#define VHOST_IOCTL_R(type, var, func) do {		\
-	if (!in_bufsz) {				\
-		VHOST_IOCTL_RETRY(sizeof(type), 0);	\
-	} else {					\
-		(var) = *(const type*) in_buf;		\
-		result = func(ctx, &(var));		\
-		fuse_reply_ioctl(req, result, NULL, 0);	\
-	}						\
+#define VHOST_IOCTL_R(type, var, func) do {	\
+	if (!in_bufsz) {	\
+		VHOST_IOCTL_RETRY(sizeof(type), 0);\
+	} else {	\
+		(var) = *(const type*)in_buf;	\
+		result = func(ctx, &(var));	\
+		fuse_reply_ioctl(req, result, NULL, 0);\
+	}	\
 } while (0)
 
 /*
- *	Boilerplate code for CUSE Write IOCTL
+ * Boilerplate code for CUSE Write IOCTL
  * Implicit arguments: ctx, req, result, out_bufsz.
  */
-#define	VHOST_IOCTL_W(type, var, func) do {		\
-	if (!out_bufsz) {				\
-		VHOST_IOCTL_RETRY(0, sizeof(type));	\
-	} else {					\
-		result = (func)(ctx, &(var));		\
-		fuse_reply_ioctl(req, result, &(var), sizeof(type));	\
-	}								\
+#define VHOST_IOCTL_W(type, var, func) do {	\
+	if (!out_bufsz) {	\
+		VHOST_IOCTL_RETRY(0, sizeof(type));\
+	} else {	\
+		result = (func)(ctx, &(var));\
+		fuse_reply_ioctl(req, result, &(var), sizeof(type));\
+	} \
 } while (0)
 
 /*
  * Boilerplate code for CUSE Read/Write IOCTL
  * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
  */
-#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do {		\
-	if (!in_bufsz) {						\
-		VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2));	\
-	} else {							\
-		(var1) = *(const type1*) (in_buf);			\
-		result = (func)(ctx, (var1), &(var2));			\
-		fuse_reply_ioctl(req, result, &(var2), sizeof(type2));	\
-	}								\
+#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do {	\
+	if (!in_bufsz) {	\
+		VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2));\
+	} else {	\
+		(var1) = *(const type1*) (in_buf);	\
+		result = (func)(ctx, (var1), &(var2));	\
+		fuse_reply_ioctl(req, result, &(var2), sizeof(type2));\
+	}	\
 } while (0)
 
 /*
- * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on
- * the type of IOCTL a buffer is requested to read or to write. This
- * request is handled by FUSE and the buffer is then given to CUSE.
+ * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on the type
+ * of IOCTL a buffer is requested to read or to write. This request is handled
+ * by FUSE and the buffer is then given to CUSE.
  */
 static void
 vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
@@ -189,33 +192,39 @@  vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
 
 	switch (cmd) {
 	case VHOST_NET_SET_BACKEND:
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
 		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
 		break;
 
 	case VHOST_GET_FEATURES:
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
 		VHOST_IOCTL_W(uint64_t, features, ops->get_features);
 		break;
 
 	case VHOST_SET_FEATURES:
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
 		VHOST_IOCTL_R(uint64_t, features, ops->set_features);
 		break;
 
 	case VHOST_RESET_OWNER:
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
 		VHOST_IOCTL(ops->reset_owner);
 		break;
 
 	case VHOST_SET_OWNER:
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
 		VHOST_IOCTL(ops->set_owner);
 		break;
 
 	case VHOST_SET_MEM_TABLE:
 		/*TODO fix race condition.*/
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
 		static struct vhost_memory mem_temp;
 
 		switch (in_bufsz) {
@@ -227,7 +236,9 @@  vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
 			mem_temp = *(const struct vhost_memory *) in_buf;
 
 			if (mem_temp.nregions > 0) {
-				VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) + (sizeof(struct vhost_memory_region) * mem_temp.nregions), 0);
+				VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) +
+					(sizeof(struct vhost_memory_region) *
+						mem_temp.nregions), 0);
 			} else {
 				result = -1;
 				fuse_reply_ioctl(req, result, NULL, 0);
@@ -235,56 +246,70 @@  vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
 			break;
 
 		default:
-			result = ops->set_mem_table(ctx, in_buf, mem_temp.nregions);
+			result = ops->set_mem_table(ctx,
+					in_buf, mem_temp.nregions);
 			if (result)
 				fuse_reply_err(req, EINVAL);
 			else
 				fuse_reply_ioctl(req, result, NULL, 0);
-
 		}
-
 		break;
 
 	case VHOST_SET_VRING_NUM:
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
-		VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
+		VHOST_IOCTL_R(struct vhost_vring_state, state,
+			ops->set_vring_num);
 		break;
 
 	case VHOST_SET_VRING_BASE:
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
-		VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
+		VHOST_IOCTL_R(struct vhost_vring_state, state,
+			ops->set_vring_base);
 		break;
 
 	case VHOST_GET_VRING_BASE:
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
-		VHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
+		VHOST_IOCTL_RW(uint32_t, index,
+			struct vhost_vring_state, state, ops->get_vring_base);
 		break;
 
 	case VHOST_SET_VRING_ADDR:
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
-		VHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
+		VHOST_IOCTL_R(struct vhost_vring_addr, addr,
+			ops->set_vring_addr);
 		break;
 
 	case VHOST_SET_VRING_KICK:
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
-		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
+		VHOST_IOCTL_R(struct vhost_vring_file, file,
+			ops->set_vring_kick);
 		break;
 
 	case VHOST_SET_VRING_CALL:
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
-		VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
+		VHOST_IOCTL_R(struct vhost_vring_file, file,
+			ops->set_vring_call);
 		break;
 
 	default:
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: DOES NOT EXIST\n", ctx.fh);
 		result = -1;
 		fuse_reply_ioctl(req, result, NULL, 0);
 	}
 
 	if (result < 0)
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
 	else
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
 }
 
 /*
@@ -297,8 +322,8 @@  static const struct cuse_lowlevel_ops vhost_net_ops = {
 };
 
 /*
- * cuse_info is populated and used to register the cuse device. vhost_net_device_ops are
- * also passed when the device is registered in main.c.
+ * cuse_info is populated and used to register the cuse device.
+ * vhost_net_device_ops are also passed when the device is registered in app.
  */
 int
 rte_vhost_driver_register(const char *dev_name)
@@ -314,20 +339,23 @@  rte_vhost_driver_register(const char *dev_name)
 	char *fuse_argv[] = {fuse_opt_dummy, fuse_opt_fore, fuse_opt_nomulti};
 
 	if (access(cuse_device_name, R_OK | W_OK) < 0) {
-		RTE_LOG(ERR, VHOST_CONFIG, "Character device %s can't be accessed, maybe not exist\n", cuse_device_name);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"char device %s can't be accessed, maybe not exist\n",
+			cuse_device_name);
 		return -1;
 	}
 
 	/*
-	 * The device name is created. This is passed to QEMU so that it can register
-	 * the device with our application.
+	 * The device name is created. This is passed to QEMU so that it can
+	 * register the device with our application.
 	 */
 	snprintf(device_name, PATH_MAX, "DEVNAME=%s", dev_name);
 	snprintf(char_device_name, PATH_MAX, "/dev/%s", dev_name);
 
 	/* Check if device already exists. */
 	if (access(char_device_name, F_OK) != -1) {
-		RTE_LOG(ERR, VHOST_CONFIG, "Character device %s already exists\n", char_device_name);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"char device %s already exists\n", char_device_name);
 		return -1;
 	}
 
@@ -341,7 +369,7 @@  rte_vhost_driver_register(const char *dev_name)
 	ops = get_virtio_net_callbacks();
 
 	session = cuse_lowlevel_setup(3, fuse_argv,
-				&cuse_info, &vhost_net_ops, 0, NULL);
+			&cuse_info, &vhost_net_ops, 0, NULL);
 	if (session == NULL)
 		return -1;
 
@@ -349,7 +377,8 @@  rte_vhost_driver_register(const char *dev_name)
 }
 
 /**
- * The CUSE session is launched allowing the application to receive open, release and ioctl calls.
+ * The CUSE session is launched allowing the application to receive open,
+ * release and ioctl calls.
  */
 int
 rte_vhost_driver_session_start(void)
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 84ec0e8..ccfd82f 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -45,7 +45,7 @@ 
 /**
  * This function adds buffers to the virtio devices RX virtqueue. Buffers can
  * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that were succesfully
+ * count is returned to indicate the number of packets that are successfully
  * added to the RX queue. This function works when mergeable is disabled.
  */
 static inline uint32_t __attribute__((always_inline))
@@ -76,7 +76,7 @@  virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
 
 	/*
-	 * As many data cores may want access to available buffers, 
+	 * As many data cores may want access to available buffers,
 	 * they need to be reserved.
 	 */
 	do {
@@ -143,7 +143,8 @@  virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 		}
 
 		/* Update used ring with desc information */
-		vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
+		vq->used->ring[res_cur_idx & (vq->size - 1)].id =
+							head[packet_success];
 		vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
 
 		/* Copy mbuf data to buffer */
@@ -389,10 +390,7 @@  copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
 }
 
 /*
- * This function adds buffers to the virtio devices RX virtqueue. Buffers can
- * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that were succesfully
- * added to the RX queue. This function works for mergeable RX.
+ * This function works for mergeable RX.
  */
 static inline uint32_t __attribute__((always_inline))
 virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
@@ -729,5 +727,4 @@  rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
 		eventfd_write((int)vq->kickfd, 1);
 	return entry_success;
-
 }
diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
index 8015dd8..c07a11e 100644
--- a/lib/librte_vhost/virtio-net.c
+++ b/lib/librte_vhost/virtio-net.c
@@ -52,27 +52,27 @@ 
 #include "vhost-net-cdev.h"
 #include "eventfd_link/eventfd_link.h"
 
-/**
+/*
  * Device linked list structure for configuration.
  */
 struct virtio_net_config_ll {
-	struct virtio_net		dev;	/* Virtio device.*/
-	struct virtio_net_config_ll	*next;	/* Next entry on linked list.*/
+	struct virtio_net dev;			/* Virtio device.*/
+	struct virtio_net_config_ll *next;	/* Next dev on linked list.*/
 };
 
 const char eventfd_cdev[] = "/dev/eventfd-link";
 
-/* device ops to add/remove device to data core. */
+/* device ops to add/remove device to/from data core. */
 static struct virtio_net_device_ops const *notify_ops;
-/* Root address of the linked list in the configuration core. */
-static struct virtio_net_config_ll	*ll_root;
+/* root address of the linked list of managed virtio devices */
+static struct virtio_net_config_ll *ll_root;
 
-/* Features supported by this application. RX merge buffers are enabled by default. */
+/* Features supported by this lib. */
 #define VHOST_SUPPORTED_FEATURES (1ULL << VIRTIO_NET_F_MRG_RXBUF)
 static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
 
 /* Line size for reading maps file. */
-const uint32_t BUFSIZE = PATH_MAX;
+static const uint32_t BUFSIZE = PATH_MAX;
 
 /* Size of prot char array in procmap. */
 #define PROT_SZ 5
@@ -82,19 +82,19 @@  const uint32_t BUFSIZE = PATH_MAX;
 
 /* Structure containing information gathered from maps file. */
 struct procmap {
-	uint64_t	va_start;	/* Start virtual address in file. */
-	uint64_t	len;		/* Size of file. */
-	uint64_t	pgoff;		/* Not used. */
-	uint32_t	maj;		/* Not used. */
-	uint32_t	min;		/* Not used. */
-	uint32_t	ino;		/* Not used. */
-	char		prot[PROT_SZ];	/* Not used. */
-	char		fname[PATH_MAX];/* File name. */
+	uint64_t va_start;	/* Start virtual address in file. */
+	uint64_t len;		/* Size of file. */
+	uint64_t pgoff;		/* Not used. */
+	uint32_t maj;		/* Not used. */
+	uint32_t min;		/* Not used. */
+	uint32_t ino;		/* Not used. */
+	char prot[PROT_SZ];	/* Not used. */
+	char fname[PATH_MAX];	/* File name. */
 };
 
 /*
- * Converts QEMU virtual address to Vhost virtual address. This function is used
- * to convert the ring addresses to our address space.
+ * Converts QEMU virtual address to Vhost virtual address. This function is
+ * used to convert the ring addresses to our address space.
  */
 static uint64_t
 qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
@@ -107,8 +107,8 @@  qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
 	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
 		region = &dev->mem->regions[regionidx];
 		if ((qemu_va >= region->userspace_address) &&
-				(qemu_va <= region->userspace_address +
-				region->memory_size)) {
+			(qemu_va <= region->userspace_address +
+			region->memory_size)) {
 			vhost_va = dev->mem->mapped_address + qemu_va -
 					dev->mem->base_address;
 			break;
@@ -118,7 +118,8 @@  qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
 }
 
 /*
- * Locate the file containing QEMU's memory space and map it to our address space.
+ * Locate the file containing QEMU's memory space and
+ * map it to our address space.
  */
 static int
 host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
@@ -134,10 +135,10 @@  host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
 	char procdir[PATH_MAX];
 	char resolved_path[PATH_MAX];
 	char *path = NULL;
-	FILE		*fmap;
-	void		*map;
-	uint8_t		found = 0;
-	char		line[BUFSIZE];
+	FILE *fmap;
+	void *map;
+	uint8_t found = 0;
+	char line[BUFSIZE];
 	char dlm[] = "-   :   ";
 	char *str, *sp, *in[PROCMAP_SZ];
 	char *end = NULL;
@@ -159,7 +160,7 @@  host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
 	while (fgets(line, BUFSIZE, fmap) != 0) {
 		str = line;
 		errno = 0;
-		/* Split line in to fields. */
+		/* Split line into fields. */
 		for (i = 0; i < PROCMAP_SZ; i++) {
 			in[i] = strtok_r(str, &dlm[i], &sp);
 			if ((in[i] == NULL) || (errno != 0)) {
@@ -171,37 +172,43 @@  host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
 
 		/* Convert/Copy each field as needed. */
 		procmap.va_start = strtoull(in[0], &end, 16);
-		if ((in[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
+		if ((in[0] == '\0') || (end == NULL) || (*end != '\0') ||
+			(errno != 0)) {
 			fclose(fmap);
 			return -1;
 		}
 
 		procmap.len = strtoull(in[1], &end, 16);
-		if ((in[1] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
+		if ((in[1] == '\0') || (end == NULL) || (*end != '\0') ||
+			(errno != 0)) {
 			fclose(fmap);
 			return -1;
 		}
 
 		procmap.pgoff = strtoull(in[3], &end, 16);
-		if ((in[3] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
+		if ((in[3] == '\0') || (end == NULL) || (*end != '\0') ||
+			(errno != 0)) {
 			fclose(fmap);
 			return -1;
 		}
 
 		procmap.maj = strtoul(in[4], &end, 16);
-		if ((in[4] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
+		if ((in[4] == '\0') || (end == NULL) || (*end != '\0') ||
+			(errno != 0)) {
 			fclose(fmap);
 			return -1;
 		}
 
 		procmap.min = strtoul(in[5], &end, 16);
-		if ((in[5] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
+		if ((in[5] == '\0') || (end == NULL) || (*end != '\0') ||
+			(errno != 0)) {
 			fclose(fmap);
 			return -1;
 		}
 
 		procmap.ino = strtoul(in[6], &end, 16);
-		if ((in[6] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
+		if ((in[6] == '\0') || (end == NULL) || (*end != '\0') ||
+			(errno != 0)) {
 			fclose(fmap);
 			return -1;
 		}
@@ -218,16 +225,19 @@  host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
 	fclose(fmap);
 
 	if (!found) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file in pid %d maps file\n", dev->device_fh, pid);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") Failed to find memory file in pid %d maps file\n",
+			dev->device_fh, pid);
 		return -1;
 	}
 
 	/* Find the guest memory file among the process fds. */
 	dp = opendir(procdir);
 	if (dp == NULL) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory\n", dev->device_fh, pid);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") Cannot open pid %d process directory\n",
+			dev->device_fh, pid);
 		return -1;
-
 	}
 
 	found = 0;
@@ -254,23 +264,29 @@  host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
 	closedir(dp);
 
 	if (found == 0) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file for pid %d\n", dev->device_fh, pid);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") Failed to find memory file for pid %d\n",
+			dev->device_fh, pid);
 		return -1;
 	}
 	/* Open the shared memory file and map the memory into this process. */
 	fd = open(memfile, O_RDWR);
 
 	if (fd == -1) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to open %s for pid %d\n", dev->device_fh, memfile, pid);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") Failed to open %s for pid %d\n",
+			dev->device_fh, memfile, pid);
 		return -1;
 	}
 
-	map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE ,
-			MAP_POPULATE|MAP_SHARED, fd, 0);
+	map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE,
+		MAP_POPULATE|MAP_SHARED, fd, 0);
 	close(fd);
 
 	if (map == MAP_FAILED) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Error mapping the file %s for pid %d\n",  dev->device_fh, memfile, pid);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") Error mapping the file %s for pid %d\n",
+			dev->device_fh, memfile, pid);
 		return -1;
 	}
 
@@ -278,8 +294,11 @@  host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
 	mem->mapped_address = (uint64_t)(uintptr_t)map;
 	mem->mapped_size = procmap.len;
 
-	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n", dev->device_fh,
-		memfile, resolved_path, (long long unsigned)mem->mapped_size, map);
+	LOG_DEBUG(VHOST_CONFIG,
+		"(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n",
+		dev->device_fh,
+		memfile, resolved_path,
+		(long long unsigned)mem->mapped_size, map);
 
 	return 0;
 }
@@ -303,7 +322,8 @@  get_config_ll_entry(struct vhost_device_ctx ctx)
 }
 
 /*
- * Searches the configuration core linked list and retrieves the device if it exists.
+ * Searches the configuration core linked list and
+ * retrieves the device if it exists.
  */
 static struct virtio_net *
 get_device(struct vhost_device_ctx ctx)
@@ -312,11 +332,11 @@  get_device(struct vhost_device_ctx ctx)
 
 	ll_dev = get_config_ll_entry(ctx);
 
-	/* If a matching entry is found in the linked list, return the device in that entry. */
 	if (ll_dev)
 		return &ll_dev->dev;
 
-	RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
+	RTE_LOG(ERR, VHOST_CONFIG,
+		"(%"PRIu64") Device not found in linked list.\n", ctx.fh);
 	return NULL;
 }
 
@@ -331,13 +351,18 @@  add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
 	/* If ll_dev == NULL then this is the first device so go to else */
 	if (ll_dev) {
 		/* If the 1st device_fh != 0 then we insert our device here. */
-		if (ll_dev->dev.device_fh != 0)	{
+		if (ll_dev->dev.device_fh != 0) {
 			new_ll_dev->dev.device_fh = 0;
 			new_ll_dev->next = ll_dev;
 			ll_root = new_ll_dev;
 		} else {
-			/* Increment through the ll until we find un unused device_fh. Insert the device at that entry*/
-			while ((ll_dev->next != NULL) && (ll_dev->dev.device_fh == (ll_dev->next->dev.device_fh - 1)))
+			/*
+			 * Increment through the ll until we find an unused
+			 * device_fh. Insert the device at that entry.
+			 */
+			while ((ll_dev->next != NULL) &&
+				(ll_dev->dev.device_fh ==
+					(ll_dev->next->dev.device_fh - 1)))
 				ll_dev = ll_dev->next;
 
 			new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
@@ -352,7 +377,8 @@  add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
 }
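
To make the device_fh assignment above easier to follow: the list is kept sorted by device_fh, so the loop stops at the first gap in the numbering (or at the tail) and the new device takes the next value. A simplified sketch with a stripped-down node type (illustrative only, not the real virtio_net_config_ll):

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified node type, for illustration only. */
    struct ll_node {
    	uint64_t device_fh;
    	struct ll_node *next;
    };

    /*
    * The list is sorted by device_fh, so the first gap in the numbering
    * (or the end of the list) yields the lowest free handle.
    */
    static uint64_t
    first_free_fh(struct ll_node *head)
    {
    	if (head == NULL || head->device_fh != 0)
    		return 0;

    	while (head->next != NULL &&
    		head->device_fh == head->next->device_fh - 1)
    		head = head->next;

    	return head->device_fh + 1;
    }
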
 
 /*
- * Unmap any memory, close any file descriptors and free any memory owned by a device.
+ * Unmap any memory, close any file descriptors and
+ * free any memory owned by a device.
  */
 static void
 cleanup_device(struct virtio_net *dev)
@@ -386,6 +412,7 @@  free_device(struct virtio_net_config_ll *ll_dev)
 	free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
 	free(ll_dev);
 }
+
 /*
  * Remove an entry from the device configuration linked list.
  */
@@ -423,7 +450,10 @@  init_device(struct virtio_net *dev)
 {
 	uint64_t vq_offset;
 
-	/* Virtqueues have already been malloced so we don't want to set them to NULL. */
+	/*
+	 * Virtqueues have already been malloced so
+	 * we don't want to set them to NULL.
+	 */
 	vq_offset = offsetof(struct virtio_net, mem);
 
 	/* Set everything to 0. */
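
The offsetof() trick used by init_device() above is a common way to reset only part of a structure while preserving earlier fields that were allocated beforehand. A generic sketch with a hypothetical struct (not the real virtio_net layout):

    #include <stddef.h>
    #include <string.h>

    struct example_dev {
    	void *keep_this;      /* e.g. pointers allocated earlier */
    	void *keep_this_too;
    	int   counters[8];    /* everything from here on is reset */
    	int   flags;
    };

    static void
    reset_dev_state(struct example_dev *d)
    {
    	size_t off = offsetof(struct example_dev, counters);

    	/*
    	 * Zero only the fields at or after 'counters', preserving the
    	 * already-allocated pointers that precede them.
    	 */
    	memset((char *)d + off, 0, sizeof(*d) - off);
    }
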
@@ -491,8 +521,8 @@  new_device(struct vhost_device_ctx ctx)
 }
 
 /*
- * Function is called from the CUSE release function. This function will cleanup
- * the device and remove it from device configuration linked list.
+ * Function is called from the CUSE release function. It cleans up the
+ * device and removes it from the device configuration linked list.
  */
 static void
 destroy_device(struct vhost_device_ctx ctx)
@@ -503,15 +533,19 @@  destroy_device(struct vhost_device_ctx ctx)
 	/* Find the linked list entry for the device to be removed. */
 	ll_dev_cur_ctx = get_config_ll_entry(ctx);
 	while (ll_dev_cur != NULL) {
-		/* If the device is found or a device that doesn't exist is found then it is removed. */
+		/*
+		 * If the device is found, or a device that doesn't exist
+		 * is found, then it is removed.
+		 */
 		if (ll_dev_cur == ll_dev_cur_ctx) {
 			/*
-			 * If the device is running on a data core then call the function to remove it from
-			 * the data core.
+			 * If the device is running on a data core then call
+			 * the function to remove it from the data core.
 			 */
 			if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
 				notify_ops->destroy_device(&(ll_dev_cur->dev));
-			ll_dev_cur = rm_config_ll_entry(ll_dev_cur, ll_dev_last);
+			ll_dev_cur = rm_config_ll_entry(ll_dev_cur,
+					ll_dev_last);
 		} else {
 			ll_dev_last = ll_dev_cur;
 			ll_dev_cur = ll_dev_cur->next;
@@ -521,7 +555,8 @@  destroy_device(struct vhost_device_ctx ctx)
 
 /*
  * Called from CUSE IOCTL: VHOST_SET_OWNER
- * This function just returns success at the moment unless the device hasn't been initialised.
+ * This function currently just returns success, provided the device
+ * has been initialised.
  */
 static int
 set_owner(struct vhost_device_ctx ctx)
@@ -571,7 +606,7 @@  get_features(struct vhost_device_ctx ctx, uint64_t *pu)
 
 /*
  * Called from CUSE IOCTL: VHOST_SET_FEATURES
- * We receive the negotiated set of features supported by us and the virtio device.
+ * We receive the negotiated features supported by us and the virtio device.
  */
 static int
 set_features(struct vhost_device_ctx ctx, uint64_t *pu)
@@ -589,13 +624,17 @@  set_features(struct vhost_device_ctx ctx, uint64_t *pu)
 
 	/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
 	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers enabled\n", dev->device_fh);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") Mergeable RX buffers enabled\n",
+			dev->device_fh);
 		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
 			sizeof(struct virtio_net_hdr_mrg_rxbuf);
 		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
 			sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	} else {
-		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers disabled\n", dev->device_fh);
+		LOG_DEBUG(VHOST_CONFIG,
+			"(%"PRIu64") Mergeable RX buffers disabled\n",
+			dev->device_fh);
 		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
 			sizeof(struct virtio_net_hdr);
 		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
@@ -607,8 +646,8 @@  set_features(struct vhost_device_ctx ctx, uint64_t *pu)
 
 /*
  * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE
- * This function creates and populates the memory structure for the device. This includes
- * storing offsets used to translate buffer addresses.
+ * This function creates and populates the memory structure for the device.
+ * This includes storing offsets used to translate buffer addresses.
  */
 static int
 set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
@@ -634,7 +673,9 @@  set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
 	mem = calloc(1, sizeof(struct virtio_memory) +
 		(sizeof(struct virtio_memory_regions) * nregions));
 	if (mem == NULL) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev->mem.\n", dev->device_fh);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") Failed to allocate memory for dev->mem.\n",
+			dev->device_fh);
 		return -1;
 	}
 
@@ -656,15 +697,18 @@  set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
 			mem_regions[regionidx].userspace_addr;
 
 		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
-				regionidx, (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
-				(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
-				mem->regions[regionidx].memory_size);
+			regionidx,
+			(void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
+			(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
+			mem->regions[regionidx].memory_size);
 
 		/*set the base address mapping*/
 		if (mem->regions[regionidx].guest_phys_address == 0x0) {
-			mem->base_address = mem->regions[regionidx].userspace_address;
+			mem->base_address =
+				mem->regions[regionidx].userspace_address;
 			/* Map VM memory file */
-			if (host_memory_map(dev, mem, ctx.pid, mem->base_address) != 0) {
+			if (host_memory_map(dev, mem, ctx.pid,
+				mem->base_address) != 0) {
 				free(mem);
 				return -1;
 			}
@@ -678,27 +722,42 @@  set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
 		return -1;
 	}
 
-	/* Check if all of our regions have valid mappings. Usually one does not exist in the QEMU memory file. */
+	/*
+	 * Check if all of our regions have valid mappings.
+	 * Usually one does not exist in the QEMU memory file.
+	 */
 	valid_regions = mem->nregions;
 	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
-		if ((mem->regions[regionidx].userspace_address < mem->base_address) ||
-			(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size)))
+		if ((mem->regions[regionidx].userspace_address <
+			mem->base_address) ||
+			(mem->regions[regionidx].userspace_address >
+			(mem->base_address + mem->mapped_size)))
 				valid_regions--;
 	}
 
-	/* If a region does not have a valid mapping we rebuild our memory struct to contain only valid entries. */
+	/*
+	 * If a region does not have a valid mapping,
+	 * we rebuild our memory struct to contain only valid entries.
+	 */
 	if (valid_regions != mem->nregions) {
 		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\n",
 			dev->device_fh);
 
-		/* Re-populate the memory structure with only valid regions. Invalid regions are over-written with memmove. */
+		/*
+		 * Re-populate the memory structure with only valid regions.
+		 * Invalid regions are overwritten with memmove.
+		 */
 		valid_regions = 0;
 
 		for (regionidx = mem->nregions; 0 != regionidx--;) {
-			if ((mem->regions[regionidx].userspace_address < mem->base_address) ||
-					(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size))) {
-				memmove(&mem->regions[regionidx], &mem->regions[regionidx + 1],
-					sizeof(struct virtio_memory_regions) * valid_regions);
+			if ((mem->regions[regionidx].userspace_address <
+				mem->base_address) ||
+				(mem->regions[regionidx].userspace_address >
+				(mem->base_address + mem->mapped_size))) {
+				memmove(&mem->regions[regionidx],
+					&mem->regions[regionidx + 1],
+					sizeof(struct virtio_memory_regions) *
+						valid_regions);
 			} else {
 				valid_regions++;
 			}
@@ -708,12 +767,16 @@  set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
 	dev->mem = mem;
 
 	/*
-	 * Calculate the address offset for each region. This offset is used to identify the vhost virtual address
+	 * Calculate the address offset for each region.
+	 * This offset is used to identify the vhost virtual address
 	 * corresponding to a QEMU guest physical address.
 	 */
 	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
-		dev->mem->regions[regionidx].address_offset = dev->mem->regions[regionidx].userspace_address - dev->mem->base_address
-			+ dev->mem->mapped_address - dev->mem->regions[regionidx].guest_phys_address;
+		dev->mem->regions[regionidx].address_offset =
+			dev->mem->regions[regionidx].userspace_address -
+				dev->mem->base_address +
+				dev->mem->mapped_address -
+				dev->mem->regions[regionidx].guest_phys_address;
 
 	}
 	return 0;
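
With address_offset computed as above, translating a guest physical address to a vhost virtual address reduces to finding the containing region and adding its offset. A minimal sketch of such a translation helper (the library provides its own; this one mirrors, rather than copies, that logic and uses illustrative names):

    #include <stdint.h>

    /* Illustrative region descriptor, mirroring the fields used above. */
    struct mem_region_sketch {
    	uint64_t guest_phys_address;
    	uint64_t memory_size;
    	uint64_t address_offset;
    };

    /* Translate a guest physical address to a vhost virtual address. */
    static uint64_t
    gpa_to_vva_sketch(struct mem_region_sketch *regions, uint32_t nregions,
    		uint64_t guest_pa)
    {
    	uint32_t i;

    	for (i = 0; i < nregions; i++) {
    		if (guest_pa >= regions[i].guest_phys_address &&
    		    guest_pa < regions[i].guest_phys_address +
    				regions[i].memory_size)
    			return guest_pa + regions[i].address_offset;
    	}
    	return 0;
    }
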
@@ -732,7 +795,7 @@  set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
 	if (dev == NULL)
 		return -1;
 
-	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
+	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
 	dev->virtqueue[state->index]->size = state->num;
 
 	return 0;
@@ -740,8 +803,8 @@  set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
 
 /*
  * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
- * The virtio device sends us the desc, used and avail ring addresses. This function
- * then converts these to our address space.
+ * The virtio device sends us the desc, used and avail ring addresses.
+ * This function then converts these to our address space.
  */
 static int
 set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
@@ -753,31 +816,43 @@  set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
 	if (dev == NULL)
 		return -1;
 
-	/* addr->index refers to the queue index. The TX queue is 1, RX queue is 0. */
+	/* addr->index refers to the queue index. The txq is 1, rxq is 0. */
 	vq = dev->virtqueue[addr->index];
 
 	/* The addresses are converted from QEMU virtual to Vhost virtual. */
-	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
+	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
+			addr->desc_user_addr);
 	if (vq->desc == 0) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find descriptor ring address.\n", dev->device_fh);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") Failed to find desc ring address.\n",
+			dev->device_fh);
 		return -1;
 	}
 
-	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
+	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
+			addr->avail_user_addr);
 	if (vq->avail == 0) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find available ring address.\n", dev->device_fh);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") Failed to find avail ring address.\n",
+			dev->device_fh);
 		return -1;
 	}
 
-	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
+	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
+			addr->used_user_addr);
 	if (vq->used == 0) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find used ring address.\n", dev->device_fh);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") Failed to find used ring address.\n",
+			dev->device_fh);
 		return -1;
 	}
 
-	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n", dev->device_fh, vq->desc);
-	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n", dev->device_fh, vq->avail);
-	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n", dev->device_fh, vq->used);
+	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n",
+			dev->device_fh, vq->desc);
+	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n",
+			dev->device_fh, vq->avail);
+	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n",
+			dev->device_fh, vq->used);
 
 	return 0;
 }
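
The ring addresses arrive as QEMU process virtual addresses; converting them is a matter of rebasing them from the QEMU mapping onto the local mmap() of the same guest memory. A sketch under the single-mapping assumption used above (illustrative helper, not the library's qva_to_vva):

    #include <stdint.h>

    /*
    * Rebase a QEMU virtual address onto the local mapping of the same
    * guest memory. Returns 0 when the address falls outside the mapping,
    * which is why the callers above treat 0 as failure.
    */
    static uint64_t
    qemu_va_to_local_va(uint64_t qemu_va, uint64_t qemu_base,
    		uint64_t mapped_base, uint64_t mapped_size)
    {
    	if (qemu_va < qemu_base || qemu_va >= qemu_base + mapped_size)
    		return 0;

    	return mapped_base + (qemu_va - qemu_base);
    }
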
@@ -795,7 +870,7 @@  set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
 	if (dev == NULL)
 		return -1;
 
-	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
+	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
 	dev->virtqueue[state->index]->last_used_idx = state->num;
 	dev->virtqueue[state->index]->last_used_idx_res = state->num;
 
@@ -817,15 +892,15 @@  get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
 		return -1;
 
 	state->index = index;
-	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
+	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
 	state->num = dev->virtqueue[state->index]->last_used_idx;
 
 	return 0;
 }
 
 /*
- * This function uses the eventfd_link kernel module to copy an eventfd file descriptor
- * provided by QEMU in to our process space.
+ * This function uses the eventfd_link kernel module to copy an eventfd file
+ * descriptor provided by QEMU into our process space.
  */
 static int
 eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
@@ -835,7 +910,9 @@  eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
 	/* Open the character device to the kernel module. */
 	eventfd_link = open(eventfd_cdev, O_RDWR);
 	if (eventfd_link < 0) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") eventfd_link module is not loaded\n",  dev->device_fh);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") eventfd_link module is not loaded\n",
+			dev->device_fh);
 		return -1;
 	}
 
@@ -844,18 +921,19 @@  eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
 	close(eventfd_link);
 
 	if (ret < 0) {
-		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") EVENTFD_COPY ioctl failed\n",  dev->device_fh);
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%"PRIu64") EVENTFD_COPY ioctl failed\n",
+			dev->device_fh);
 		return -1;
 	}
 
-
 	return 0;
 }
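
For context: eventfd descriptors are per-process, so QEMU's fd numbers mean nothing in the vhost process; the eventfd_link character device bridges that gap via an ioctl. A rough sketch of the user-space side (the real ioctl request and argument struct live in eventfd_link.h and are not reproduced here; 'request' and 'args' are placeholders):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /*
    * Hypothetical wrapper: ask the eventfd_link module to duplicate an
    * eventfd owned by another process into this process.
    */
    static int
    copy_remote_eventfd(const char *cdev_path, unsigned long request, void *args)
    {
    	int ret;
    	int fd = open(cdev_path, O_RDWR);

    	if (fd < 0)
    		return -1;	/* kernel module not loaded */

    	ret = ioctl(fd, request, args);
    	close(fd);

    	return (ret < 0) ? -1 : 0;
    }
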
 
 /*
  * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
- * The virtio device sends an eventfd to interrupt the guest. This fd gets copied in
- * to our process space.
+ * The virtio device sends an eventfd to interrupt the guest. This fd gets
+ * copied into our process space.
  */
 static int
 set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
@@ -868,7 +946,7 @@  set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 	if (dev == NULL)
 		return -1;
 
-	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
+	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
 	vq = dev->virtqueue[file->index];
 
 	if (vq->kickfd)
@@ -888,8 +966,8 @@  set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 
 /*
  * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
- * The virtio device sends an eventfd that it can use to notify us. This fd gets copied in
- * to our process space.
+ * The virtio device sends an eventfd that it can use to notify us.
+ * This fd gets copied into our process space.
  */
 static int
 set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
@@ -902,7 +980,7 @@  set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 	if (dev == NULL)
 		return -1;
 
-	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
+	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
 	vq = dev->virtqueue[file->index];
 
 	if (vq->callfd)
@@ -922,10 +1000,12 @@  set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 
 /*
  * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
- * To complete device initialisation when the virtio driver is loaded we are provided with a
- * valid fd for a tap device (not used by us). If this happens then we can add the device to a
- * data core. When the virtio driver is removed we get fd=-1. At that point we remove the device
- * from the data core. The device will still exist in the device configuration linked list.
+ * To complete device initialisation when the virtio driver is loaded,
+ * we are provided with a valid fd for a tap device (not used by us).
+ * If this happens then we can add the device to a data core.
+ * When the virtio driver is removed we get fd=-1.
+ * At that point we remove the device from the data core.
+ * The device will still exist in the device configuration linked list.
  */
 static int
 set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
@@ -936,10 +1016,13 @@  set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 	if (dev == NULL)
 		return -1;
 
-	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
+	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
 	dev->virtqueue[file->index]->backend = file->fd;
 
-	/* If the device isn't already running and both backend fds are set we add the device. */
+	/*
+	 * If the device isn't already running and both backend fds are set,
+	 * we add the device.
+	 */
 	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
 		if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
 			((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED))
@@ -952,8 +1035,8 @@  set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 }
 
 /*
- * Function pointers are set for the device operations to allow CUSE to call functions
- * when an IOCTL, device_add or device_release is received.
+ * Function pointers are set for the device operations to allow CUSE to call
+ * functions when an IOCTL, device_add or device_release is received.
  */
 static const struct vhost_net_device_ops vhost_device_ops = {
 	.new_device = new_device,
@@ -991,11 +1074,13 @@  int rte_vhost_enable_guest_notification(struct virtio_net *dev,
 	uint16_t queue_id, int enable)
 {
 	if (enable) {
-		RTE_LOG(ERR, VHOST_CONFIG, "guest notification isn't supported.\n");
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"guest notification isn't supported.\n");
 		return -1;
 	}
 
-	dev->virtqueue[queue_id]->used->flags = enable ? 0 : VRING_USED_F_NO_NOTIFY;
+	dev->virtqueue[queue_id]->used->flags =
+		enable ? 0 : VRING_USED_F_NO_NOTIFY;
 	return 0;
 }
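
A short usage note on the function above: since only disabling guest notification is supported, a polling-mode application would simply pass enable = 0 for each queue, typically from its new_device callback. An application-side sketch (not from the patch):

    #include <rte_virtio_net.h>

    /* Called from the application's new_device callback (sketch). */
    static int
    app_new_device(struct virtio_net *dev)
    {
    	/*
    	 * Polling-mode host: tell the guest it does not need to kick us
    	 * when new buffers are available, for both queues.
    	 */
    	rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
    	rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);

    	return 0;
    }
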