diff mbox series

vhost: use another variable to store vhost msg result code

Message ID 1658110002-191064-1-git-send-email-andy.pei@intel.com (mailing list archive)
State Superseded
Delegated to: Maxime Coquelin
Headers show
Series vhost: use another variable to store vhost msg result code | expand

Checks

Context Check Description
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-abi-testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/github-robot: build success github build: passed
ci/iol-mellanox-Performance success Performance Testing PASS
ci/intel-Testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Pei, Andy July 18, 2022, 2:06 a.m. UTC
Currently in function vhost_user_msg_handler, variable ret is used to
store both vhost msg result code and function call return value.
After this patch, variable ret is used only to store function call
return value, a new variable msg_result is used to store vhost msg
result. This can improve readability.

Signed-off-by: Andy Pei <andy.pei@intel.com>
---
 lib/vhost/vhost_user.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

Comments

Xia, Chenbo Sept. 22, 2022, 1:26 p.m. UTC | #1
> -----Original Message-----
> From: Pei, Andy <andy.pei@intel.com>
> Sent: Monday, July 18, 2022 10:07 AM
> To: dev@dpdk.org
> Cc: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>; Ma,
> WenwuX <wenwux.ma@intel.com>
> Subject: [PATCH] vhost: use another variable to store vhost msg result
> code

Patch looks good. I suggest to use title:
vhost: use dedicated variable for vhost message result code

Thanks,
Chenbo

> 
> Currently in function vhost_user_msg_handler, variable ret is used to
> store both vhost msg result code and function call return value.
> After this patch, variable ret is used only to store function call
> return value, a new variable msg_result is used to store vhost msg
> result. This can improve readability.
> 
> Signed-off-by: Andy Pei <andy.pei@intel.com>
> ---
>  lib/vhost/vhost_user.c | 24 ++++++++++++------------
>  1 file changed, 12 insertions(+), 12 deletions(-)
> 
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 4ad28ba..dac06c9 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> @@ -2969,6 +2969,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
>  	struct vhu_msg_context ctx;
>  	vhost_message_handler_t *msg_handler;
>  	struct rte_vdpa_device *vdpa_dev;
> +	int msg_result = RTE_VHOST_MSG_RESULT_OK;
>  	int ret;
>  	int unlock_required = 0;
>  	bool handled;
> @@ -3061,8 +3062,8 @@ static int is_vring_iotlb(struct virtio_net *dev,
>  	handled = false;
>  	if (dev->extern_ops.pre_msg_handle) {
>  		RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
> -		ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> -		switch (ret) {
> +		msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> +		switch (msg_result) {
>  		case RTE_VHOST_MSG_RESULT_REPLY:
>  			send_vhost_reply(dev, fd, &ctx);
>  			/* Fall-through */
> @@ -3080,12 +3081,12 @@ static int is_vring_iotlb(struct virtio_net *dev,
>  		goto skip_to_post_handle;
> 
>  	if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0)
> {
> -		ret = RTE_VHOST_MSG_RESULT_ERR;
> +		msg_result = RTE_VHOST_MSG_RESULT_ERR;
>  	} else {
> -		ret = msg_handler->callback(&dev, &ctx, fd);
> +		msg_result = msg_handler->callback(&dev, &ctx, fd);
>  	}
> 
> -	switch (ret) {
> +	switch (msg_result) {
>  	case RTE_VHOST_MSG_RESULT_ERR:
>  		VHOST_LOG_CONFIG(dev->ifname, ERR,
>  			"processing %s failed.\n",
> @@ -3110,11 +3111,11 @@ static int is_vring_iotlb(struct virtio_net *dev,
>  	}
> 
>  skip_to_post_handle:
> -	if (ret != RTE_VHOST_MSG_RESULT_ERR &&
> +	if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
>  			dev->extern_ops.post_msg_handle) {
>  		RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
> -		ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
> -		switch (ret) {
> +		msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid,
> &ctx);
> +		switch (msg_result) {
>  		case RTE_VHOST_MSG_RESULT_REPLY:
>  			send_vhost_reply(dev, fd, &ctx);
>  			/* Fall-through */
> @@ -3133,7 +3134,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
>  			"vhost message (req: %d) was not handled.\n",
>  			request);
>  		close_msg_fds(&ctx);
> -		ret = RTE_VHOST_MSG_RESULT_ERR;
> +		msg_result = RTE_VHOST_MSG_RESULT_ERR;
>  	}
> 
>  	/*
> @@ -3142,17 +3143,16 @@ static int is_vring_iotlb(struct virtio_net *dev,
>  	 * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
>  	 */
>  	if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
> -		ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
> +		ctx.msg.payload.u64 = msg_result == RTE_VHOST_MSG_RESULT_ERR;
>  		ctx.msg.size = sizeof(ctx.msg.payload.u64);
>  		ctx.fd_num = 0;
>  		send_vhost_reply(dev, fd, &ctx);
> -	} else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
> +	} else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
>  		VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message handling
> failed.\n");
>  		ret = -1;
>  		goto unlock;
>  	}
> 
> -	ret = 0;
>  	for (i = 0; i < dev->nr_vring; i++) {
>  		struct vhost_virtqueue *vq = dev->virtqueue[i];
>  		bool cur_ready = vq_is_ready(dev, vq);
> --
> 1.8.3.1
Pei, Andy Sept. 23, 2022, 2:29 a.m. UTC | #2
Hi Chenbo,

Thanks for your reply.
I think your suggestion is good, and I will send a V2 patch to address this.

> -----Original Message-----
> From: Xia, Chenbo <chenbo.xia@intel.com>
> Sent: Thursday, September 22, 2022 9:27 PM
> To: Pei, Andy <andy.pei@intel.com>; dev@dpdk.org
> Cc: maxime.coquelin@redhat.com; Ma, WenwuX <WenwuX.Ma@intel.com>
> Subject: RE: [PATCH] vhost: use another variable to store vhost msg result code
> 
> > -----Original Message-----
> > From: Pei, Andy <andy.pei@intel.com>
> > Sent: Monday, July 18, 2022 10:07 AM
> > To: dev@dpdk.org
> > Cc: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>;
> > Ma, WenwuX <wenwux.ma@intel.com>
> > Subject: [PATCH] vhost: use another variable to store vhost msg result
> > code
> 
> Patch looks good. I suggest to use title:
> vhost: use dedicated variable for vhost message result code
> 
> Thanks,
> Chenbo
> 
> >
> > Currently in function vhost_user_msg_handler, variable ret is used to
> > store both vhost msg result code and function call return value.
> > After this patch, variable ret is used only to store function call
> > return value, a new variable msg_result is used to store vhost msg
> > result. This can improve readability.
> >
> > Signed-off-by: Andy Pei <andy.pei@intel.com>
> > ---
> >  lib/vhost/vhost_user.c | 24 ++++++++++++------------
> >  1 file changed, 12 insertions(+), 12 deletions(-)
> >
> > diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c index
> > 4ad28ba..dac06c9 100644
> > --- a/lib/vhost/vhost_user.c
> > +++ b/lib/vhost/vhost_user.c
> > @@ -2969,6 +2969,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> >  	struct vhu_msg_context ctx;
> >  	vhost_message_handler_t *msg_handler;
> >  	struct rte_vdpa_device *vdpa_dev;
> > +	int msg_result = RTE_VHOST_MSG_RESULT_OK;
> >  	int ret;
> >  	int unlock_required = 0;
> >  	bool handled;
> > @@ -3061,8 +3062,8 @@ static int is_vring_iotlb(struct virtio_net *dev,
> >  	handled = false;
> >  	if (dev->extern_ops.pre_msg_handle) {
> >  		RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) !=
> 0);
> > -		ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> > -		switch (ret) {
> > +		msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid,
> &ctx);
> > +		switch (msg_result) {
> >  		case RTE_VHOST_MSG_RESULT_REPLY:
> >  			send_vhost_reply(dev, fd, &ctx);
> >  			/* Fall-through */
> > @@ -3080,12 +3081,12 @@ static int is_vring_iotlb(struct virtio_net *dev,
> >  		goto skip_to_post_handle;
> >
> >  	if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0)
> > {
> > -		ret = RTE_VHOST_MSG_RESULT_ERR;
> > +		msg_result = RTE_VHOST_MSG_RESULT_ERR;
> >  	} else {
> > -		ret = msg_handler->callback(&dev, &ctx, fd);
> > +		msg_result = msg_handler->callback(&dev, &ctx, fd);
> >  	}
> >
> > -	switch (ret) {
> > +	switch (msg_result) {
> >  	case RTE_VHOST_MSG_RESULT_ERR:
> >  		VHOST_LOG_CONFIG(dev->ifname, ERR,
> >  			"processing %s failed.\n",
> > @@ -3110,11 +3111,11 @@ static int is_vring_iotlb(struct virtio_net *dev,
> >  	}
> >
> >  skip_to_post_handle:
> > -	if (ret != RTE_VHOST_MSG_RESULT_ERR &&
> > +	if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
> >  			dev->extern_ops.post_msg_handle) {
> >  		RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) !=
> 0);
> > -		ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
> > -		switch (ret) {
> > +		msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid,
> > &ctx);
> > +		switch (msg_result) {
> >  		case RTE_VHOST_MSG_RESULT_REPLY:
> >  			send_vhost_reply(dev, fd, &ctx);
> >  			/* Fall-through */
> > @@ -3133,7 +3134,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> >  			"vhost message (req: %d) was not handled.\n",
> >  			request);
> >  		close_msg_fds(&ctx);
> > -		ret = RTE_VHOST_MSG_RESULT_ERR;
> > +		msg_result = RTE_VHOST_MSG_RESULT_ERR;
> >  	}
> >
> >  	/*
> > @@ -3142,17 +3143,16 @@ static int is_vring_iotlb(struct virtio_net *dev,
> >  	 * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
> >  	 */
> >  	if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
> > -		ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
> > +		ctx.msg.payload.u64 = msg_result ==
> RTE_VHOST_MSG_RESULT_ERR;
> >  		ctx.msg.size = sizeof(ctx.msg.payload.u64);
> >  		ctx.fd_num = 0;
> >  		send_vhost_reply(dev, fd, &ctx);
> > -	} else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
> > +	} else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
> >  		VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message
> handling
> > failed.\n");
> >  		ret = -1;
> >  		goto unlock;
> >  	}
> >
> > -	ret = 0;
> >  	for (i = 0; i < dev->nr_vring; i++) {
> >  		struct vhost_virtqueue *vq = dev->virtqueue[i];
> >  		bool cur_ready = vq_is_ready(dev, vq);
> > --
> > 1.8.3.1
diff mbox series

Patch

diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 4ad28ba..dac06c9 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -2969,6 +2969,7 @@  static int is_vring_iotlb(struct virtio_net *dev,
 	struct vhu_msg_context ctx;
 	vhost_message_handler_t *msg_handler;
 	struct rte_vdpa_device *vdpa_dev;
+	int msg_result = RTE_VHOST_MSG_RESULT_OK;
 	int ret;
 	int unlock_required = 0;
 	bool handled;
@@ -3061,8 +3062,8 @@  static int is_vring_iotlb(struct virtio_net *dev,
 	handled = false;
 	if (dev->extern_ops.pre_msg_handle) {
 		RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
-		ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
-		switch (ret) {
+		msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
+		switch (msg_result) {
 		case RTE_VHOST_MSG_RESULT_REPLY:
 			send_vhost_reply(dev, fd, &ctx);
 			/* Fall-through */
@@ -3080,12 +3081,12 @@  static int is_vring_iotlb(struct virtio_net *dev,
 		goto skip_to_post_handle;
 
 	if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0) {
-		ret = RTE_VHOST_MSG_RESULT_ERR;
+		msg_result = RTE_VHOST_MSG_RESULT_ERR;
 	} else {
-		ret = msg_handler->callback(&dev, &ctx, fd);
+		msg_result = msg_handler->callback(&dev, &ctx, fd);
 	}
 
-	switch (ret) {
+	switch (msg_result) {
 	case RTE_VHOST_MSG_RESULT_ERR:
 		VHOST_LOG_CONFIG(dev->ifname, ERR,
 			"processing %s failed.\n",
@@ -3110,11 +3111,11 @@  static int is_vring_iotlb(struct virtio_net *dev,
 	}
 
 skip_to_post_handle:
-	if (ret != RTE_VHOST_MSG_RESULT_ERR &&
+	if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
 			dev->extern_ops.post_msg_handle) {
 		RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
-		ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
-		switch (ret) {
+		msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
+		switch (msg_result) {
 		case RTE_VHOST_MSG_RESULT_REPLY:
 			send_vhost_reply(dev, fd, &ctx);
 			/* Fall-through */
@@ -3133,7 +3134,7 @@  static int is_vring_iotlb(struct virtio_net *dev,
 			"vhost message (req: %d) was not handled.\n",
 			request);
 		close_msg_fds(&ctx);
-		ret = RTE_VHOST_MSG_RESULT_ERR;
+		msg_result = RTE_VHOST_MSG_RESULT_ERR;
 	}
 
 	/*
@@ -3142,17 +3143,16 @@  static int is_vring_iotlb(struct virtio_net *dev,
 	 * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
 	 */
 	if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
-		ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
+		ctx.msg.payload.u64 = msg_result == RTE_VHOST_MSG_RESULT_ERR;
 		ctx.msg.size = sizeof(ctx.msg.payload.u64);
 		ctx.fd_num = 0;
 		send_vhost_reply(dev, fd, &ctx);
-	} else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
+	} else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
 		VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message handling failed.\n");
 		ret = -1;
 		goto unlock;
 	}
 
-	ret = 0;
 	for (i = 0; i < dev->nr_vring; i++) {
 		struct vhost_virtqueue *vq = dev->virtqueue[i];
 		bool cur_ready = vq_is_ready(dev, vq);