[4/7] vhost: improve Vhost layer logs

Message ID 20211223083659.245766-5-maxime.coquelin@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series vhost: improve logging

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Maxime Coquelin Dec. 23, 2021, 8:36 a.m. UTC
  This patch prepends Vhost logs with the Vhost-user socket
path when available to ease filtering logs for a given port.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/vhost.c | 104 +++++++++++++++++++++++-----------------------
 1 file changed, 51 insertions(+), 53 deletions(-)
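
As an illustration of the convention this patch applies (the log macros themselves
are unchanged, only the format strings gain a "(%s)" prefix fed with dev->ifname,
which holds the Vhost-user socket path), here is a sketch of a call site and the
line it would emit; the socket path below is hypothetical:

    /* Sketch only: a call site following the new convention. */
    VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async metadata (qid: %d)\n",
            dev->ifname, queue_id);

    /*
     * With a socket at /tmp/vhost-user0.sock this would print something like:
     *   VHOST_CONFIG: (/tmp/vhost-user0.sock) failed to allocate async metadata (qid: 1)
     * so all logs of a given port can be filtered on its socket path.
     */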
  

Comments

David Marchand Jan. 4, 2022, 2:48 p.m. UTC | #1
On Thu, Dec 23, 2021 at 9:37 AM Maxime Coquelin
<maxime.coquelin@redhat.com> wrote:
>
> This patch prepends Vhost logs with the Vhost-user socket
> path when available to ease filtering logs for a given port.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  lib/vhost/vhost.c | 104 +++++++++++++++++++++++-----------------------
>  1 file changed, 51 insertions(+), 53 deletions(-)
>
> diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
> index e52d7f7bb6..dac0915ac0 100644
> --- a/lib/vhost/vhost.c
> +++ b/lib/vhost/vhost.c
> @@ -58,9 +58,8 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
>
>                 vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
>                 if (vhost_user_iotlb_miss(dev, iova, perm)) {
> -                       VHOST_LOG_CONFIG(ERR,
> -                               "IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
> -                               iova);
> +                       VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
> +                               dev->ifname, iova);

This change from CONFIG to DATA looks unrelated and deserves a separate fix
(which could go along with another fix, see below).
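
For context: VHOST_LOG_DATA is the data-path log class and, unlike
VHOST_LOG_CONFIG, it is gated by RTE_LOG_DP_LEVEL so it can be compiled out of
the hot path. A rough sketch of the distinction (not the verbatim macros from
lib/vhost/vhost.h):

    /* Rough sketch of the two vhost log classes, not the exact definitions. */
    #define VHOST_LOG_CONFIG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, vhost_config_log_level, \
            "VHOST_CONFIG: " fmt, ##args)

    #define VHOST_LOG_DATA(level, fmt, args...) \
        (void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ? \
            rte_log(RTE_LOG_ ## level, vhost_data_log_level, \
                "VHOST_DATA: " fmt, ##args) : 0)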


>                         vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
>                 }
>

[snip]

>
> @@ -422,9 +421,9 @@ translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
>                 gpa = hva_to_gpa(dev, hva, exp_size);
>                 if (!gpa) {
>                         VHOST_LOG_CONFIG(ERR,

translate_log_addr is called from vring_translate, which is called from the datapath.
So this log should be _DATA, in a separate fix.
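
i.e. something along these lines, as a sketch of that separate fix (not an
actual commit), in the hunk quoted above:

 		if (!gpa) {
-			VHOST_LOG_CONFIG(ERR,
+			VHOST_LOG_DATA(ERR,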


> -                               "VQ: Failed to find GPA for log_addr: 0x%"
> +                               "(%s) failed to find GPA for log_addr: 0x%"
>                                 PRIx64 " hva: 0x%" PRIx64 "\n",
> -                               log_addr, hva);
> +                               dev->ifname, log_addr, hva);
>                         return 0;
>                 }
>                 return gpa;
> @@ -551,16 +550,15 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
>         int numa_node = SOCKET_ID_ANY;
>
>         if (vring_idx >= VHOST_MAX_VRING) {
> -               VHOST_LOG_CONFIG(ERR,
> -                               "Failed not init vring, out of bound (%d)\n",
> -                               vring_idx);
> +               VHOST_LOG_CONFIG(ERR, "(%s) failed not init vring, out of bound (%d)\n",

failed to*


> +                               dev->ifname, vring_idx);
>                 return;
>         }
>
>         vq = dev->virtqueue[vring_idx];
>         if (!vq) {
> -               VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
> -                               vring_idx);
> +               VHOST_LOG_CONFIG(ERR, "(%s) virtqueue not allocated (%d)\n",
> +                               dev->ifname, vring_idx);
>                 return;
>         }
>

The rest lgtm.
  
Maxime Coquelin Jan. 25, 2022, 10:50 a.m. UTC | #2
On 1/4/22 15:48, David Marchand wrote:
> On Thu, Dec 23, 2021 at 9:37 AM Maxime Coquelin
> <maxime.coquelin@redhat.com> wrote:
>>
>> This patch prepends Vhost logs with the Vhost-user socket
>> path when available to ease filtering logs for a given port.
>>
>> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>> ---
>>   lib/vhost/vhost.c | 104 +++++++++++++++++++++++-----------------------
>>   1 file changed, 51 insertions(+), 53 deletions(-)
>>
>> diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
>> index e52d7f7bb6..dac0915ac0 100644
>> --- a/lib/vhost/vhost.c
>> +++ b/lib/vhost/vhost.c
>> @@ -58,9 +58,8 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
>>
>>                  vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
>>                  if (vhost_user_iotlb_miss(dev, iova, perm)) {
>> -                       VHOST_LOG_CONFIG(ERR,
>> -                               "IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
>> -                               iova);
>> +                       VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
>> +                               dev->ifname, iova);
> 
> This change from CONFIG to DATA looks unrelated and deserves a separate fix
> (which could go along with another fix, see below).
> 

I'm addressing this one, and the one below that you spotted, in a dedicated patch.

>>                          vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
>>                  }
>>
> 
> [snip]
> 
>>
>> @@ -422,9 +421,9 @@ translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
>>                  gpa = hva_to_gpa(dev, hva, exp_size);
>>                  if (!gpa) {
>>                          VHOST_LOG_CONFIG(ERR,
> 
> translate_log_addr is called from vring_translate, which is called from the datapath.
> So this log should be _DATA, in a separate fix.
> 
> 
>> -                               "VQ: Failed to find GPA for log_addr: 0x%"
>> +                               "(%s) failed to find GPA for log_addr: 0x%"
>>                                  PRIx64 " hva: 0x%" PRIx64 "\n",
>> -                               log_addr, hva);
>> +                               dev->ifname, log_addr, hva);
>>                          return 0;
>>                  }
>>                  return gpa;
>> @@ -551,16 +550,15 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
>>          int numa_node = SOCKET_ID_ANY;
>>
>>          if (vring_idx >= VHOST_MAX_VRING) {
>> -               VHOST_LOG_CONFIG(ERR,
>> -                               "Failed not init vring, out of bound (%d)\n",
>> -                               vring_idx);
>> +               VHOST_LOG_CONFIG(ERR, "(%s) failed not init vring, out of bound (%d)\n",
> 
> failed to*

Fixed in this patch.

> 
>> +                               dev->ifname, vring_idx);
>>                  return;
>>          }
>>
>>          vq = dev->virtqueue[vring_idx];
>>          if (!vq) {
>> -               VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
>> -                               vring_idx);
>> +               VHOST_LOG_CONFIG(ERR, "(%s) virtqueue not allocated (%d)\n",
>> +                               dev->ifname, vring_idx);
>>                  return;
>>          }
>>
> 
> The rest lgtm.
> 
> 

Thanks,
Maxime
  

Patch

diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index e52d7f7bb6..dac0915ac0 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -58,9 +58,8 @@  __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 		vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
 		if (vhost_user_iotlb_miss(dev, iova, perm)) {
-			VHOST_LOG_CONFIG(ERR,
-				"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
-				iova);
+			VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+				dev->ifname, iova);
 			vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
 		}
 
@@ -126,8 +125,8 @@  __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
 	if (map_len != len) {
 		VHOST_LOG_DATA(ERR,
-			"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
-			iova);
+			"(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+			dev->ifname, iova);
 		return;
 	}
 
@@ -243,8 +242,8 @@  __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
 	if (map_len != len) {
 		VHOST_LOG_DATA(ERR,
-			"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
-			iova);
+			"(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+			dev->ifname, iova);
 		return;
 	}
 
@@ -422,9 +421,9 @@  translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		gpa = hva_to_gpa(dev, hva, exp_size);
 		if (!gpa) {
 			VHOST_LOG_CONFIG(ERR,
-				"VQ: Failed to find GPA for log_addr: 0x%"
+				"(%s) failed to find GPA for log_addr: 0x%"
 				PRIx64 " hva: 0x%" PRIx64 "\n",
-				log_addr, hva);
+				dev->ifname, log_addr, hva);
 			return 0;
 		}
 		return gpa;
@@ -551,16 +550,15 @@  init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 	int numa_node = SOCKET_ID_ANY;
 
 	if (vring_idx >= VHOST_MAX_VRING) {
-		VHOST_LOG_CONFIG(ERR,
-				"Failed not init vring, out of bound (%d)\n",
-				vring_idx);
+		VHOST_LOG_CONFIG(ERR, "(%s) failed not init vring, out of bound (%d)\n",
+				dev->ifname, vring_idx);
 		return;
 	}
 
 	vq = dev->virtqueue[vring_idx];
 	if (!vq) {
-		VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
-				vring_idx);
+		VHOST_LOG_CONFIG(ERR, "(%s) virtqueue not allocated (%d)\n",
+				dev->ifname, vring_idx);
 		return;
 	}
 
@@ -572,8 +570,8 @@  init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
 #ifdef RTE_LIBRTE_VHOST_NUMA
 	if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
-		VHOST_LOG_CONFIG(ERR, "(%d) failed to query numa node: %s\n",
-			dev->vid, rte_strerror(errno));
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
+			dev->ifname, rte_strerror(errno));
 		numa_node = SOCKET_ID_ANY;
 	}
 #endif
@@ -590,15 +588,15 @@  reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
 	if (vring_idx >= VHOST_MAX_VRING) {
 		VHOST_LOG_CONFIG(ERR,
-				"Failed not init vring, out of bound (%d)\n",
-				vring_idx);
+				"(%s) failed to reset vring, out of bound (%d)\n",
+				dev->ifname, vring_idx);
 		return;
 	}
 
 	vq = dev->virtqueue[vring_idx];
 	if (!vq) {
-		VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
-				vring_idx);
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to reset vring, virtqueue not allocated (%d)\n",
+				dev->ifname, vring_idx);
 		return;
 	}
 
@@ -620,8 +618,8 @@  alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
 		vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
 		if (vq == NULL) {
-			VHOST_LOG_CONFIG(ERR,
-				"Failed to allocate memory for vring:%u.\n", i);
+			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for vring %u.\n",
+					dev->ifname, i);
 			return -1;
 		}
 
@@ -673,16 +671,14 @@  vhost_new_device(void)
 	}
 
 	if (i == MAX_VHOST_DEVICE) {
-		VHOST_LOG_CONFIG(ERR,
-			"Failed to find a free slot for new device.\n");
+		VHOST_LOG_CONFIG(ERR, "failed to find a free slot for new device.\n");
 		pthread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}
 
 	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
 	if (dev == NULL) {
-		VHOST_LOG_CONFIG(ERR,
-			"Failed to allocate memory for new dev.\n");
+		VHOST_LOG_CONFIG(ERR, "failed to allocate memory for new device.\n");
 		pthread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}
@@ -834,9 +830,8 @@  rte_vhost_get_numa_node(int vid)
 	ret = get_mempolicy(&numa_node, NULL, 0, dev,
 			    MPOL_F_NODE | MPOL_F_ADDR);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(ERR,
-			"(%d) failed to query numa node: %s\n",
-			vid, rte_strerror(errno));
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
+			dev->ifname, rte_strerror(errno));
 		return -1;
 	}
 
@@ -1470,8 +1465,8 @@  rte_vhost_rx_queue_count(int vid, uint16_t qid)
 		return 0;
 
 	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
-		VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
-			dev->vid, __func__, qid);
+		VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+			dev->ifname, __func__, qid);
 		return 0;
 	}
 
@@ -1636,23 +1631,23 @@  async_channel_register(int vid, uint16_t queue_id,
 
 	if (unlikely(vq->async)) {
 		VHOST_LOG_CONFIG(ERR,
-				"async register failed: already registered (vid %d, qid: %d)\n",
-				vid, queue_id);
+				"(%s) async register failed: already registered (qid: %d)\n",
+				dev->ifname, queue_id);
 		return -1;
 	}
 
 	async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
 	if (!async) {
-		VHOST_LOG_CONFIG(ERR, "failed to allocate async metadata (vid %d, qid: %d)\n",
-				vid, queue_id);
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async metadata (qid: %d)\n",
+				dev->ifname, queue_id);
 		return -1;
 	}
 
 	async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
 			RTE_CACHE_LINE_SIZE, node);
 	if (!async->pkts_info) {
-		VHOST_LOG_CONFIG(ERR, "failed to allocate async_pkts_info (vid %d, qid: %d)\n",
-				vid, queue_id);
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async_pkts_info (qid: %d)\n",
+				dev->ifname, queue_id);
 		goto out_free_async;
 	}
 
@@ -1661,8 +1656,8 @@  async_channel_register(int vid, uint16_t queue_id,
 				vq->size * sizeof(struct vring_used_elem_packed),
 				RTE_CACHE_LINE_SIZE, node);
 		if (!async->buffers_packed) {
-			VHOST_LOG_CONFIG(ERR, "failed to allocate async buffers (vid %d, qid: %d)\n",
-					vid, queue_id);
+			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async buffers (qid: %d)\n",
+					dev->ifname, queue_id);
 			goto out_free_inflight;
 		}
 	} else {
@@ -1670,8 +1665,8 @@  async_channel_register(int vid, uint16_t queue_id,
 				vq->size * sizeof(struct vring_used_elem),
 				RTE_CACHE_LINE_SIZE, node);
 		if (!async->descs_split) {
-			VHOST_LOG_CONFIG(ERR, "failed to allocate async descs (vid %d, qid: %d)\n",
-					vid, queue_id);
+			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async descs (qid: %d)\n",
+					dev->ifname, queue_id);
 			goto out_free_inflight;
 		}
 	}
@@ -1712,8 +1707,8 @@  rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 
 	if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
 		VHOST_LOG_CONFIG(ERR,
-			"async copy is not supported on non-inorder mode "
-			"(vid %d, qid: %d)\n", vid, queue_id);
+			"(%s) async copy is not supported on non-inorder mode (qid: %d)\n",
+			dev->ifname, queue_id);
 		return -1;
 	}
 
@@ -1749,8 +1744,8 @@  rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
 
 	if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
 		VHOST_LOG_CONFIG(ERR,
-			"async copy is not supported on non-inorder mode "
-			"(vid %d, qid: %d)\n", vid, queue_id);
+			"(%s) async copy is not supported on non-inorder mode (qid: %d)\n",
+			dev->ifname, queue_id);
 		return -1;
 	}
 
@@ -1785,14 +1780,15 @@  rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 		return ret;
 
 	if (!rte_spinlock_trylock(&vq->access_lock)) {
-		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
-			"virt queue busy.\n");
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel, virtqueue busy.\n",
+				dev->ifname);
 		return -1;
 	}
 
 	if (vq->async->pkts_inflight_n) {
-		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
-			"async inflight packets must be completed before unregistration.\n");
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel. "
+			"async inflight packets must be completed before unregistration.\n",
+			dev->ifname);
 		ret = -1;
 		goto out;
 	}
@@ -1825,8 +1821,9 @@  rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
 		return 0;
 
 	if (vq->async->pkts_inflight_n) {
-		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
-			"async inflight packets must be completed before unregistration.\n");
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel. "
+			"async inflight packets must be completed before unregistration.\n",
+			dev->ifname);
 		return -1;
 	}
 
@@ -1857,8 +1854,9 @@  rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
 		return ret;
 
 	if (!rte_spinlock_trylock(&vq->access_lock)) {
-		VHOST_LOG_CONFIG(DEBUG, "Failed to check in-flight packets. "
-			"virt queue busy.\n");
+		VHOST_LOG_CONFIG(DEBUG,
+			"(%s) failed to check in-flight packets. virtqueue busy.\n",
+			dev->ifname);
 		return ret;
 	}