[v2,4/4] vhost: improve RARP handling in dequeue paths

Message ID 20250115125938.2699577-5-maxime.coquelin@redhat.com (mailing list archive)
State Superseded
Delegated to: Maxime Coquelin
Headers
Series: vhost: fix and improve dequeue error path

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/github-robot: build success github build: passed
ci/intel-Functional success Functional PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-marvell-Functional success Functional Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-unit-amd64-testing success Testing PASS
ci/iol-abi-testing success Testing PASS
ci/iol-unit-arm64-testing success Testing PASS
ci/iol-sample-apps-testing success Testing PASS
ci/iol-compile-arm64-testing success Testing PASS
ci/iol-compile-amd64-testing success Testing PASS

Commit Message

Maxime Coquelin Jan. 15, 2025, 12:59 p.m. UTC
With previous refactoring, we can now simplify the RARP
packet injection handling in both the sync and async
dequeue paths.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/virtio_net.c | 42 ++++++++++++++++++------------------------
 1 file changed, 18 insertions(+), 24 deletions(-)
  

Comments

David Marchand Jan. 15, 2025, 4:46 p.m. UTC | #1
On Wed, Jan 15, 2025 at 1:59 PM Maxime Coquelin
<maxime.coquelin@redhat.com> wrote:
>
> With previous refactoring, we can now simplify the RARP
> packet injection handling in both the sync and async
> dequeue paths.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  lib/vhost/virtio_net.c | 42 ++++++++++++++++++------------------------
>  1 file changed, 18 insertions(+), 24 deletions(-)
>
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 59ea2d16a5..fab45ebd54 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -3662,21 +3662,23 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
>                  * learning table will get updated first.
>                  */
>                 pkts[0] = rarp_mbuf;

Well, ideally it would be pkts[nb_rx], but see comment below.

> -               vhost_queue_stats_update(dev, vq, pkts, 1);
> -               pkts++;
> -               count -= 1;
> +               nb_rx += 1;
>         }

With this change, the rarp_mbuf variable is unneeded.
You can store to pkts[nb_rx] when calling rte_net_make_rarp_packet()
(and at the same time, move the comment about injecting the packet to
the head of the array).



>
>         if (vq_is_packed(dev)) {
>                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
> -                       nb_rx = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
> +                       nb_rx += virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool,
> +                                       pkts + nb_rx, count - nb_rx);
>                 else
> -                       nb_rx = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
> +                       nb_rx += virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool,
> +                                       pkts + nb_rx, count - nb_rx);
>         } else {
>                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
> -                       nb_rx = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
> +                       nb_rx += virtio_dev_tx_split_legacy(dev, vq, mbuf_pool,
> +                                       pkts + nb_rx, count - nb_rx);
>                 else
> -                       nb_rx = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
> +                       nb_rx += virtio_dev_tx_split_compliant(dev, vq, mbuf_pool,
> +                                       pkts + nb_rx, count - nb_rx);
>         }
>
>         vhost_queue_stats_update(dev, vq, pkts, nb_rx);
> @@ -3687,9 +3689,6 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
>  out_access_unlock:
>         rte_rwlock_read_unlock(&vq->access_lock);
>
> -       if (unlikely(rarp_mbuf != NULL))
> -               nb_rx += 1;
> -
>  out_no_unlock:
>         return nb_rx;
>  }
> @@ -4285,25 +4284,23 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
>                  * learning table will get updated first.
>                  */
>                 pkts[0] = rarp_mbuf;
> -               vhost_queue_stats_update(dev, vq, pkts, 1);
> -               pkts++;
> -               count -= 1;
> +               nb_rx += 1;
>         }

Idem.

>
>         if (vq_is_packed(dev)) {
>                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
> -                       nb_rx = virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
> -                                       pkts, count, dma_id, vchan_id);
> +                       nb_rx += virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
> +                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
>                 else
> -                       nb_rx = virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
> -                                       pkts, count, dma_id, vchan_id);
> +                       nb_rx += virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
> +                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
>         } else {
>                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
> -                       nb_rx = virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
> -                                       pkts, count, dma_id, vchan_id);
> +                       nb_rx += virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
> +                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
>                 else
> -                       nb_rx = virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
> -                                       pkts, count, dma_id, vchan_id);
> +                       nb_rx += virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
> +                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
>         }
>
>         *nr_inflight = vq->async->pkts_inflight_n;
> @@ -4315,9 +4312,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
>  out_access_unlock:
>         rte_rwlock_read_unlock(&vq->access_lock);
>
> -       if (unlikely(rarp_mbuf != NULL))
> -               nb_rx += 1;
> -
>  out_no_unlock:
>         return nb_rx;
>  }
> --
> 2.47.1
>
  
Maxime Coquelin Jan. 16, 2025, 9:50 a.m. UTC | #2
On 1/15/25 5:46 PM, David Marchand wrote:
> On Wed, Jan 15, 2025 at 1:59 PM Maxime Coquelin
> <maxime.coquelin@redhat.com> wrote:
>>
>> With previous refactoring, we can now simplify the RARP
>> packet injection handling in both the sync and async
>> dequeue paths.
>>
>> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>> ---
>>   lib/vhost/virtio_net.c | 42 ++++++++++++++++++------------------------
>>   1 file changed, 18 insertions(+), 24 deletions(-)
>>
>> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
>> index 59ea2d16a5..fab45ebd54 100644
>> --- a/lib/vhost/virtio_net.c
>> +++ b/lib/vhost/virtio_net.c
>> @@ -3662,21 +3662,23 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
>>                   * learning table will get updated first.
>>                   */
>>                  pkts[0] = rarp_mbuf;
> 
> Well, ideally it would be pkts[nb_rx], but see comment below.
> 
>> -               vhost_queue_stats_update(dev, vq, pkts, 1);
>> -               pkts++;
>> -               count -= 1;
>> +               nb_rx += 1;
>>          }
> 
> With this change, the rarp_mbuf variable is unneeded.
> You can store to pkts[nb_rx] when calling rte_net_make_rarp_packet()
> (and at the same time, move the comment about injecting the packet to
> the head of the array).

I agree with this further simplification.

Thanks,
Maxime

> 
> 
> 
>>
>>          if (vq_is_packed(dev)) {
>>                  if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
>> -                       nb_rx = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
>> +                       nb_rx += virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool,
>> +                                       pkts + nb_rx, count - nb_rx);
>>                  else
>> -                       nb_rx = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
>> +                       nb_rx += virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool,
>> +                                       pkts + nb_rx, count - nb_rx);
>>          } else {
>>                  if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
>> -                       nb_rx = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
>> +                       nb_rx += virtio_dev_tx_split_legacy(dev, vq, mbuf_pool,
>> +                                       pkts + nb_rx, count - nb_rx);
>>                  else
>> -                       nb_rx = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
>> +                       nb_rx += virtio_dev_tx_split_compliant(dev, vq, mbuf_pool,
>> +                                       pkts + nb_rx, count - nb_rx);
>>          }
>>
>>          vhost_queue_stats_update(dev, vq, pkts, nb_rx);
>> @@ -3687,9 +3689,6 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
>>   out_access_unlock:
>>          rte_rwlock_read_unlock(&vq->access_lock);
>>
>> -       if (unlikely(rarp_mbuf != NULL))
>> -               nb_rx += 1;
>> -
>>   out_no_unlock:
>>          return nb_rx;
>>   }
>> @@ -4285,25 +4284,23 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
>>                   * learning table will get updated first.
>>                   */
>>                  pkts[0] = rarp_mbuf;
>> -               vhost_queue_stats_update(dev, vq, pkts, 1);
>> -               pkts++;
>> -               count -= 1;
>> +               nb_rx += 1;
>>          }
> 
> Idem.
> 
>>
>>          if (vq_is_packed(dev)) {
>>                  if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
>> -                       nb_rx = virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
>> -                                       pkts, count, dma_id, vchan_id);
>> +                       nb_rx += virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
>> +                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
>>                  else
>> -                       nb_rx = virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
>> -                                       pkts, count, dma_id, vchan_id);
>> +                       nb_rx += virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
>> +                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
>>          } else {
>>                  if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
>> -                       nb_rx = virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
>> -                                       pkts, count, dma_id, vchan_id);
>> +                       nb_rx += virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
>> +                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
>>                  else
>> -                       nb_rx = virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
>> -                                       pkts, count, dma_id, vchan_id);
>> +                       nb_rx += virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
>> +                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
>>          }
>>
>>          *nr_inflight = vq->async->pkts_inflight_n;
>> @@ -4315,9 +4312,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
>>   out_access_unlock:
>>          rte_rwlock_read_unlock(&vq->access_lock);
>>
>> -       if (unlikely(rarp_mbuf != NULL))
>> -               nb_rx += 1;
>> -
>>   out_no_unlock:
>>          return nb_rx;
>>   }
>> --
>> 2.47.1
>>
> 
>
  

Patch

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 59ea2d16a5..fab45ebd54 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -3662,21 +3662,23 @@  rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 		 * learning table will get updated first.
 		 */
 		pkts[0] = rarp_mbuf;
-		vhost_queue_stats_update(dev, vq, pkts, 1);
-		pkts++;
-		count -= 1;
+		nb_rx += 1;
 	}
 
 	if (vq_is_packed(dev)) {
 		if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-			nb_rx = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
+			nb_rx += virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx);
 		else
-			nb_rx = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
+			nb_rx += virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx);
 	} else {
 		if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-			nb_rx = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
+			nb_rx += virtio_dev_tx_split_legacy(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx);
 		else
-			nb_rx = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
+			nb_rx += virtio_dev_tx_split_compliant(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx);
 	}
 
 	vhost_queue_stats_update(dev, vq, pkts, nb_rx);
@@ -3687,9 +3689,6 @@  rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 out_access_unlock:
 	rte_rwlock_read_unlock(&vq->access_lock);
 
-	if (unlikely(rarp_mbuf != NULL))
-		nb_rx += 1;
-
 out_no_unlock:
 	return nb_rx;
 }
@@ -4285,25 +4284,23 @@  rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 		 * learning table will get updated first.
 		 */
 		pkts[0] = rarp_mbuf;
-		vhost_queue_stats_update(dev, vq, pkts, 1);
-		pkts++;
-		count -= 1;
+		nb_rx += 1;
 	}
 
 	if (vq_is_packed(dev)) {
 		if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-			nb_rx = virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
-					pkts, count, dma_id, vchan_id);
+			nb_rx += virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
 		else
-			nb_rx = virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
-					pkts, count, dma_id, vchan_id);
+			nb_rx += virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
 	} else {
 		if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-			nb_rx = virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
-					pkts, count, dma_id, vchan_id);
+			nb_rx += virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
 		else
-			nb_rx = virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
-					pkts, count, dma_id, vchan_id);
+			nb_rx += virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
 	}
 
 	*nr_inflight = vq->async->pkts_inflight_n;
@@ -4315,9 +4312,6 @@  rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 out_access_unlock:
 	rte_rwlock_read_unlock(&vq->access_lock);
 
-	if (unlikely(rarp_mbuf != NULL))
-		nb_rx += 1;
-
 out_no_unlock:
 	return nb_rx;
 }