[v1,14/14] vhost: merge sync and async mbuf to desc filling

Message ID: 20211018130229.308694-15-maxime.coquelin@redhat.com (mailing list archive)
State: Superseded, archived
Delegated to: Maxime Coquelin
Series: vhost: clean-up and simplify async implementation

Checks

Context                          Check    Description
ci/checkpatch                    success  coding style OK
ci/github-robot: build           success  github build: passed
ci/iol-broadcom-Functional       success  Functional Testing PASS
ci/iol-broadcom-Performance      success  Performance Testing PASS
ci/iol-x86_64-unit-testing       success  Testing PASS
ci/iol-x86_64-compile-testing    success  Testing PASS
ci/iol-intel-Performance         success  Performance Testing PASS
ci/iol-intel-Functional          success  Functional Testing PASS
ci/iol-aarch64-compile-testing   fail     Testing issues
ci/iol-mellanox-Performance      success  Performance Testing PASS
ci/Intel-compilation             success  Compilation OK
ci/intel-Testing                 success  Testing PASS

Commit Message

Maxime Coquelin Oct. 18, 2021, 1:02 p.m. UTC
  This patch merges copy_mbuf_to_desc(), used by the sync
path, with async_mbuf_to_desc(), used by the async path.

These two complex functions are mostly identical, so merging
them will make maintenance easier.

In order not to degrade performance, the patch introduces
a boolean parameter specifying whether the function is
called in an async context. This boolean is passed as a
compile-time constant to the always-inlined function, so
the compiler optimizes the dead branch out, as sketched
below.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/virtio_net.c | 153 +++++++----------------------------------
 1 file changed, 26 insertions(+), 127 deletions(-)
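
Why the boolean is free at run time can be illustrated with a
minimal, self-contained sketch of the same technique; fill_desc()
and its callers below are hypothetical names, not the vhost code
itself. Each call site passes a literal, so after forced inlining
the compiler sees a compile-time constant and drops the untaken
branch entirely.

	#include <stdbool.h>
	#include <stdint.h>

	/* Same pattern as mbuf_to_desc(): forcing inlining guarantees
	 * the constant is_async argument is propagated into the body. */
	#define force_inline inline __attribute__((always_inline))

	static force_inline int
	fill_desc(uint32_t len, bool is_async)
	{
		if (is_async)
			return (int)len + 1;	/* stands in for the async-only path */

		return (int)len;		/* stands in for the sync-only path */
	}

	/* Each caller passes a literal, so the generated code is
	 * branch-free: sync_fill() contains no trace of the async
	 * path, and vice versa. */
	int sync_fill(uint32_t len)  { return fill_desc(len, false); }
	int async_fill(uint32_t len) { return fill_desc(len, true); }

Building this at -O1 or higher and inspecting the assembly of the
two callers would show no conditional jump in either one.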
  

Patch

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 7e66113006..0e1fd01e31 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -913,9 +913,9 @@  sync_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline int
-copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
-			    struct rte_mbuf *m, struct buf_vector *buf_vec,
-			    uint16_t nr_vec, uint16_t num_buffers)
+mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		struct rte_mbuf *m, struct buf_vector *buf_vec,
+		uint16_t nr_vec, uint16_t num_buffers, bool is_async)
 {
 	uint32_t vec_idx = 0;
 	uint32_t mbuf_offset, mbuf_avail;
@@ -925,115 +925,7 @@  copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint64_t hdr_addr;
 	struct rte_mbuf *hdr_mbuf;
 	struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
-
-	if (unlikely(m == NULL))
-		return -1;
-
-	buf_addr = buf_vec[vec_idx].buf_addr;
-	buf_iova = buf_vec[vec_idx].buf_iova;
-	buf_len = buf_vec[vec_idx].buf_len;
-
-	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
-		return -1;
-
-	hdr_mbuf = m;
-	hdr_addr = buf_addr;
-	if (unlikely(buf_len < dev->vhost_hlen)) {
-		memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
-		hdr = &tmp_hdr;
-	} else
-		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
-
-	VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
-		dev->vid, num_buffers);
-
-	if (unlikely(buf_len < dev->vhost_hlen)) {
-		buf_offset = dev->vhost_hlen - buf_len;
-		vec_idx++;
-		buf_addr = buf_vec[vec_idx].buf_addr;
-		buf_iova = buf_vec[vec_idx].buf_iova;
-		buf_len = buf_vec[vec_idx].buf_len;
-		buf_avail = buf_len - buf_offset;
-	} else {
-		buf_offset = dev->vhost_hlen;
-		buf_avail = buf_len - dev->vhost_hlen;
-	}
-
-	mbuf_avail  = rte_pktmbuf_data_len(m);
-	mbuf_offset = 0;
-	while (mbuf_avail != 0 || m->next != NULL) {
-		/* done with current buf, get the next one */
-		if (buf_avail == 0) {
-			vec_idx++;
-			if (unlikely(vec_idx >= nr_vec))
-				goto error;
-
-			buf_addr = buf_vec[vec_idx].buf_addr;
-			buf_iova = buf_vec[vec_idx].buf_iova;
-			buf_len = buf_vec[vec_idx].buf_len;
-
-			buf_offset = 0;
-			buf_avail  = buf_len;
-		}
-
-		/* done with current mbuf, get the next one */
-		if (mbuf_avail == 0) {
-			m = m->next;
-
-			mbuf_offset = 0;
-			mbuf_avail  = rte_pktmbuf_data_len(m);
-		}
-
-		if (hdr_addr) {
-			virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
-			if (rxvq_is_mergeable(dev))
-				ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
-						num_buffers);
-
-			if (unlikely(hdr == &tmp_hdr)) {
-				copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
-			} else {
-				PRINT_PACKET(dev, (uintptr_t)hdr_addr,
-						dev->vhost_hlen, 0);
-				vhost_log_cache_write_iova(dev, vq,
-						buf_vec[0].buf_iova,
-						dev->vhost_hlen);
-			}
-
-			hdr_addr = 0;
-		}
-
-		cpy_len = RTE_MIN(buf_avail, mbuf_avail);
-
-		sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
-				buf_addr + buf_offset,
-				buf_iova + buf_offset, cpy_len);
-
-		mbuf_avail  -= cpy_len;
-		mbuf_offset += cpy_len;
-		buf_avail  -= cpy_len;
-		buf_offset += cpy_len;
-	}
-
-	return 0;
-error:
-	return -1;
-}
-
-static __rte_always_inline int
-async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
-			struct rte_mbuf *m, struct buf_vector *buf_vec,
-			uint16_t nr_vec, uint16_t num_buffers)
-{
 	struct vhost_async *async = vq->async;
-	struct rte_mbuf *hdr_mbuf;
-	struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
-	uint64_t buf_addr, buf_iova;
-	uint64_t hdr_addr;
-	uint32_t vec_idx = 0;
-	uint32_t mbuf_offset, mbuf_avail;
-	uint32_t buf_offset, buf_avail;
-	uint32_t cpy_len, buf_len;
 
 	if (unlikely(m == NULL))
 		return -1;
@@ -1071,8 +963,10 @@  async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	mbuf_avail  = rte_pktmbuf_data_len(m);
 	mbuf_offset = 0;
 
-	if (async_iter_initialize(async))
-		return -1;
+	if (is_async) {
+		if (async_iter_initialize(async))
+			return -1;
+	}
 
 	while (mbuf_avail != 0 || m->next != NULL) {
 		/* done with current buf, get the next one */
@@ -1086,7 +980,7 @@  async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			buf_len = buf_vec[vec_idx].buf_len;
 
 			buf_offset = 0;
-			buf_avail = buf_len;
+			buf_avail  = buf_len;
 		}
 
 		/* done with current mbuf, get the next one */
@@ -1094,7 +988,7 @@  async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			m = m->next;
 
 			mbuf_offset = 0;
-			mbuf_avail = rte_pktmbuf_data_len(m);
+			mbuf_avail  = rte_pktmbuf_data_len(m);
 		}
 
 		if (hdr_addr) {
@@ -1118,9 +1012,14 @@  async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 		cpy_len = RTE_MIN(buf_avail, mbuf_avail);
 
-		if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
-					buf_iova + buf_offset, cpy_len) < 0) {
-			goto error;
+		if (is_async) {
+			if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
+						buf_iova + buf_offset, cpy_len) < 0)
+				goto error;
+		} else {
+			sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
+					buf_addr + buf_offset,
+					buf_iova + buf_offset, cpy_len);
 		}
 
 		mbuf_avail  -= cpy_len;
@@ -1129,11 +1028,13 @@  async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		buf_offset += cpy_len;
 	}
 
-	async_iter_finalize(async);
+	if (is_async)
+		async_iter_finalize(async);
 
 	return 0;
 error:
-	async_iter_cancel(async);
+	if (is_async)
+		async_iter_cancel(async);
 
 	return -1;
 }
@@ -1192,7 +1093,7 @@  vhost_enqueue_single_packed(struct virtio_net *dev,
 			avail_idx -= vq->size;
 	}
 
-	if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
+	if (mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers, false) < 0)
 		return -1;
 
 	vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
@@ -1236,9 +1137,8 @@  virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			dev->vid, vq->last_avail_idx,
 			vq->last_avail_idx + num_buffers);
 
-		if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
-						buf_vec, nr_vec,
-						num_buffers) < 0) {
+		if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,
+					num_buffers, false) < 0) {
 			vq->shadow_used_idx -= num_buffers;
 			break;
 		}
@@ -1582,7 +1482,7 @@  virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
 			dev->vid, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
 
-		if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers) < 0) {
+		if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {
 			vq->shadow_used_idx -= num_buffers;
 			break;
 		}
@@ -1751,8 +1651,7 @@  vhost_enqueue_async_packed(struct virtio_net *dev,
 			avail_idx -= vq->size;
 	}
 
-	if (unlikely(async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec,
-					*nr_buffers) < 0))
+	if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0))
 		return -1;
 
 	vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);