[v4,04/14] vhost: add single packet dequeue function

Message ID 20191009133849.69002-5-yong.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Headers
Series: vhost packed ring performance optimization

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Marvin Liu Oct. 9, 2019, 1:38 p.m. UTC
  Add a vhost single-packet dequeue function for the packed ring and
meanwhile leave space for the shadow used-ring update function.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
  

Comments

Maxime Coquelin Oct. 11, 2019, 1:04 p.m. UTC | #1
On 10/9/19 3:38 PM, Marvin Liu wrote:
> Add vhost single packet dequeue function for packed ring and meanwhile
> left space for shadow used ring update function.
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
  

Patch

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 5e08f7d9b..17aabe8eb 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1571,6 +1571,60 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return i;
 }
 
+/*
+ * Dequeue one packet from a packed virtqueue into a newly allocated mbuf.
+ *
+ * Fills a buffer vector for the descriptor chain at vq->last_avail_idx,
+ * allocates an mbuf from mbuf_pool and copies the descriptor data into it.
+ * On success, *pkts holds the filled mbuf, *buf_id the descriptor buffer
+ * id and *desc_count the number of descriptors consumed — both outputs
+ * are for the caller's later used-ring update.
+ *
+ * Returns 0 on success, -1 on failure (descriptor fetch, mbuf allocation
+ * or copy error). The mbuf is freed here if the copy fails.
+ */
+static __rte_always_inline int
+vhost_dequeue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t *buf_id,
+	uint16_t *desc_count)
+{
+	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	uint32_t dummy_len; /* length out-param of fill_vec_buf_packed, unused here */
+	uint16_t nr_vec = 0;
+	int err;
+
+	if (unlikely(fill_vec_buf_packed(dev, vq,
+					 vq->last_avail_idx, desc_count,
+					 buf_vec, &nr_vec,
+					 buf_id, &dummy_len,
+					 VHOST_ACCESS_RO) < 0))
+		return -1;
+
+	*pkts = rte_pktmbuf_alloc(mbuf_pool);
+	if (unlikely(*pkts == NULL)) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"Failed to allocate memory for mbuf.\n");
+		return -1;
+	}
+
+	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+				mbuf_pool);
+	if (unlikely(err)) {
+		/* Copy failed: release the mbuf allocated above. */
+		rte_pktmbuf_free(*pkts);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Dequeue a single packet from a packed ring and advance the avail index.
+ *
+ * Marked __rte_unused because it has no caller in this patch — presumably
+ * it is wired up by a later patch in this series (the commit message says
+ * space is left for the shadow used-ring update function); the attribute
+ * suppresses the unused-function warning in the meantime.
+ *
+ * Returns 0 on success, -1 if the dequeue failed.
+ */
+static __rte_unused int
+virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts)
+{
+
+	uint16_t buf_id, desc_count;
+
+	if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+					&desc_count))
+		return -1;
+
+	/*
+	 * Advance past the consumed descriptors, wrapping at ring size
+	 * and toggling the avail wrap counter on wrap-around.
+	 */
+	vq->last_avail_idx += desc_count;
+	if (vq->last_avail_idx >= vq->size) {
+		vq->last_avail_idx -= vq->size;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	return 0;
+}
+
 static __rte_noinline uint16_t
 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)