@@ -85,6 +85,10 @@ struct vhost_virtqueue {
 	/* Physical address of used ring, for logging */
 	uint64_t log_guest_addr;
+
+	/* Shadow of used-ring updates, flushed to the guest ring in one batch */
+	struct vring_used_elem *shadow_used_ring;
+	uint32_t shadow_used_idx;
 } __rte_cache_aligned;
 /* Old kernels have no such macro defined */
@@ -134,16 +134,53 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
 }
 static inline void __attribute__((always_inline))
-update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
-		uint32_t desc_chain_head, uint32_t desc_chain_len)
+update_used_ring(struct vhost_virtqueue *vq, uint32_t desc_chain_head,
+		uint32_t desc_chain_len)
 {
-	uint32_t used_idx_round = vq->last_used_idx & (vq->size - 1);
+	vq->shadow_used_ring[vq->shadow_used_idx].id = desc_chain_head;
+	vq->shadow_used_ring[vq->shadow_used_idx].len = desc_chain_len;
+	vq->shadow_used_idx++;
+}
+
+/* Copy the batched shadow entries to the guest used ring, one wrap at most. */
+static inline void __attribute__((always_inline))
+flush_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		uint32_t used_idx_start)
+{
+	/* '<=': a batch ending exactly at the ring end needs no wrap copy */
+	if (used_idx_start + vq->shadow_used_idx <= vq->size) {
+		rte_memcpy(&vq->used->ring[used_idx_start],
+				&vq->shadow_used_ring[0],
+				vq->shadow_used_idx *
+				sizeof(struct vring_used_elem));
+		vhost_log_used_vring(dev, vq,
+				offsetof(struct vring_used,
+					ring[used_idx_start]),
+				vq->shadow_used_idx *
+				sizeof(struct vring_used_elem));
+	} else {
+		uint32_t part_1 = vq->size - used_idx_start;
+		uint32_t part_2 = vq->shadow_used_idx - part_1;
-	vq->used->ring[used_idx_round].id = desc_chain_head;
-	vq->used->ring[used_idx_round].len = desc_chain_len;
-	vhost_log_used_vring(dev, vq, offsetof(struct vring_used,
-				ring[used_idx_round]),
-			sizeof(vq->used->ring[used_idx_round]));
+		rte_memcpy(&vq->used->ring[used_idx_start],
+				&vq->shadow_used_ring[0],
+				part_1 *
+				sizeof(struct vring_used_elem));
+		vhost_log_used_vring(dev, vq,
+				offsetof(struct vring_used,
+					ring[used_idx_start]),
+				part_1 *
+				sizeof(struct vring_used_elem));
+		rte_memcpy(&vq->used->ring[0],
+				&vq->shadow_used_ring[part_1],
+				part_2 *
+				sizeof(struct vring_used_elem));
+		vhost_log_used_vring(dev, vq,
+				offsetof(struct vring_used,
+					ring[0]),
+				part_2 *
+				sizeof(struct vring_used_elem));
+	}
 }
 static inline uint32_t __attribute__((always_inline))
@@ -208,7 +243,6 @@ enqueue_packet(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		goto error;
 	} else if (is_mrg_rxbuf) {
 		/* start with the next desc chain */
-		update_used_ring(dev, vq, desc_chain_head,
-				desc_chain_len);
+		update_used_ring(vq, desc_chain_head, desc_chain_len);
 		vq->last_used_idx++;
 		extra_buffers++;
@@ -245,7 +280,7 @@ enqueue_packet(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		desc_chain_len += copy_len;
 	}
-	update_used_ring(dev, vq, desc_chain_head, desc_chain_len);
+	update_used_ring(vq, desc_chain_head, desc_chain_len);
 	vq->last_used_idx++;
 	return 0;
@@ -276,6 +311,7 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 {
 	struct vhost_virtqueue *vq;
 	struct virtio_net *dev;
+	uint32_t used_idx_start;
 	uint32_t pkt_left = count;
 	uint32_t pkt_idx = 0;
 	uint32_t pkt_sent = 0;
@@ -302,6 +338,8 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 		is_mrg_rxbuf = 1;
 	/* start enqueuing packets 1 by 1 */
+	vq->shadow_used_idx = 0;
+	used_idx_start = vq->last_used_idx & (vq->size - 1);
 	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
 	while (pkt_left && avail_idx != vq->last_used_idx) {
 		/* prefetch the next desc */
@@ -319,6 +357,10 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 		pkt_left--;
 	}
+	/* flush the whole batch of shadow used entries to the guest ring */
+	if (likely(vq->shadow_used_idx > 0))
+		flush_used_ring(dev, vq, used_idx_start);
+
 	/* update used idx and kick the guest if necessary */
 	if (pkt_sent)
 		notify_guest(dev, vq);
@@ -152,10 +152,23 @@ cleanup_device(struct virtio_net *dev, int destroy)
 static void
 free_device(struct virtio_net *dev)
 {
+	struct vhost_virtqueue *vq_0;
+	struct vhost_virtqueue *vq_1;
 	uint32_t i;
-	for (i = 0; i < dev->virt_qp_nb; i++)
-		rte_free(dev->virtqueue[i * VIRTIO_QNUM]);
+	for (i = 0; i < dev->virt_qp_nb; i++) {
+		vq_0 = dev->virtqueue[i * VIRTIO_QNUM];
+		vq_1 = dev->virtqueue[i * VIRTIO_QNUM + 1];
+
+		/* rte_free() is a no-op on NULL, no guard needed */
+		rte_free(vq_0->shadow_used_ring);
+		vq_0->shadow_used_ring = NULL;
+		rte_free(vq_1->shadow_used_ring);
+		vq_1->shadow_used_ring = NULL;
+
+		/* rxq/txq pair was allocated as one block: free once via vq_0 */
+		rte_free(vq_0);
+	}
 	rte_free(dev);
 }
@@ -418,13 +434,26 @@ int
 vhost_set_vring_num(int vid, struct vhost_vring_state *state)
 {
 	struct virtio_net *dev;
+	struct vhost_virtqueue *vq;
 	dev = get_device(vid);
 	if (dev == NULL)
 		return -1;
 	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
-	dev->virtqueue[state->index]->size = state->num;
+	vq = dev->virtqueue[state->index];
+	vq->size = state->num;
+	/* (re)allocate: the ring size may differ from a previous call */
+	rte_free(vq->shadow_used_ring);
+	vq->shadow_used_ring = rte_malloc(NULL,
+			vq->size * sizeof(struct vring_used_elem),
+			RTE_CACHE_LINE_SIZE);
+	if (!vq->shadow_used_ring) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"Failed to allocate memory"
+			" for shadow used ring.\n");
+		return -1;
+	}
 	return 0;
 }
@@ -610,6 +639,7 @@ int
 vhost_get_vring_base(int vid, uint32_t index,
 		struct vhost_vring_state *state)
 {
+	struct vhost_virtqueue *vq;
 	struct virtio_net *dev;
 	dev = get_device(vid);
@@ -617,6 +647,11 @@ vhost_get_vring_base(int vid, uint32_t index,
 		return -1;
 	state->index = index;
+	vq = dev->virtqueue[state->index];
+	/* rte_free() is a no-op on NULL; ring is rebuilt on next SET_VRING_NUM */
+	rte_free(vq->shadow_used_ring);
+	vq->shadow_used_ring = NULL;
+
 	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
-	state->num = dev->virtqueue[state->index]->last_used_idx;
+	state->num = vq->last_used_idx;