--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -269,6 +269,23 @@ struct vhost_async {
 	};
 };
 
+#define VHOST_MAX_VRING 0x100
+#define VHOST_MAX_QUEUE_PAIRS 0x80
+
+struct __rte_cache_aligned vhost_reconnect_vring {
+	uint16_t last_avail_idx;
+	bool avail_wrap_counter;
+};
+
+struct vhost_reconnect_data {
+	uint32_t version;
+	uint64_t features;
+	uint8_t status;
+	struct virtio_net_config config;
+	uint32_t nr_vrings;
+	struct vhost_reconnect_vring vring[VHOST_MAX_VRING];
+};
+
 /**
  * Structure contains variables relevant to RX/TX virtqueues.
  */
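
For context, here is a minimal sketch (not part of the patch) of how a backend could back struct vhost_reconnect_data with a memory-mapped file, so that the logged ring state survives a backend crash or restart. The helper name and storage path are hypothetical, error handling is reduced to the essentials, and vhost.h above is assumed visible for the struct type:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static struct vhost_reconnect_data *
vhost_reconnect_map(const char *path)
{
	struct vhost_reconnect_data *log;
	int fd;

	fd = open(path, O_CREAT | O_RDWR, 0600);
	if (fd < 0)
		return NULL;

	/* Size the file to hold exactly one reconnect log. */
	if (ftruncate(fd, sizeof(*log)) != 0) {
		close(fd);
		return NULL;
	}

	/* MAP_SHARED: stores to the struct reach the file without write(). */
	log = mmap(NULL, sizeof(*log), PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	close(fd);

	return log == MAP_FAILED ? NULL : log;
}
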
@@ -351,6 +368,7 @@ struct __rte_cache_aligned vhost_virtqueue {
 	struct virtqueue_stats stats;
 
 	RTE_ATOMIC(bool) irq_pending;
+	struct vhost_reconnect_vring *reconnect_log;
 };
 
 /* Virtio device status as per Virtio specification */
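
Each virtqueue carries its own reconnect pointer rather than going through the device; presumably it is wired at setup time to the matching slot of the vring[] array in struct vhost_reconnect_data above, along these lines (illustrative only; the actual wiring happens elsewhere in the series):

	vq->reconnect_log = &dev->reconnect_log->vring[vq_index];
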
@@ -362,9 +380,6 @@ struct __rte_cache_aligned vhost_virtqueue {
 #define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET 0x40
 #define VIRTIO_DEVICE_STATUS_FAILED 0x80
 
-#define VHOST_MAX_VRING 0x100
-#define VHOST_MAX_QUEUE_PAIRS 0x80
-
 /* Declare IOMMU related bits for older kernels */
 #ifndef VIRTIO_F_IOMMU_PLATFORM
@@ -538,8 +553,26 @@ struct __rte_cache_aligned virtio_net {
 	struct rte_vhost_user_extern_ops extern_ops;
 
 	struct vhost_backend_ops *backend_ops;
+
+	struct vhost_reconnect_data *reconnect_log;
 };
 
+static __rte_always_inline void
+vhost_virtqueue_reconnect_log_split(struct vhost_virtqueue *vq)
+{
+	if (vq->reconnect_log != NULL)
+		vq->reconnect_log->last_avail_idx = vq->last_avail_idx;
+}
+
+static __rte_always_inline void
+vhost_virtqueue_reconnect_log_packed(struct vhost_virtqueue *vq)
+{
+	if (vq->reconnect_log != NULL) {
+		vq->reconnect_log->last_avail_idx = vq->last_avail_idx;
+		vq->reconnect_log->avail_wrap_counter = vq->avail_wrap_counter;
+	}
+}
+
 static inline void
 vq_assert_lock__(struct virtio_net *dev, struct vhost_virtqueue *vq, const char *func)
 	__rte_assert_exclusive_lock(&vq->access_lock)
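
Both helpers are __rte_always_inline and reduce to a NULL test plus one or two stores, so datapaths running without reconnect support (reconnect_log left at NULL) pay almost nothing. On the restore side, a backend could seed a virtqueue from the log roughly as follows; this function is hypothetical and only illustrates which fields travel back:

static void
vhost_virtqueue_restore_from_log(struct vhost_virtqueue *vq,
		struct vhost_reconnect_vring *log)
{
	vq->reconnect_log = log;
	vq->last_avail_idx = log->last_avail_idx;
	/* Only meaningful for packed rings; harmless for split rings. */
	vq->avail_wrap_counter = log->avail_wrap_counter;
}
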
@@ -584,6 +617,7 @@ vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
 		vq->avail_wrap_counter ^= 1;
 		vq->last_avail_idx -= vq->size;
 	}
+	vhost_virtqueue_reconnect_log_packed(vq);
 }
 
 void __vhost_log_cache_write(struct virtio_net *dev,
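
Worth noting on placement: for packed rings, all forward movement of last_avail_idx funnels through vq_inc_last_avail_packed(), so this one call covers every regular enqueue and dequeue path. Only the error-recovery paths that rewind the index, patched in virtio_net.c below, need explicit logging calls.
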
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1445,6 +1445,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 
 		vq->last_avail_idx += num_buffers;
+		vhost_virtqueue_reconnect_log_split(vq);
 	}
 
 	do_data_copy_enqueue(dev, vq);
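
This is the pattern the rest of the patch repeats across the split-ring datapath: whenever last_avail_idx moves, forward on success or backward during error recovery, the new value is mirrored to the log straight away, so the mapped state never lags the virtqueue:

	vq->last_avail_idx += num_buffers;	 /* consume the descriptors */
	vhost_virtqueue_reconnect_log_split(vq); /* then publish to the log */
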
@@ -1857,6 +1858,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 		pkts_info[slot_idx].mbuf = pkts[pkt_idx];
 
 		vq->last_avail_idx += num_buffers;
+		vhost_virtqueue_reconnect_log_split(vq);
 	}
 
 	if (unlikely(pkt_idx == 0))
@@ -1885,6 +1887,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 		/* recover shadow used ring and available ring */
 		vq->shadow_used_idx -= num_descs;
 		vq->last_avail_idx -= num_descs;
+		vhost_virtqueue_reconnect_log_split(vq);
 	}
 
 	/* keep used descriptors */
@@ -2100,6 +2103,7 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
 		vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
 		vq->avail_wrap_counter ^= 1;
 	}
+	vhost_virtqueue_reconnect_log_packed(vq);
 
 	if (async->buffer_idx_packed >= buffers_err)
 		async->buffer_idx_packed -= buffers_err;
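
For packed rings the wrap counter has to be logged together with the index. A worked example against the code above: with vq->size = 256, last_avail_idx = 2 and descs_err = 5, the else branch rewinds to 2 + 256 - 5 = 253 and flips avail_wrap_counter; recording the index alone would leave the logged position ambiguous across the wrap.
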
@@ -3182,6 +3186,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	if (likely(vq->shadow_used_idx)) {
 		vq->last_avail_idx += vq->shadow_used_idx;
+		vhost_virtqueue_reconnect_log_split(vq);
 		do_data_copy_dequeue(vq);
 		flush_shadow_used_ring_split(dev, vq);
 		vhost_vring_call_split(dev, vq);
@@ -3854,6 +3859,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		async->desc_idx_split++;
 
 		vq->last_avail_idx++;
+		vhost_virtqueue_reconnect_log_split(vq);
 	}
 
 	if (unlikely(dropped))
@@ -3872,6 +3878,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		pkt_idx = n_xfer;
 		/* recover available ring */
 		vq->last_avail_idx -= pkt_err;
+		vhost_virtqueue_reconnect_log_split(vq);
 
 		/**
 		 * recover async channel copy related structures and free pktmbufs
@@ -4153,6 +4160,7 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			vq->last_avail_idx += vq->size - descs_err;
 			vq->avail_wrap_counter ^= 1;
 		}
+		vhost_virtqueue_reconnect_log_packed(vq);
 	}
 
 	async->pkts_idx += pkt_idx;
--- a/lib/vhost/virtio_net_ctrl.c
+++ b/lib/vhost/virtio_net_ctrl.c
@@ -169,6 +169,7 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 		cvq->last_avail_idx++;
 		if (cvq->last_avail_idx >= cvq->size)
 			cvq->last_avail_idx -= cvq->size;
+		vhost_virtqueue_reconnect_log_split(cvq);
 
 		if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
 			vhost_avail_event(cvq) = cvq->last_avail_idx;
@@ -181,6 +182,7 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 		cvq->last_avail_idx++;
 		if (cvq->last_avail_idx >= cvq->size)
 			cvq->last_avail_idx -= cvq->size;
+		vhost_virtqueue_reconnect_log_split(cvq);
 
 		if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
 			vhost_avail_event(cvq) = cvq->last_avail_idx;
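
Both exit paths of virtio_net_ctrl_pop() use the split variant of the helper, since the control virtqueue is processed as a split ring here. Note also that this code wraps cvq->last_avail_idx manually to keep it below cvq->size, so the value that lands in the log is always a ring offset in [0, cvq->size).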