@@ -169,9 +169,12 @@ demonstrates how to use the async vhost APIs. It's used in combination with dmas
**--dmas**
This parameter is used to specify the assigned DMA device of a vhost device.
Async vhost-user net driver will be used if --dmas is set. For example
---dmas [txd0@00:04.0,txd1@00:04.1] means use DMA channel 00:04.0 for vhost
-device 0 enqueue operation and use DMA channel 00:04.1 for vhost device 1
-enqueue operation.
+--dmas [txd0@00:04.0,txd1@00:04.1,rxd0@00:04.2,rxd1@00:04.3] means use
+DMA channel 00:04.0/00:04.2 for vhost device 0 enqueue/dequeue operation
+and use DMA channel 00:04.1/00:04.3 for vhost device 1 enqueue/dequeue
+operation. The index of the device corresponds to the socket file order;
+that is, vhost device 0 is created through the first socket file, vhost
+device 1 is created through the second socket file, and so on.
Common Issues
-------------
@@ -62,6 +62,9 @@
#define MAX_VHOST_DEVICE 1024
#define DMA_RING_SIZE 4096
+#define ASYNC_ENQUEUE_VHOST 1
+#define ASYNC_DEQUEUE_VHOST 2
+
struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];
struct rte_vhost_async_dma_info dma_config[RTE_DMADEV_DEFAULT_MAX];
static int dma_count;
@@ -103,8 +106,6 @@ static int client_mode;
static int builtin_net_driver;
-static int async_vhost_driver;
-
/* Specify timeout (in useconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
@@ -114,6 +115,8 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
static char *socket_files;
static int nb_sockets;
+static struct vhost_queue_ops vdev_queue_ops[MAX_VHOST_DEVICE];
+
/* empty vmdq configuration structure. Filled in programatically */
static struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
@@ -203,6 +206,18 @@ struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * MAX_VHOST_DEVICE];
#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
/ US_PER_S * BURST_TX_DRAIN_US)
+static int vid2socketid[MAX_VHOST_DEVICE];
+
+static uint32_t get_async_flag_by_socketid(int socketid) /* flags set by open_dma() for this socket */
+{
+ return dma_bind[socketid].async_flag; /* ASYNC_ENQUEUE_VHOST and/or ASYNC_DEQUEUE_VHOST bits */
+}
+
+static void init_vid2socketid_array(int vid, int socketid) /* record vid -> socket-file index */
+{
+ vid2socketid[vid] = socketid;
+}
+
static inline bool
is_dma_configured(int16_t dev_id)
{
@@ -224,7 +239,7 @@ open_dma(const char *value)
char *addrs = input;
char *ptrs[2];
char *start, *end, *substr;
- int64_t vid, vring_id;
+ int64_t socketid, vring_id;
struct rte_dma_info info;
struct rte_dma_conf dev_config = { .nb_vchans = 1 };
@@ -263,7 +278,9 @@ open_dma(const char *value)
while (i < args_nr) {
char *arg_temp = dma_arg[i];
+ char *txd, *rxd;
uint8_t sub_nr;
+ int async_flag;
sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
if (sub_nr != 2) {
@@ -271,21 +288,28 @@ open_dma(const char *value)
goto out;
}
- start = strstr(ptrs[0], "txd");
- if (start == NULL) {
+ txd = strstr(ptrs[0], "txd");
+ rxd = strstr(ptrs[0], "rxd");
+ if (txd) {
+ start = txd;
+ vring_id = VIRTIO_RXQ;
+ async_flag = ASYNC_ENQUEUE_VHOST;
+ } else if (rxd) {
+ start = rxd;
+ vring_id = VIRTIO_TXQ;
+ async_flag = ASYNC_DEQUEUE_VHOST;
+ } else {
ret = -1;
goto out;
}
start += 3;
- vid = strtol(start, &end, 0);
+ socketid = strtol(start, &end, 0);
if (end == start) {
ret = -1;
goto out;
}
- vring_id = 0 + VIRTIO_RXQ;
-
dev_id = rte_dma_get_dev_id_by_name(ptrs[1]);
if (dev_id < 0) {
RTE_LOG(ERR, VHOST_CONFIG, "Fail to find DMA %s.\n", ptrs[1]);
@@ -325,7 +349,8 @@ open_dma(const char *value)
dma_config[dma_count++].max_desc = DMA_RING_SIZE;
done:
- (dma_info + vid)->dmas[vring_id].dev_id = dev_id;
+ (dma_info + socketid)->dmas[vring_id].dev_id = dev_id;
+ (dma_info + socketid)->async_flag |= async_flag;
i++;
}
out:
@@ -792,7 +817,6 @@ us_vhost_parse_args(int argc, char **argv)
us_vhost_usage(prgname);
return -1;
}
- async_vhost_driver = 1;
break;
case OPT_CLIENT_NUM:
@@ -961,13 +985,13 @@ complete_async_pkts(struct vhost_dev *vdev)
{
struct rte_mbuf *p_cpl[MAX_PKT_BURST];
uint16_t complete_count;
- int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
+ int16_t dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].dev_id;
complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
VIRTIO_RXQ, p_cpl, MAX_PKT_BURST, dma_id, 0);
if (complete_count) {
free_pkts(p_cpl, complete_count);
- __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
+ __atomic_sub_fetch(&vdev->pkts_enq_inflight, complete_count, __ATOMIC_SEQ_CST);
}
}
@@ -1002,23 +1026,7 @@ drain_vhost(struct vhost_dev *vdev)
uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
- if (builtin_net_driver) {
- ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
- } else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
- uint16_t enqueue_fail = 0;
- int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
-
- complete_async_pkts(vdev);
- ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0);
- __atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
-
- enqueue_fail = nr_xmit - ret;
- if (enqueue_fail)
- free_pkts(&m[ret], nr_xmit - ret);
- } else {
- ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
- m, nr_xmit);
- }
+ ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);
if (enable_stats) {
__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
@@ -1027,7 +1035,7 @@ drain_vhost(struct vhost_dev *vdev)
__ATOMIC_SEQ_CST);
}
- if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled)
+ if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled)
free_pkts(m, nr_xmit);
}
@@ -1300,6 +1308,33 @@ drain_mbuf_table(struct mbuf_table *tx_q)
}
}
+uint16_t
+async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t rx_count)
+{
+ uint16_t enqueue_count;
+ uint16_t enqueue_fail = 0;
+ int16_t dma_id = dma_bind[vid2socketid[dev->vid]].dmas[queue_id].dev_id; /* dev_id is int16_t */
+
+ complete_async_pkts(dev); /* reap finished DMA copies before submitting more */
+ enqueue_count = rte_vhost_submit_enqueue_burst(dev->vid, queue_id,
+ pkts, rx_count, dma_id, 0);
+ __atomic_add_fetch(&dev->pkts_enq_inflight, enqueue_count, __ATOMIC_SEQ_CST);
+
+ enqueue_fail = rx_count - enqueue_count;
+ if (enqueue_fail)
+ free_pkts(&pkts[enqueue_count], enqueue_fail); /* drop pkts the ring could not take */
+
+ return enqueue_count;
+}
+
+uint16_t
+sync_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t rx_count)
+{
+ return rte_vhost_enqueue_burst(dev->vid, queue_id, pkts, rx_count); /* CPU-copy enqueue path */
+}
+
static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
@@ -1330,26 +1365,8 @@ drain_eth_rx(struct vhost_dev *vdev)
}
}
- if (builtin_net_driver) {
- enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
- pkts, rx_count);
- } else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
- uint16_t enqueue_fail = 0;
- int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
-
- complete_async_pkts(vdev);
- enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
- VIRTIO_RXQ, pkts, rx_count, dma_id, 0);
- __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
-
- enqueue_fail = rx_count - enqueue_count;
- if (enqueue_fail)
- free_pkts(&pkts[enqueue_count], enqueue_fail);
-
- } else {
- enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
- pkts, rx_count);
- }
+ enqueue_count = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
+ VIRTIO_RXQ, pkts, rx_count);
if (enable_stats) {
__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
@@ -1358,10 +1375,33 @@ drain_eth_rx(struct vhost_dev *vdev)
__ATOMIC_SEQ_CST);
}
- if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled)
+ if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled)
free_pkts(pkts, rx_count);
}
+uint16_t async_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ int nr_inflight;
+ uint16_t dequeue_count;
+ int16_t dma_id = dma_bind[vid2socketid[dev->vid]].dmas[queue_id].dev_id; /* dev_id is int16_t */
+
+ dequeue_count = rte_vhost_async_try_dequeue_burst(dev->vid, queue_id,
+ mbuf_pool, pkts, count, &nr_inflight, dma_id, 0);
+ if (likely(nr_inflight != -1)) /* -1: no inflight information reported */
+ dev->pkts_deq_inflight = nr_inflight;
+
+ return dequeue_count;
+}
+
+uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ return rte_vhost_dequeue_burst(dev->vid, queue_id, mbuf_pool, pkts, count); /* CPU-copy dequeue path */
+}
+
static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
{
@@ -1369,13 +1409,8 @@ drain_virtio_tx(struct vhost_dev *vdev)
uint16_t count;
uint16_t i;
- if (builtin_net_driver) {
- count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
- pkts, MAX_PKT_BURST);
- } else {
- count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
- mbuf_pool, pkts, MAX_PKT_BURST);
- }
+ count = vdev_queue_ops[vdev->vid].dequeue_pkt_burst(vdev,
+ VIRTIO_TXQ, mbuf_pool, pkts, MAX_PKT_BURST);
/* setup VMDq for the first packet */
if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
@@ -1454,6 +1489,31 @@ switch_worker(void *arg __rte_unused)
return 0;
}
+static void
+vhost_clear_queue_thread_unsafe(struct vhost_dev *vdev, uint16_t queue_id)
+{
+ uint16_t n_pkt = 0;
+ int16_t dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
+ struct rte_mbuf *m_enq_cpl[vdev->pkts_enq_inflight + 1]; /* +1: zero-length VLA is UB */
+ struct rte_mbuf *m_deq_cpl[vdev->pkts_deq_inflight + 1];
+
+ if (queue_id % 2 == 0) { /* even vring (VIRTIO_RXQ): drain enqueue inflight pkts */
+ while (vdev->pkts_enq_inflight) {
+ n_pkt = rte_vhost_clear_queue_thread_unsafe(vdev->vid,
+ queue_id, m_enq_cpl, vdev->pkts_enq_inflight, dma_id, 0);
+ free_pkts(m_enq_cpl, n_pkt);
+ __atomic_sub_fetch(&vdev->pkts_enq_inflight, n_pkt, __ATOMIC_SEQ_CST);
+ }
+ } else { /* odd vring (VIRTIO_TXQ): drain dequeue inflight pkts */
+ while (vdev->pkts_deq_inflight) {
+ n_pkt = rte_vhost_clear_queue_thread_unsafe(vdev->vid,
+ queue_id, m_deq_cpl, vdev->pkts_deq_inflight, dma_id, 0);
+ free_pkts(m_deq_cpl, n_pkt);
+ __atomic_sub_fetch(&vdev->pkts_deq_inflight, n_pkt, __ATOMIC_SEQ_CST);
+ }
+ }
+}
+
/*
* Remove a device from the specific data core linked list and from the
* main linked list. Synchonization occurs through the use of the
@@ -1510,25 +1570,83 @@ destroy_device(int vid)
"(%d) device has been removed from data core\n",
vdev->vid);
- if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
- uint16_t n_pkt = 0;
- int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
- struct rte_mbuf *m_cpl[vdev->pkts_inflight];
-
- while (vdev->pkts_inflight) {
- n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
- m_cpl, vdev->pkts_inflight, dma_id, 0);
- free_pkts(m_cpl, n_pkt);
- __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
- }
-
+ if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled) {
+ vhost_clear_queue_thread_unsafe(vdev, VIRTIO_RXQ);
rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
- dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
+ dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled = false;
+ }
+ if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled) {
+ vhost_clear_queue_thread_unsafe(vdev, VIRTIO_TXQ);
+ rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
+ dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled = false;
}
rte_free(vdev);
}
+static int
+get_socketid_by_vid(int vid)
+{
+ int i;
+ char ifname[PATH_MAX];
+ if (rte_vhost_get_ifname(vid, ifname, sizeof(ifname)) != 0)
+ return -1; /* cannot resolve socket path; ifname would be uninitialized */
+ for (i = 0; i < nb_sockets; i++) {
+ char *file = socket_files + i * PATH_MAX;
+ if (strcmp(file, ifname) == 0)
+ return i; /* socket index doubles as vhost device index */
+ }
+
+ return -1;
+}
+
+static int
+init_vhost_queue_ops(int vid)
+{
+ int socketid = get_socketid_by_vid(vid); /* which socket file created this device */
+ if (socketid == -1)
+ return -1;
+
+ init_vid2socketid_array(vid, socketid);
+ if (builtin_net_driver) {
+ vdev_queue_ops[vid].enqueue_pkt_burst = builtin_enqueue_pkts;
+ vdev_queue_ops[vid].dequeue_pkt_burst = builtin_dequeue_pkts;
+ } else {
+ if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled)
+ vdev_queue_ops[vid].enqueue_pkt_burst = async_enqueue_pkts;
+ else
+ vdev_queue_ops[vid].enqueue_pkt_burst = sync_enqueue_pkts;
+
+ if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled)
+ vdev_queue_ops[vid].dequeue_pkt_burst = async_dequeue_pkts;
+ else
+ vdev_queue_ops[vid].dequeue_pkt_burst = sync_dequeue_pkts;
+ }
+
+ return 0;
+}
+
+static int
+vhost_async_channel_register(int vid) /* register RX/TX async channels that have a DMA bound */
+{
+ int rx_ret = 0, tx_ret = 0;
+
+ if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].dev_id != INVALID_DMA_ID) {
+ rx_ret = rte_vhost_async_channel_register(vid, VIRTIO_RXQ);
+ if (rx_ret == 0)
+ dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled = true;
+ }
+
+ if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].dev_id != INVALID_DMA_ID) {
+ tx_ret = rte_vhost_async_channel_register(vid, VIRTIO_TXQ);
+ if (tx_ret == 0)
+ dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled = true;
+ }
+
+ return rx_ret | tx_ret; /* non-zero if either registration failed */
+}
+
+
/*
* A new device is added to a data core. First the device is added to the main linked list
* and then allocated to a specific data core.
@@ -1540,6 +1658,8 @@ new_device(int vid)
uint16_t i;
uint32_t device_num_min = num_devices;
struct vhost_dev *vdev;
+ int ret;
+
vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
@@ -1593,17 +1713,12 @@ new_device(int vid)
"(%d) device has been added to data core %d\n",
vid, vdev->coreid);
- if (dma_bind[vid].dmas[VIRTIO_RXQ].dev_id != INVALID_DMA_ID) {
- int ret;
+ ret = vhost_async_channel_register(vid);
- ret = rte_vhost_async_channel_register(vid, VIRTIO_RXQ);
- if (ret == 0) {
- dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = true;
- }
- return ret;
- }
+ if (init_vhost_queue_ops(vid) != 0)
+ return -1;
- return 0;
+ return ret;
}
static int
@@ -1621,19 +1736,9 @@ vring_state_changed(int vid, uint16_t queue_id, int enable)
if (queue_id != VIRTIO_RXQ)
return 0;
- if (dma_bind[vid].dmas[queue_id].async_enabled) {
- if (!enable) {
- uint16_t n_pkt = 0;
- int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
- struct rte_mbuf *m_cpl[vdev->pkts_inflight];
-
- while (vdev->pkts_inflight) {
- n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
- m_cpl, vdev->pkts_inflight, dma_id, 0);
- free_pkts(m_cpl, n_pkt);
- __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
- }
- }
+ if (dma_bind[vid2socketid[vid]].dmas[queue_id].async_enabled) {
+ if (!enable)
+ vhost_clear_queue_thread_unsafe(vdev, queue_id);
}
return 0;
@@ -1896,7 +2001,7 @@ main(int argc, char *argv[])
if (client_mode)
flags |= RTE_VHOST_USER_CLIENT;
- if (async_vhost_driver) {
+ if (dma_count > 0) {
if (rte_vhost_async_dma_configure(dma_config, dma_count) < 0) {
RTE_LOG(ERR, VHOST_PORT, "Failed to configure DMA in vhost.\n");
for (i = 0; i < dma_count; i++) {
@@ -1906,18 +2011,18 @@ main(int argc, char *argv[])
}
}
dma_count = 0;
- async_vhost_driver = false;
}
}
/* Register vhost user driver to handle vhost messages. */
for (i = 0; i < nb_sockets; i++) {
char *file = socket_files + i * PATH_MAX;
+ uint64_t flag = flags;
- if (async_vhost_driver)
- flags = flags | RTE_VHOST_USER_ASYNC_COPY;
+ if (dma_count > 0 && get_async_flag_by_socketid(i) != 0)
+ flag |= RTE_VHOST_USER_ASYNC_COPY;
- ret = rte_vhost_driver_register(file, flags);
+ ret = rte_vhost_driver_register(file, flag);
if (ret != 0) {
unregister_drivers(i);
rte_exit(EXIT_FAILURE,
@@ -52,7 +52,8 @@ struct vhost_dev {
uint64_t features;
size_t hdr_len;
uint16_t nr_vrings;
- uint16_t pkts_inflight;
+ uint16_t pkts_enq_inflight;
+ uint16_t pkts_deq_inflight;
struct rte_vhost_memory *mem;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
@@ -62,6 +63,19 @@ struct vhost_dev {
struct vhost_queue queues[MAX_QUEUE_PAIRS * 2];
} __rte_cache_aligned;
+typedef uint16_t (*vhost_enqueue_burst_t)(struct vhost_dev *dev,
+ uint16_t queue_id, struct rte_mbuf **pkts,
+ uint32_t count);
+
+typedef uint16_t (*vhost_dequeue_burst_t)(struct vhost_dev *dev,
+ uint16_t queue_id, struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count);
+
+struct vhost_queue_ops {
+ vhost_enqueue_burst_t enqueue_pkt_burst;
+ vhost_dequeue_burst_t dequeue_pkt_burst;
+};
+
TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
@@ -88,6 +102,7 @@ struct dma_info {
struct dma_for_vhost {
struct dma_info dmas[RTE_MAX_QUEUES_PER_PORT * 2];
+ uint32_t async_flag;
};
/* we implement non-extra virtio net features */
@@ -98,7 +113,19 @@ void vs_vhost_net_remove(struct vhost_dev *dev);
uint16_t vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count);
-uint16_t vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
- struct rte_mempool *mbuf_pool,
- struct rte_mbuf **pkts, uint16_t count);
+uint16_t builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count);
+uint16_t builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count);
+uint16_t sync_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count);
+uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count);
+uint16_t async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count);
+uint16_t async_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count);
#endif /* _MAIN_H_ */
@@ -238,6 +238,13 @@ vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
return count;
}
+uint16_t
+builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count) /* wrapper matching vhost_enqueue_burst_t */
+{
+ return vs_enqueue_pkts(dev, queue_id, pkts, count);
+}
+
static __rte_always_inline int
dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
struct rte_mbuf *m, uint16_t desc_idx,
@@ -363,7 +370,7 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
return 0;
}
-uint16_t
+static uint16_t
vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
@@ -440,3 +447,10 @@ vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
return i;
}
+
+uint16_t
+builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) /* wrapper matching vhost_dequeue_burst_t */
+{
+ return vs_dequeue_pkts(dev, queue_id, mbuf_pool, pkts, count);
+}