@@ -1,35 +1,8 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2016 IGEL Co., Ltd.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IGEL Co.,Ltd. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 IGEL Co., Ltd.
+ * Copyright(c) 2016-2018 Intel Corporation
*/
+
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
@@ -528,10 +501,13 @@ update_queuing_status(struct rte_eth_dev *dev)
unsigned int i;
int allow_queuing = 1;
- if (rte_atomic32_read(&internal->dev_attached) == 0)
+ if (!dev->data->rx_queues || !dev->data->tx_queues) {
+ RTE_LOG(ERR, PMD, "RX/TX queues not setup yet\n");
return;
+ }
- if (rte_atomic32_read(&internal->started) == 0)
+ if (rte_atomic32_read(&internal->started) == 0 ||
+ rte_atomic32_read(&internal->dev_attached) == 0)
allow_queuing = 0;
/* Wait until rx/tx_pkt_burst stops accessing vhost device */
@@ -554,25 +530,157 @@ update_queuing_status(struct rte_eth_dev *dev)
}
}
+static int
+eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int ret = 0;
+
+ vq = dev->data->rx_queues[qid];
+ if (!vq) {
+ RTE_LOG(ERR, PMD, "rxq%d is not setup yet\n", qid);
+ return -1;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD, "Failed to get rxq%d's vring\n", qid);
+ return ret;
+ }
+ RTE_LOG(INFO, PMD, "Enable interrupt for rxq%d\n", qid);
+ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
+ rte_wmb();
+
+ return ret;
+}
+
+static int
+eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int ret = 0;
+
+ vq = dev->data->rx_queues[qid];
+ if (!vq) {
+ RTE_LOG(ERR, PMD, "rxq%d is not setup yet\n", qid);
+ return -1;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD, "Failed to get rxq%d's vring\n", qid);
+ return ret;
+ }
+ RTE_LOG(INFO, PMD, "Disable interrupt for rxq%d\n", qid);
+ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
+ rte_wmb();
+
+ return 0;
+}
+
+static void
+eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+
+ if (intr_handle) {
+ if (intr_handle->intr_vec)
+ free(intr_handle->intr_vec);
+ free(intr_handle);
+ }
+
+ dev->intr_handle = NULL;
+}
+
+static int
+eth_vhost_install_intr(struct rte_eth_dev *dev)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int count = 0;
+ int nb_rxq = dev->data->nb_rx_queues;
+ int i;
+ int ret;
+
+ /* uninstall firstly if we are reconnecting */
+ if (dev->intr_handle)
+ eth_vhost_uninstall_intr(dev);
+
+ dev->intr_handle = malloc(sizeof(*dev->intr_handle));
+ if (!dev->intr_handle) {
+ RTE_LOG(ERR, PMD, "Fail to allocate intr_handle\n");
+ return -ENOMEM;
+ }
+ memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));
+
+ dev->intr_handle->efd_counter_size = sizeof(uint64_t);
+
+ dev->intr_handle->intr_vec =
+ malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));
+
+ if (!dev->intr_handle->intr_vec) {
+ RTE_LOG(ERR, PMD,
+ "Failed to allocate memory for interrupt vector\n");
+ free(dev->intr_handle);
+ return -ENOMEM;
+ }
+
+ RTE_LOG(INFO, PMD, "Prepare intr vec\n");
+ for (i = 0; i < nb_rxq; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq) {
+ RTE_LOG(INFO, PMD, "rxq-%d not setup yet, skip!\n", i);
+ continue;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
+ if (ret < 0) {
+ RTE_LOG(INFO, PMD,
+ "Failed to get rxq-%d's vring, skip!\n", i);
+ continue;
+ }
+
+ if (vring.kickfd < 0) {
+ RTE_LOG(INFO, PMD,
+ "rxq-%d's kickfd is invalid, skip!\n", i);
+ continue;
+ }
+ dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+ dev->intr_handle->efds[i] = vring.kickfd;
+ count++;
+ RTE_LOG(INFO, PMD, "Installed intr vec for rxq-%d\n", i);
+ }
+
+ dev->intr_handle->nb_efd = count;
+ dev->intr_handle->max_intr = count + 1;
+ dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
+
+ return 0;
+}
+
static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
struct vhost_queue *vq;
+ int vid = internal->vid;
int i;
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
vq = eth_dev->data->rx_queues[i];
if (!vq)
continue;
- vq->vid = internal->vid;
+ vq->vid = vid;
vq->internal = internal;
vq->port = eth_dev->data->port_id;
}
+
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
vq = eth_dev->data->tx_queues[i];
if (!vq)
continue;
- vq->vid = internal->vid;
+ vq->vid = vid;
vq->internal = internal;
vq->port = eth_dev->data->port_id;
}
@@ -584,8 +692,9 @@ new_device(int vid)
struct rte_eth_dev *eth_dev;
struct internal_list *list;
struct pmd_internal *internal;
- unsigned i;
+ struct rte_eth_conf *dev_conf;
char ifname[PATH_MAX];
+ int i;
#ifdef RTE_LIBRTE_VHOST_NUMA
int newnode;
#endif
@@ -599,6 +708,7 @@ new_device(int vid)
eth_dev = list->eth_dev;
internal = eth_dev->data->dev_private;
+ dev_conf = &eth_dev->data->dev_conf;
#ifdef RTE_LIBRTE_VHOST_NUMA
newnode = rte_vhost_get_numa_node(vid);
@@ -609,14 +719,18 @@ new_device(int vid)
internal->vid = vid;
if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
queue_setup(eth_dev, internal);
- rte_atomic32_set(&internal->dev_attached, 1);
- } else {
- RTE_LOG(INFO, PMD, "RX/TX queues have not setup yet\n");
- rte_atomic32_set(&internal->dev_attached, 0);
- }
- for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
- rte_vhost_enable_guest_notification(vid, i, 0);
+ for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
+ rte_vhost_enable_guest_notification(vid, i, 0);
+
+ if (dev_conf->intr_conf.rxq) {
+ if (eth_vhost_install_intr(eth_dev) < 0) {
+ RTE_LOG(INFO, PMD,
+ "Failed to install interrupt handler.\n");
+ return -1;
+ }
+ }
+ }
rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
@@ -626,6 +740,8 @@ new_device(int vid)
RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);
+ rte_atomic32_set(&internal->dev_attached, 1);
+
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
return 0;
@@ -657,17 +773,19 @@ destroy_device(int vid)
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = -1;
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = -1;
+ if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ vq = eth_dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = -1;
+ }
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ vq = eth_dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = -1;
+ }
}
state = vring_states[eth_dev->data->port_id];
@@ -681,6 +799,8 @@ destroy_device(int vid)
RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);
+ eth_vhost_uninstall_intr(eth_dev);
+
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
@@ -791,10 +911,30 @@ static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
struct pmd_internal *internal = eth_dev->data->dev_private;
+ struct rte_eth_conf *dev_conf;
+ int vid = internal->vid;
+ int i;
- if (unlikely(rte_atomic32_read(&internal->dev_attached) == 0)) {
- queue_setup(eth_dev, internal);
- rte_atomic32_set(&internal->dev_attached, 1);
+ dev_conf = &eth_dev->data->dev_conf;
+
+ if (!eth_dev->data->rx_queues || !eth_dev->data->tx_queues) {
+ RTE_LOG(ERR, PMD, "RX/TX queues not setup yet\n");
+ return -1;
+ }
+
+ queue_setup(eth_dev, internal);
+
+ if (likely(rte_atomic32_read(&internal->dev_attached) == 1)) {
+ for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
+ rte_vhost_enable_guest_notification(vid, i, 0);
+
+ if (dev_conf->intr_conf.rxq) {
+ if (eth_vhost_install_intr(eth_dev) < 0) {
+ RTE_LOG(INFO, PMD,
+ "Failed to install interrupt handler.\n");
+ return -1;
+ }
+ }
}
rte_atomic32_set(&internal->started, 1);
@@ -836,10 +976,13 @@ eth_dev_close(struct rte_eth_dev *dev)
pthread_mutex_unlock(&internal_list_lock);
rte_free(list);
- for (i = 0; i < dev->data->nb_rx_queues; i++)
- rte_free(dev->data->rx_queues[i]);
- for (i = 0; i < dev->data->nb_tx_queues; i++)
- rte_free(dev->data->tx_queues[i]);
+ if (dev->data->rx_queues)
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ rte_free(dev->data->rx_queues[i]);
+
+ if (dev->data->tx_queues)
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ rte_free(dev->data->tx_queues[i]);
rte_free(dev->data->mac_addrs);
free(internal->dev_name);
@@ -1030,6 +1173,8 @@ static const struct eth_dev_ops ops = {
.xstats_reset = vhost_dev_xstats_reset,
.xstats_get = vhost_dev_xstats_get,
.xstats_get_names = vhost_dev_xstats_get_names,
+ .rx_queue_intr_enable = eth_rxq_intr_enable,
+ .rx_queue_intr_disable = eth_rxq_intr_disable,
};
static struct rte_vdev_driver pmd_vhost_drv;
@@ -545,16 +545,15 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
struct virtio_net *dev = get_device(vid);
- if (dev == NULL)
+ if (!dev)
return -1;
- if (enable) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "guest notification isn't supported.\n");
- return -1;
- }
+ if (enable)
+ dev->virtqueue[queue_id]->used->flags &=
+ ~VRING_USED_F_NO_NOTIFY;
+ else
+ dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
- dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
return 0;
}