[07/10] vdpa/sfc: add support to get queue notify area info

Message ID 20210706164418.32615-8-vsrivast@xilinx.com (mailing list archive)
State Changes Requested, archived
Delegated to: Maxime Coquelin
Headers
Series vdpa/sfc: introduce Xilinx vDPA driver |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Vijay Srivastava July 6, 2021, 4:44 p.m. UTC
  From: Vijay Kumar Srivastava <vsrivast@xilinx.com>

Implement the vDPA ops get_notify_area to get the notify
area info of the queue.

Signed-off-by: Vijay Kumar Srivastava <vsrivast@xilinx.com>
---
 drivers/vdpa/sfc/sfc_vdpa_ops.c | 166 ++++++++++++++++++++++++++++++++++++++--
 drivers/vdpa/sfc/sfc_vdpa_ops.h |   2 +
 2 files changed, 162 insertions(+), 6 deletions(-)
  

Comments

Maxime Coquelin Aug. 30, 2021, 1:22 p.m. UTC | #1
On 7/6/21 6:44 PM, Vijay Srivastava wrote:
> From: Vijay Kumar Srivastava <vsrivast@xilinx.com>
> 
> Implement the vDPA ops get_notify_area to get the notify
> area info of the queue.
> 
> Signed-off-by: Vijay Kumar Srivastava <vsrivast@xilinx.com>
> ---
>  drivers/vdpa/sfc/sfc_vdpa_ops.c | 166 ++++++++++++++++++++++++++++++++++++++--
>  drivers/vdpa/sfc/sfc_vdpa_ops.h |   2 +
>  2 files changed, 162 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c b/drivers/vdpa/sfc/sfc_vdpa_ops.c

...

> +static int
> +sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
> +{
> +	int ret;
> +	efx_nic_t *nic;
> +	int vfio_dev_fd;
> +	efx_rc_t rc;
> +	unsigned int bar_offset;
> +	struct rte_vdpa_device *vdpa_dev;
> +	struct sfc_vdpa_ops_data *ops_data;
> +	struct vfio_region_info reg = { .argsz = sizeof(reg) };
> +	const efx_nic_cfg_t *encp;
> +	int max_vring_cnt;
> +	int64_t len;
> +	void *dev;
> +
> +	vdpa_dev = rte_vhost_get_vdpa_device(vid);
> +
> +	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
> +	if (ops_data == NULL)
> +		return -1;
> +
> +	dev = ops_data->dev_handle;
> +
> +	vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
> +	max_vring_cnt =
> +		(sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);
> +
> +	nic = sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)->nic;
> +	encp = efx_nic_cfg_get(nic);
> +
> +	if (qid >= max_vring_cnt) {
> +		sfc_vdpa_err(dev, "invalid qid : %d", qid);
> +		return -1;
> +	}
> +
> +	if (ops_data->vq_cxt[qid].enable != B_TRUE) {
> +		sfc_vdpa_err(dev, "vq is not enabled");
> +		return -1;
> +	}
> +
> +	rc = efx_virtio_get_doorbell_offset(ops_data->vq_cxt[qid].vq,
> +					    &bar_offset);
> +	if (rc != 0) {
> +		sfc_vdpa_err(dev, "failed to get doorbell offset: %s",
> +			     rte_strerror(rc));
> +		return rc;
> +	}
> +
> +	reg.index = sfc_vdpa_adapter_by_dev_handle(dev)->mem_bar.esb_rid;
> +	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
> +	if (ret != 0) {
> +		sfc_vdpa_err(dev, "could not get device region info: %s",
> +			     strerror(errno));
> +		return ret;
> +	}
> +
> +	*offset = reg.offset + bar_offset;
> +
> +	len = (1U << encp->enc_vi_window_shift) / 2;
> +	if (len >= sysconf(_SC_PAGESIZE))
> +		*size = sysconf(_SC_PAGESIZE);
> +	else
> +		return -1;

This error case may deserve an error message to be logged.

> +
> +	sfc_vdpa_info(dev, "vDPA ops get_notify_area :: offset : 0x%" PRIx64,
> +		      *offset);
> +
> +	return 0;
> +}
> +
  

Patch

diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c b/drivers/vdpa/sfc/sfc_vdpa_ops.c
index 4228044..a7b9085 100644
--- a/drivers/vdpa/sfc/sfc_vdpa_ops.c
+++ b/drivers/vdpa/sfc/sfc_vdpa_ops.c
@@ -3,6 +3,8 @@ 
  * Copyright(c) 2020-2021 Xilinx, Inc.
  */
 
+#include <pthread.h>
+#include <unistd.h>
 #include <sys/ioctl.h>
 
 #include <rte_errno.h>
@@ -534,6 +536,67 @@ 
 	return 0;
 }
 
+static void *
+sfc_vdpa_notify_ctrl(void *arg)
+{
+	struct sfc_vdpa_ops_data *ops_data;
+	int vid;
+
+	ops_data = arg;
+	if (ops_data == NULL)
+		return NULL;
+
+	sfc_vdpa_adapter_lock(ops_data->dev_handle);
+
+	vid = ops_data->vid;
+
+	if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
+		sfc_vdpa_info(ops_data->dev_handle,
+			      "vDPA (%s): Notifier could not get configured",
+			      ops_data->vdpa_dev->device->name);
+
+	sfc_vdpa_adapter_unlock(ops_data->dev_handle);
+
+	return NULL;
+}
+
+static int
+sfc_vdpa_setup_notify_ctrl(int vid)
+{
+	int ret;
+	struct rte_vdpa_device *vdpa_dev;
+	struct sfc_vdpa_ops_data *ops_data;
+
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+
+	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
+	if (ops_data == NULL) {
+		sfc_vdpa_err(ops_data->dev_handle,
+			     "invalid vDPA device : %p, vid : %d",
+			     vdpa_dev, vid);
+		return -1;
+	}
+
+	ops_data->is_notify_thread_started = false;
+
+	/*
+	 * Use rte_vhost_host_notifier_ctrl in a thread to avoid
+	 * dead lock scenario when multiple VFs are used in single vdpa
+	 * application and multiple VFs are passed to a single VM.
+	 */
+	ret = pthread_create(&ops_data->notify_tid, NULL,
+			     sfc_vdpa_notify_ctrl, ops_data);
+	if (ret != 0) {
+		sfc_vdpa_err(ops_data->dev_handle,
+			     "failed to create notify_ctrl thread: %s",
+			     rte_strerror(ret));
+		return -1;
+	}
+	ops_data->is_notify_thread_started = true;
+
+	return 0;
+}
+
 static int
 sfc_vdpa_dev_config(int vid)
 {
@@ -567,18 +630,19 @@ 
 	if (rc != 0)
 		goto fail_vdpa_start;
 
-	sfc_vdpa_adapter_unlock(ops_data->dev_handle);
+	rc = sfc_vdpa_setup_notify_ctrl(vid);
+	if (rc != 0)
+		goto fail_vdpa_notify;
 
-	sfc_vdpa_log_init(ops_data->dev_handle, "vhost notifier ctrl");
-	if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
-		sfc_vdpa_info(ops_data->dev_handle,
-			      "vDPA (%s): software relay for notify is used.",
-			      vdpa_dev->device->name);
+	sfc_vdpa_adapter_unlock(ops_data->dev_handle);
 
 	sfc_vdpa_log_init(ops_data->dev_handle, "done");
 
 	return 0;
 
+fail_vdpa_notify:
+	sfc_vdpa_stop(ops_data);
+
 fail_vdpa_start:
 	sfc_vdpa_close(ops_data);
 
@@ -591,6 +655,7 @@ 
 static int
 sfc_vdpa_dev_close(int vid)
 {
+	int ret;
 	struct rte_vdpa_device *vdpa_dev;
 	struct sfc_vdpa_ops_data *ops_data;
 
@@ -605,6 +670,23 @@ 
 	}
 
 	sfc_vdpa_adapter_lock(ops_data->dev_handle);
+	if (ops_data->is_notify_thread_started == true) {
+		void *status;
+		ret = pthread_cancel(ops_data->notify_tid);
+		if (ret != 0) {
+			sfc_vdpa_err(ops_data->dev_handle,
+				     "failed to cancel notify_ctrl thread: %s",
+				     rte_strerror(ret));
+		}
+
+		ret = pthread_join(ops_data->notify_tid, &status);
+		if (ret != 0) {
+			sfc_vdpa_err(ops_data->dev_handle,
+				     "failed to join terminated notify_ctrl thread: %s",
+				     rte_strerror(ret));
+		}
+	}
+	ops_data->is_notify_thread_started = false;
 
 	sfc_vdpa_stop(ops_data);
 	sfc_vdpa_close(ops_data);
@@ -655,6 +737,77 @@ 
 	return vfio_dev_fd;
 }
 
+static int
+sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+{
+	int ret;
+	efx_nic_t *nic;
+	int vfio_dev_fd;
+	efx_rc_t rc;
+	unsigned int bar_offset;
+	struct rte_vdpa_device *vdpa_dev;
+	struct sfc_vdpa_ops_data *ops_data;
+	struct vfio_region_info reg = { .argsz = sizeof(reg) };
+	const efx_nic_cfg_t *encp;
+	int max_vring_cnt;
+	int64_t len;
+	void *dev;
+
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+
+	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
+	if (ops_data == NULL)
+		return -1;
+
+	dev = ops_data->dev_handle;
+
+	vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
+	max_vring_cnt =
+		(sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);
+
+	nic = sfc_vdpa_adapter_by_dev_handle(ops_data->dev_handle)->nic;
+	encp = efx_nic_cfg_get(nic);
+
+	if (qid >= max_vring_cnt) {
+		sfc_vdpa_err(dev, "invalid qid : %d", qid);
+		return -1;
+	}
+
+	if (ops_data->vq_cxt[qid].enable != B_TRUE) {
+		sfc_vdpa_err(dev, "vq is not enabled");
+		return -1;
+	}
+
+	rc = efx_virtio_get_doorbell_offset(ops_data->vq_cxt[qid].vq,
+					    &bar_offset);
+	if (rc != 0) {
+		sfc_vdpa_err(dev, "failed to get doorbell offset: %s",
+			     rte_strerror(rc));
+		return rc;
+	}
+
+	reg.index = sfc_vdpa_adapter_by_dev_handle(dev)->mem_bar.esb_rid;
+	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
+	if (ret != 0) {
+		sfc_vdpa_err(dev, "could not get device region info: %s",
+			     strerror(errno));
+		return ret;
+	}
+
+	*offset = reg.offset + bar_offset;
+
+	len = (1U << encp->enc_vi_window_shift) / 2;
+	if (len >= sysconf(_SC_PAGESIZE))
+		*size = sysconf(_SC_PAGESIZE);
+	else
+		return -1;
+
+	sfc_vdpa_info(dev, "vDPA ops get_notify_area :: offset : 0x%" PRIx64,
+		      *offset);
+
+	return 0;
+}
+
 static struct rte_vdpa_dev_ops sfc_vdpa_ops = {
 	.get_queue_num = sfc_vdpa_get_queue_num,
 	.get_features = sfc_vdpa_get_features,
@@ -664,6 +817,7 @@ 
 	.set_vring_state = sfc_vdpa_set_vring_state,
 	.set_features = sfc_vdpa_set_features,
 	.get_vfio_device_fd = sfc_vdpa_get_vfio_device_fd,
+	.get_notify_area = sfc_vdpa_get_notify_area,
 };
 
 struct sfc_vdpa_ops_data *
diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.h b/drivers/vdpa/sfc/sfc_vdpa_ops.h
index 8d553c5..f7523ef 100644
--- a/drivers/vdpa/sfc/sfc_vdpa_ops.h
+++ b/drivers/vdpa/sfc/sfc_vdpa_ops.h
@@ -50,6 +50,8 @@  struct sfc_vdpa_ops_data {
 	struct rte_vdpa_device		*vdpa_dev;
 	enum sfc_vdpa_context		vdpa_context;
 	enum sfc_vdpa_state		state;
+	pthread_t			notify_tid;
+	bool				is_notify_thread_started;
 
 	uint64_t			dev_features;
 	uint64_t			drv_features;