[v2,7/9] net/mlx5: add cross port shared mode for HW steering

Message ID 20230207140206.29139-7-viacheslavo@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Headers
Series [v2,1/9] ethdev: sharing indirect actions between ports |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Slava Ovsiienko Feb. 7, 2023, 2:02 p.m. UTC
  Add host port option for sharing steering objects between
multiple ports of the same physical NIC.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.c         |  6 +++
 drivers/net/mlx5/mlx5.h         |  2 +
 drivers/net/mlx5/mlx5_flow_hw.c | 78 +++++++++++++++++++++++++++++++--
 drivers/net/mlx5/mlx5_hws_cnt.c | 12 +++++
 4 files changed, 94 insertions(+), 4 deletions(-)
  

Patch

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index b8643cebdd..2eca2cceef 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2013,6 +2013,12 @@  mlx5_dev_close(struct rte_eth_dev *dev)
 	}
 	if (!priv->sh)
 		return 0;
+	if (priv->shared_refcnt) {
+		DRV_LOG(ERR, "port %u is shared host in use (%u)",
+			dev->data->port_id, priv->shared_refcnt);
+		rte_errno = EBUSY;
+		return -EBUSY;
+	}
 	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
 		dev->data->port_id,
 		((priv->sh->cdev->ctx != NULL) ?
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 16b33e1548..525bdd47f7 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1780,6 +1780,8 @@  struct mlx5_priv {
 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
 	/**< HW steering templates used to create control flow rules. */
 #endif
+	struct rte_eth_dev *shared_host; /* Host device for HW steering. */
+	uint16_t shared_refcnt; /* HW steering host reference counter. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 20c71ff7f0..59b5d1980c 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -5,6 +5,8 @@ 
 #include <rte_flow.h>
 
 #include <mlx5_malloc.h>
+
+#include "mlx5.h"
 #include "mlx5_defs.h"
 #include "mlx5_flow.h"
 #include "mlx5_rx.h"
@@ -6301,6 +6303,12 @@  flow_hw_ct_pool_create(struct rte_eth_dev *dev,
 	int reg_id;
 	uint32_t flags;
 
+	if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+		DRV_LOG(ERR, "Connection tracking is not supported "
+			     "in cross vHCA sharing mode");
+		rte_errno = ENOTSUP;
+		return NULL;
+	}
 	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
 	if (!pool) {
 		rte_errno = ENOMEM;
@@ -6785,6 +6793,7 @@  flow_hw_configure(struct rte_eth_dev *dev,
 		  struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_priv *host_priv = NULL;
 	struct mlx5dr_context *dr_ctx = NULL;
 	struct mlx5dr_context_attr dr_ctx_attr = {0};
 	struct mlx5_hw_q *hw_q;
@@ -6799,7 +6808,8 @@  flow_hw_configure(struct rte_eth_dev *dev,
 		.free = mlx5_free,
 		.type = "mlx5_hw_action_construct_data",
 	};
-	/* Adds one queue to be used by PMD.
+	/*
+	 * Adds one queue to be used by PMD.
 	 * The last queue will be used by the PMD.
 	 */
 	uint16_t nb_q_updated = 0;
@@ -6918,6 +6928,57 @@  flow_hw_configure(struct rte_eth_dev *dev,
 	dr_ctx_attr.queues = nb_q_updated;
 	/* Queue size should all be the same. Take the first one. */
 	dr_ctx_attr.queue_size = _queue_attr[0]->size;
+	if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+		struct rte_eth_dev *host_dev = NULL;
+		uint16_t port_id;
+
+		MLX5_ASSERT(rte_eth_dev_is_valid_port(port_attr->host_port_id));
+		if (is_proxy) {
+			DRV_LOG(ERR, "cross vHCA shared mode not supported "
+				     "for E-Switch configurations");
+			rte_errno = ENOTSUP;
+			goto err;
+		}
+		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
+			if (port_id == port_attr->host_port_id) {
+				host_dev = &rte_eth_devices[port_id];
+				break;
+			}
+		}
+		if (!host_dev || host_dev == dev ||
+		    !host_dev->data || !host_dev->data->dev_private) {
+			DRV_LOG(ERR, "Invalid cross vHCA host port %u",
+				port_attr->host_port_id);
+			rte_errno = EINVAL;
+			goto err;
+		}
+		host_priv = host_dev->data->dev_private;
+		if (host_priv->sh->cdev->ctx == priv->sh->cdev->ctx) {
+			DRV_LOG(ERR, "Sibling ports %u and %u do not "
+				     "require cross vHCA sharing mode",
+				dev->data->port_id, port_attr->host_port_id);
+			rte_errno = EINVAL;
+			goto err;
+		}
+		if (host_priv->shared_host) {
+			DRV_LOG(ERR, "Host port %u is not the sharing base",
+				port_attr->host_port_id);
+			rte_errno = EINVAL;
+			goto err;
+		}
+		if (port_attr->nb_counters ||
+		    port_attr->nb_aging_objects ||
+		    port_attr->nb_meters ||
+		    port_attr->nb_conn_tracks) {
+			DRV_LOG(ERR,
+				"Object numbers on guest port must be zeros");
+			rte_errno = EINVAL;
+			goto err;
+		}
+		dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
+		priv->shared_host = host_dev;
+		__atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+	}
 	dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
 	/* rte_errno has been updated by HWS layer. */
 	if (!dr_ctx)
@@ -6933,7 +6994,7 @@  flow_hw_configure(struct rte_eth_dev *dev,
 		goto err;
 	}
 	/* Initialize meter library*/
-	if (port_attr->nb_meters)
+	if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
 		if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 1, 1, nb_q_updated))
 			goto err;
 	/* Add global actions. */
@@ -6970,7 +7031,7 @@  flow_hw_configure(struct rte_eth_dev *dev,
 			goto err;
 		}
 	}
-	if (port_attr->nb_conn_tracks) {
+	if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
 		mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +
 			   sizeof(*priv->ct_mng);
 		priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
@@ -6984,7 +7045,7 @@  flow_hw_configure(struct rte_eth_dev *dev,
 			goto err;
 		priv->sh->ct_aso_en = 1;
 	}
-	if (port_attr->nb_counters) {
+	if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) {
 		priv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,
 							   nb_queue);
 		if (priv->hws_cpool == NULL)
@@ -7053,6 +7114,10 @@  flow_hw_configure(struct rte_eth_dev *dev,
 	}
 	if (_queue_attr)
 		mlx5_free(_queue_attr);
+	if (priv->shared_host) {
+		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+		priv->shared_host = NULL;
+	}
 	/* Do not overwrite the internal errno information. */
 	if (ret)
 		return ret;
@@ -7131,6 +7196,11 @@  flow_hw_resource_release(struct rte_eth_dev *dev)
 	mlx5_free(priv->hw_q);
 	priv->hw_q = NULL;
 	claim_zero(mlx5dr_context_close(priv->dr_ctx));
+	if (priv->shared_host) {
+		struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
+		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+		priv->shared_host = NULL;
+	}
 	priv->dr_ctx = NULL;
 	priv->nb_queue = 0;
 }
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index 51704ef754..afc93821e4 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -618,6 +618,12 @@  mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
 	int ret = 0;
 	size_t sz;
 
+	if (pattr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+		DRV_LOG(ERR, "Counters are not supported "
+			     "in cross vHCA sharing mode");
+		rte_errno = ENOTSUP;
+		return NULL;
+	}
 	/* init cnt service if not. */
 	if (priv->sh->cnt_svc == NULL) {
 		ret = mlx5_hws_cnt_svc_init(priv->sh);
@@ -1189,6 +1195,12 @@  mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
 
 	strict_queue = !!(attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);
 	MLX5_ASSERT(priv->hws_cpool);
+	if (attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+		DRV_LOG(ERR, "Aging is not supported "
+			     "in cross vHCA sharing mode");
+		rte_errno = ENOTSUP;
+		return -ENOTSUP;
+	}
 	nb_alloc_cnts = mlx5_hws_cnt_pool_get_size(priv->hws_cpool);
 	if (strict_queue) {
 		rsize = mlx5_hws_aged_out_q_ring_size_get(nb_alloc_cnts,