@@ -2013,6 +2013,12 @@ mlx5_dev_close(struct rte_eth_dev *dev)
}
if (!priv->sh)
return 0;
+ if (priv->shared_refcnt) {
+ DRV_LOG(ERR, "port %u is shared host in use (%u)",
+ dev->data->port_id, priv->shared_refcnt);
+ rte_errno = EBUSY;
+ return -EBUSY;
+ }
DRV_LOG(DEBUG, "port %u closing device \"%s\"",
dev->data->port_id,
((priv->sh->cdev->ctx != NULL) ?
@@ -1780,6 +1780,8 @@ struct mlx5_priv {
struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
/**< HW steering templates used to create control flow rules. */
#endif
+ struct rte_eth_dev *shared_host; /* Host device for HW steering. */
+ uint16_t shared_refcnt; /* HW steering host reference counter. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
@@ -5,6 +5,8 @@
#include <rte_flow.h>
#include <mlx5_malloc.h>
+
+#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"
@@ -6302,6 +6304,12 @@ flow_hw_ct_pool_create(struct rte_eth_dev *dev,
int reg_id;
uint32_t flags;
+ if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+ DRV_LOG(ERR, "Connection tracking is not supported "
+ "in cross vHCA sharing mode");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
if (!pool) {
rte_errno = ENOMEM;
@@ -6786,6 +6794,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_priv *host_priv = NULL;
struct mlx5dr_context *dr_ctx = NULL;
struct mlx5dr_context_attr dr_ctx_attr = {0};
struct mlx5_hw_q *hw_q;
@@ -6800,7 +6809,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
.free = mlx5_free,
.type = "mlx5_hw_action_construct_data",
};
- /* Adds one queue to be used by PMD.
+ /*
+ * Adds one queue to be used by PMD.
* The last queue will be used by the PMD.
*/
uint16_t nb_q_updated = 0;
@@ -6919,6 +6929,57 @@ flow_hw_configure(struct rte_eth_dev *dev,
dr_ctx_attr.queues = nb_q_updated;
/* Queue size should all be the same. Take the first one. */
dr_ctx_attr.queue_size = _queue_attr[0]->size;
+ if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+ struct rte_eth_dev *host_dev = NULL;
+ uint16_t port_id;
+
+ MLX5_ASSERT(rte_eth_dev_is_valid_port(port_attr->host_port_id));
+ if (is_proxy) {
+ DRV_LOG(ERR, "cross vHCA shared mode not supported "
+ "for E-Switch configurations");
+ rte_errno = ENOTSUP;
+ goto err;
+ }
+ MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
+ if (port_id == port_attr->host_port_id) {
+ host_dev = &rte_eth_devices[port_id];
+ break;
+ }
+ }
+ if (!host_dev || host_dev == dev ||
+ !host_dev->data || !host_dev->data->dev_private) {
+ DRV_LOG(ERR, "Invalid cross vHCA host port %u",
+ port_attr->host_port_id);
+ rte_errno = EINVAL;
+ goto err;
+ }
+ host_priv = host_dev->data->dev_private;
+ if (host_priv->sh->cdev->ctx == priv->sh->cdev->ctx) {
+ DRV_LOG(ERR, "Sibling ports %u and %u do not "
+ "require cross vHCA sharing mode",
+ dev->data->port_id, port_attr->host_port_id);
+ rte_errno = EINVAL;
+ goto err;
+ }
+ if (host_priv->shared_host) {
+ DRV_LOG(ERR, "Host port %u is not the sharing base",
+ port_attr->host_port_id);
+ rte_errno = EINVAL;
+ goto err;
+ }
+ if (port_attr->nb_counters ||
+ port_attr->nb_aging_objects ||
+ port_attr->nb_meters ||
+ port_attr->nb_conn_tracks) {
+ DRV_LOG(ERR,
+ "Object numbers on guest port must be zeros");
+ rte_errno = EINVAL;
+ goto err;
+ }
+ dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
+ priv->shared_host = host_dev;
+ __atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ }
dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
/* rte_errno has been updated by HWS layer. */
if (!dr_ctx)
@@ -6934,7 +6995,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
goto err;
}
/* Initialize meter library*/
- if (port_attr->nb_meters)
+ if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 1, 1, nb_q_updated))
goto err;
/* Add global actions. */
@@ -6971,7 +7032,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
goto err;
}
}
- if (port_attr->nb_conn_tracks) {
+ if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +
sizeof(*priv->ct_mng);
priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
@@ -6985,7 +7046,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
goto err;
priv->sh->ct_aso_en = 1;
}
- if (port_attr->nb_counters) {
+ if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) {
priv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,
nb_queue);
if (priv->hws_cpool == NULL)
@@ -7054,6 +7115,10 @@ flow_hw_configure(struct rte_eth_dev *dev,
}
if (_queue_attr)
mlx5_free(_queue_attr);
+ if (priv->shared_host) {
+ __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ priv->shared_host = NULL;
+ }
/* Do not overwrite the internal errno information. */
if (ret)
return ret;
@@ -7132,6 +7197,11 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
mlx5_free(priv->hw_q);
priv->hw_q = NULL;
claim_zero(mlx5dr_context_close(priv->dr_ctx));
+ if (priv->shared_host) {
+ struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
+ __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ priv->shared_host = NULL;
+ }
priv->dr_ctx = NULL;
priv->nb_queue = 0;
}
@@ -618,6 +618,12 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
int ret = 0;
size_t sz;
+ if (pattr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+ DRV_LOG(ERR, "Counters are not supported "
+ "in cross vHCA sharing mode");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
/* init cnt service if not. */
if (priv->sh->cnt_svc == NULL) {
ret = mlx5_hws_cnt_svc_init(priv->sh);
@@ -1189,6 +1195,12 @@ mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
strict_queue = !!(attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);
MLX5_ASSERT(priv->hws_cpool);
+ if (attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+ DRV_LOG(ERR, "Aging is not supported "
+ "in cross vHCA sharing mode");
+ rte_errno = ENOTSUP;
+ return -ENOTSUP;
+ }
nb_alloc_cnts = mlx5_hws_cnt_pool_get_size(priv->hws_cpool);
if (strict_queue) {
rsize = mlx5_hws_aged_out_q_ring_size_get(nb_alloc_cnts,