[v1,13/19] net/mlx5/hws: Add HWS context object

Message ID:   20220922190345.394-14-valex@nvidia.com (mailing list archive)
State:        Superseded, archived
Delegated to: Raslan Darawsheh
Series: net/mlx5: Add HW steering low level support

Checks

Context         Check     Description
ci/checkpatch   warning   coding style issues

Commit Message

Alex Vesker Sept. 22, 2022, 7:03 p.m. UTC
  Context is the first mlx5dr object created; all sub-objects (table,
matcher, rule, action) are created using the context. The context holds
the capabilities and send queues used for configuring the offloads to
the HW.
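
For illustration only, a minimal usage sketch of the new API (the
attribute fields queues, queue_size and pd follow their use in
mlx5dr_context_init_hws() below; the values are made up):

	struct mlx5dr_context_attr attr = {
		.queues = 16,      /* number of send queues to open */
		.queue_size = 256, /* depth of each send queue */
		.pd = NULL,        /* NULL: mlx5dr allocates a private PD */
	};
	struct mlx5dr_context *ctx;

	ctx = mlx5dr_context_open(ibv_ctx, &attr);
	if (!ctx)
		return -rte_errno; /* rte_errno is set on failure */

	/* ... create tables, matchers, rules and actions using ctx ... */

	mlx5dr_context_close(ctx);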

Signed-off-by: Alex Vesker <valex@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_context.c | 222 ++++++++++++++++++++++++++
 drivers/net/mlx5/hws/mlx5dr_context.h |  40 +++++
 2 files changed, 262 insertions(+)
 create mode 100644 drivers/net/mlx5/hws/mlx5dr_context.c
 create mode 100644 drivers/net/mlx5/hws/mlx5dr_context.h
  

Patch

diff --git a/drivers/net/mlx5/hws/mlx5dr_context.c b/drivers/net/mlx5/hws/mlx5dr_context.c
new file mode 100644
index 0000000000..c0cc1bebc5
--- /dev/null
+++ b/drivers/net/mlx5/hws/mlx5dr_context.c
@@ -0,0 +1,222 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include "mlx5dr_internal.h"
+
+static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx)
+{
+	struct mlx5dr_pool_attr pool_attr = {0};
+	uint8_t max_log_sz;
+	int i;
+
+	if (mlx5dr_pat_init_pattern_cache(&ctx->pattern_cache))
+		return rte_errno;
+
+	/* Create an STC pool per FT type */
+	pool_attr.pool_type = MLX5DR_POOL_TYPE_STC;
+	pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_STC_POOL;
+	max_log_sz = RTE_MIN(MLX5DR_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
+	pool_attr.alloc_log_sz = RTE_MAX(max_log_sz, ctx->caps->stc_alloc_log_gran);
+
+	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
+		pool_attr.table_type = i;
+		ctx->stc_pool[i] = mlx5dr_pool_create(ctx, &pool_attr);
+		if (!ctx->stc_pool[i]) {
+			DR_LOG(ERR, "Failed to allocate STC pool [%d]", i);
+			goto free_stc_pools;
+		}
+	}
+
+	return 0;
+
+free_stc_pools:
+	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++)
+		if (ctx->stc_pool[i])
+			mlx5dr_pool_destroy(ctx->stc_pool[i]);
+
+	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
+
+	return rte_errno;
+}
+
+static void mlx5dr_context_pools_uninit(struct mlx5dr_context *ctx)
+{
+	int i;
+
+	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
+
+	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
+		if (ctx->stc_pool[i])
+			mlx5dr_pool_destroy(ctx->stc_pool[i]);
+	}
+}
+
+static int mlx5dr_context_init_pd(struct mlx5dr_context *ctx,
+				  struct ibv_pd *pd)
+{
+	struct mlx5dv_pd mlx5_pd = {0};
+	struct mlx5dv_obj obj;
+	int ret;
+
+	if (pd) {
+		ctx->pd = pd;
+	} else {
+		ctx->pd = mlx5_glue->alloc_pd(ctx->ibv_ctx);
+		if (!ctx->pd) {
+			DR_LOG(ERR, "Failed to allocate PD");
+			rte_errno = errno;
+			return rte_errno;
+		}
+		ctx->flags |= MLX5DR_CONTEXT_FLAG_PRIVATE_PD;
+	}
+
+	obj.pd.in = ctx->pd;
+	obj.pd.out = &mlx5_pd;
+
+	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
+	if (ret)
+		goto free_private_pd;
+
+	ctx->pd_num = mlx5_pd.pdn;
+
+	return 0;
+
+free_private_pd:
+	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
+		mlx5_glue->dealloc_pd(ctx->pd);
+
+	return ret;
+}
+
+static int mlx5dr_context_uninit_pd(struct mlx5dr_context *ctx)
+{
+	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
+		return mlx5_glue->dealloc_pd(ctx->pd);
+
+	return 0;
+}
+
+static void mlx5dr_context_check_hws_supp(struct mlx5dr_context *ctx)
+{
+	struct mlx5dr_cmd_query_caps *caps = ctx->caps;
+
+	/* HWS not supported on device / FW */
+	if (!caps->wqe_based_update) {
+		DR_LOG(INFO, "Required HWS WQE based insertion cap not supported");
+		return;
+	}
+
+	/* Current solution requires all rules to set reparse bit */
+	if ((!caps->nic_ft.reparse || !caps->fdb_ft.reparse) ||
+	    !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
+		DR_LOG(INFO, "Required HWS reparse cap not supported");
+		return;
+	}
+
+	/* FW/HW must support 8DW STE */
+	if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
+		DR_LOG(INFO, "Required HWS STE format not supported");
+		return;
+	}
+
+	/* All rules are added by hash */
+	if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH)) {
+		DR_LOG(INFO, "Required HWS RTC index mode not supported");
+		return;
+	}
+
+	/* Dynamic definer format (SELECT) support is required */
+	if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
+		DR_LOG(INFO, "Required HWS Dynamic definer not supported");
+		return;
+	}
+
+	ctx->flags |= MLX5DR_CONTEXT_FLAG_HWS_SUPPORT;
+}
+
+static int mlx5dr_context_init_hws(struct mlx5dr_context *ctx,
+				   struct mlx5dr_context_attr *attr)
+{
+	int ret;
+
+	mlx5dr_context_check_hws_supp(ctx);
+
+	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
+		return 0;
+
+	ret = mlx5dr_context_init_pd(ctx, attr->pd);
+	if (ret)
+		return ret;
+
+	ret = mlx5dr_context_pools_init(ctx);
+	if (ret)
+		goto uninit_pd;
+
+	ret = mlx5dr_send_queues_open(ctx, attr->queues, attr->queue_size);
+	if (ret)
+		goto pools_uninit;
+
+	return 0;
+
+pools_uninit:
+	mlx5dr_context_pools_uninit(ctx);
+uninit_pd:
+	mlx5dr_context_uninit_pd(ctx);
+	return ret;
+}
+
+static void mlx5dr_context_uninit_hws(struct mlx5dr_context *ctx)
+{
+	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
+		return;
+
+	mlx5dr_send_queues_close(ctx);
+	mlx5dr_context_pools_uninit(ctx);
+	mlx5dr_context_uninit_pd(ctx);
+}
+
+struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx,
+					   struct mlx5dr_context_attr *attr)
+{
+	struct mlx5dr_context *ctx;
+	int ret;
+
+	ctx = simple_calloc(1, sizeof(*ctx));
+	if (!ctx) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	ctx->ibv_ctx = ibv_ctx;
+	pthread_spin_init(&ctx->ctrl_lock, PTHREAD_PROCESS_PRIVATE);
+
+	ctx->caps = simple_calloc(1, sizeof(*ctx->caps));
+	if (!ctx->caps)
+		goto free_ctx;
+
+	ret = mlx5dr_cmd_query_caps(ibv_ctx, ctx->caps);
+	if (ret)
+		goto free_caps;
+
+	ret = mlx5dr_context_init_hws(ctx, attr);
+	if (ret)
+		goto free_caps;
+
+	return ctx;
+
+free_caps:
+	simple_free(ctx->caps);
+free_ctx:
+	simple_free(ctx);
+	return NULL;
+}
+
+int mlx5dr_context_close(struct mlx5dr_context *ctx)
+{
+	mlx5dr_context_uninit_hws(ctx);
+	simple_free(ctx->caps);
+	pthread_spin_destroy(&ctx->ctrl_lock);
+	simple_free(ctx);
+	return 0;
+}
diff --git a/drivers/net/mlx5/hws/mlx5dr_context.h b/drivers/net/mlx5/hws/mlx5dr_context.h
new file mode 100644
index 0000000000..98146aaa6d
--- /dev/null
+++ b/drivers/net/mlx5/hws/mlx5dr_context.h
@@ -0,0 +1,40 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#ifndef MLX5DR_CONTEXT_H_
+#define MLX5DR_CONTEXT_H_
+
+enum mlx5dr_context_flags {
+	MLX5DR_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
+	MLX5DR_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
+};
+
+enum mlx5dr_context_shared_stc_type {
+	MLX5DR_CONTEXT_SHARED_STC_DECAP = 0,
+	MLX5DR_CONTEXT_SHARED_STC_POP = 1,
+	MLX5DR_CONTEXT_SHARED_STC_MAX = 2,
+};
+
+struct mlx5dr_context_common_res {
+	struct mlx5dr_action_default_stc *default_stc;
+	struct mlx5dr_action_shared_stc *shared_stc[MLX5DR_CONTEXT_SHARED_STC_MAX];
+	struct mlx5dr_cmd_forward_tbl *default_miss;
+};
+
+struct mlx5dr_context {
+	struct ibv_context *ibv_ctx;
+	struct mlx5dr_cmd_query_caps *caps;
+	struct ibv_pd *pd;
+	uint32_t pd_num;
+	struct mlx5dr_pool *stc_pool[MLX5DR_TABLE_TYPE_MAX];
+	struct mlx5dr_context_common_res common_res[MLX5DR_TABLE_TYPE_MAX];
+	struct mlx5dr_pattern_cache *pattern_cache;
+	pthread_spinlock_t ctrl_lock;
+	enum mlx5dr_context_flags flags;
+	struct mlx5dr_send_engine *send_queue;
+	size_t queues;
+	LIST_HEAD(table_head, mlx5dr_table) head;
+};
+
+#endif /* MLX5DR_CONTEXT_H_ */
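
A note on the STC pool sizing in mlx5dr_context_pools_init(): the
allocation size is first capped by the device maximum and then raised
to the allocation granularity. A short worked sketch with hypothetical
cap values (the numbers and the MLX5DR_POOL_STC_LOG_SZ value are
assumptions, for illustration only):

	uint8_t stc_alloc_log_max = 14;  /* hypothetical device cap (log2) */
	uint8_t stc_alloc_log_gran = 6;  /* hypothetical granularity (log2) */
	uint8_t max_log_sz, alloc_log_sz;

	/* Never exceed the device maximum... */
	max_log_sz = RTE_MIN(MLX5DR_POOL_STC_LOG_SZ, stc_alloc_log_max);
	/* ...but allocate at least one granularity unit. */
	alloc_log_sz = RTE_MAX(max_log_sz, stc_alloc_log_gran);
	/* e.g. assuming MLX5DR_POOL_STC_LOG_SZ == 15: max_log_sz = 14,
	 * so alloc_log_sz = 14 (2^14 STC entries per pool).
	 */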