[v1,05/16] net/mlx5/hws: align RTC create command with PRM format

Message ID 20230131093346.1261066-6-valex@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series net/mlx5/hws: support range and partial hash matching

Checks

Context       | Check   | Description
ci/checkpatch | success | coding style OK

Commit Message

Alex Vesker Jan. 31, 2023, 9:33 a.m. UTC
  Rename the RTC create parameters to match the new PRM format: the single
  ste_format and match_definer_id fields become ste_format_0/ste_format_1 and
  match_definer_0/match_definer_1, and the new num_match_ste and update_method
  fields are exposed. This prepares the RTC create command for range and
  partial hash matching.

Signed-off-by: Alex Vesker <valex@nvidia.com>
---
 drivers/common/mlx5/mlx5_prm.h        | 16 ++++++++++------
 drivers/net/mlx5/hws/mlx5dr_cmd.c     | 13 +++++++++++--
 drivers/net/mlx5/hws/mlx5dr_cmd.h     | 11 +++++++----
 drivers/net/mlx5/hws/mlx5dr_matcher.c | 19 ++++++++++++-------
 4 files changed, 40 insertions(+), 19 deletions(-)
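
Editor's note, not part of the patch: a minimal sketch of how a matcher-side
caller might fill the renamed attributes once a second, range-matching STE is
involved. It only uses fields, helpers and enums visible in this series;
range_definer_id and the surrounding matcher variable are hypothetical
placeholders.

    /* Sketch only -- assumes the struct and helpers shown in this series. */
    struct mlx5dr_cmd_rtc_create_attr rtc_attr = {0};

    /* First match STE: same definer logic as before the rename. */
    rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
    rtc_attr.match_definer_0 = mlx5dr_definer_get_id(matcher->mt->definer);
    rtc_attr.is_frst_jumbo = mlx5dr_definer_is_jumbo(matcher->mt->definer);

    /* Optional second STE used for range matching; range_definer_id is a
     * hypothetical placeholder for the matcher's range definer. */
    rtc_attr.match_definer_1 = range_definer_id;
    rtc_attr.is_scnd_range = true;

With is_scnd_range set, mlx5dr_cmd_rtc_create() programs the new PRM layout
accordingly (ste_format_1 = MLX5_IFC_RTC_STE_FORMAT_RANGE, num_match_ste = 2),
as shown in the mlx5dr_cmd.c hunk below.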
  

Patch

diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 6d0b5e640c..cf46296afb 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -3237,6 +3237,7 @@  enum mlx5_ifc_rtc_access_mode {
 enum mlx5_ifc_rtc_ste_format {
 	MLX5_IFC_RTC_STE_FORMAT_8DW = 0x4,
 	MLX5_IFC_RTC_STE_FORMAT_11DW = 0x5,
+	MLX5_IFC_RTC_STE_FORMAT_RANGE = 0x7,
 };
 
 enum mlx5_ifc_rtc_reparse_mode {
@@ -3251,24 +3252,27 @@  struct mlx5_ifc_rtc_bits {
 	u8 reserved_at_40[0x40];
 	u8 update_index_mode[0x2];
 	u8 reparse_mode[0x2];
-	u8 reserved_at_84[0x4];
+	u8 num_match_ste[0x4];
 	u8 pd[0x18];
 	u8 reserved_at_a0[0x9];
 	u8 access_index_mode[0x3];
 	u8 num_hash_definer[0x4];
-	u8 reserved_at_b0[0x3];
+	u8 update_method[0x1];
+	u8 reserved_at_b1[0x2];
 	u8 log_depth[0x5];
 	u8 log_hash_size[0x8];
-	u8 ste_format[0x8];
+	u8 ste_format_0[0x8];
 	u8 table_type[0x8];
-	u8 reserved_at_d0[0x10];
-	u8 match_definer_id[0x20];
+	u8 ste_format_1[0x8];
+	u8 reserved_at_d8[0x8];
+	u8 match_definer_0[0x20];
 	u8 stc_id[0x20];
 	u8 ste_table_base_id[0x20];
 	u8 ste_table_offset[0x20];
 	u8 reserved_at_160[0x8];
 	u8 miss_flow_table_id[0x18];
-	u8 reserved_at_180[0x280];
+	u8 match_definer_1[0x20];
+	u8 reserved_at_1a0[0x260];
 };
 
 struct mlx5_ifc_alias_context_bits {
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index e311be780b..a8d1cf0322 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -259,17 +259,26 @@  mlx5dr_cmd_rtc_create(struct ibv_context *ctx,
 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC);
 
 	attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
-	MLX5_SET(rtc, attr, ste_format, rtc_attr->is_jumbo ?
+	MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
 		MLX5_IFC_RTC_STE_FORMAT_11DW :
 		MLX5_IFC_RTC_STE_FORMAT_8DW);
+
+	if (rtc_attr->is_scnd_range) {
+		MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
+		MLX5_SET(rtc, attr, num_match_ste, 2);
+	}
+
 	MLX5_SET(rtc, attr, pd, rtc_attr->pd);
+	MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
 	MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
 	MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
 	MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
 	MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
 	MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
 	MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
-	MLX5_SET(rtc, attr, match_definer_id, rtc_attr->definer_id);
+	MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
+	MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
+	MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
 	MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
 	MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
 	MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index a42218ba74..e062cb8171 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -23,8 +23,8 @@  struct mlx5dr_cmd_ft_modify_attr {
 };
 
 struct mlx5dr_cmd_fg_attr {
-	uint32_t	table_id;
-	uint32_t	table_type;
+	uint32_t table_id;
+	uint32_t table_type;
 };
 
 struct mlx5dr_cmd_forward_tbl {
@@ -40,14 +40,17 @@  struct mlx5dr_cmd_rtc_create_attr {
 	uint32_t ste_base;
 	uint32_t ste_offset;
 	uint32_t miss_ft_id;
+	bool fw_gen_wqe;
 	uint8_t update_index_mode;
 	uint8_t access_index_mode;
 	uint8_t num_hash_definer;
 	uint8_t log_depth;
 	uint8_t log_size;
 	uint8_t table_type;
-	uint8_t definer_id;
-	bool is_jumbo;
+	uint8_t match_definer_0;
+	uint8_t match_definer_1;
+	bool is_frst_jumbo;
+	bool is_scnd_range;
 };
 
 struct mlx5dr_cmd_alias_obj_create_attr {
diff --git a/drivers/net/mlx5/hws/mlx5dr_matcher.c b/drivers/net/mlx5/hws/mlx5dr_matcher.c
index 913bb9d447..101a12d361 100644
--- a/drivers/net/mlx5/hws/mlx5dr_matcher.c
+++ b/drivers/net/mlx5/hws/mlx5dr_matcher.c
@@ -413,6 +413,8 @@  static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher,
 	struct mlx5dr_pool *ste_pool, *stc_pool;
 	struct mlx5dr_devx_obj *devx_obj;
 	struct mlx5dr_pool_chunk *ste;
+	uint8_t first_definer_id;
+	bool is_jumbo;
 	int ret;
 
 	switch (rtc_type) {
@@ -426,12 +428,15 @@  static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher,
 		rtc_attr.log_depth = attr->table.sz_col_log;
 		rtc_attr.miss_ft_id = matcher->end_ft->id;
 
+		is_jumbo = mlx5dr_definer_is_jumbo(matcher->mt->definer);
+		first_definer_id = mlx5dr_definer_get_id(matcher->mt->definer);
+
 		if (attr->insert_mode == MLX5DR_MATCHER_INSERT_BY_HASH) {
 			/* The usual Hash Table */
 			rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
 			/* The first match template is used since all share the same definer */
-			rtc_attr.definer_id = mlx5dr_definer_get_id(matcher->mt->definer);
-			rtc_attr.is_jumbo = mlx5dr_definer_is_jumbo(matcher->mt->definer);
+			rtc_attr.match_definer_0 = first_definer_id;
+			rtc_attr.is_frst_jumbo = is_jumbo;
 		} else if (attr->insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) {
 			rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
 			rtc_attr.num_hash_definer = 1;
@@ -439,12 +444,12 @@  static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher,
 			if (attr->distribute_mode == MLX5DR_MATCHER_DISTRIBUTE_BY_HASH) {
 				/* Hash Split Table */
 				rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
-				rtc_attr.definer_id = mlx5dr_definer_get_id(matcher->mt->definer);
-				rtc_attr.is_jumbo = mlx5dr_definer_is_jumbo(matcher->mt->definer);
+				rtc_attr.match_definer_0 = first_definer_id;
+				rtc_attr.is_frst_jumbo = is_jumbo;
 			} else if (attr->distribute_mode == MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR) {
 				/* Linear Lookup Table */
 				rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
-				rtc_attr.definer_id = ctx->caps->linear_match_definer;
+				rtc_attr.match_definer_0 = ctx->caps->linear_match_definer;
 			}
 		}
 
@@ -468,8 +473,8 @@  static int mlx5dr_matcher_create_rtc(struct mlx5dr_matcher *matcher,
 		rtc_attr.log_depth = 0;
 		rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
 		/* The action STEs use the default always hit definer */
-		rtc_attr.definer_id = ctx->caps->trivial_match_definer;
-		rtc_attr.is_jumbo = false;
+		rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+		rtc_attr.is_frst_jumbo = false;
 		rtc_attr.miss_ft_id = 0;
 		break;