[v1,07/16] net/mlx5/hws: add send FW range STE WQE

Message ID 20230131093346.1261066-8-valex@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Headers
Series net/mlx5/hws: support range and partial hash matching |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Alex Vesker Jan. 31, 2023, 9:33 a.m. UTC
  The FW WQE supports complex rules constructed from 2 STEs,
for example:
        Hash(DefinerA)
        SteMatch(DefinerB)
        SteRange(DefinerC)
        DefinerA is a subset of DefinerB

This complex rule is written using a single FW command which
has a single WQE control, STE match data0 and STE range data1.
FW manages STEs/ICM and coherency between deletion and creation.
It is also possible to pass the definer value as part of the
STE, but this is not supported by current HW.

Signed-off-by: Alex Vesker <valex@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_send.c | 19 +++++++++++++++++++
 drivers/net/mlx5/hws/mlx5dr_send.h |  3 +++
 2 files changed, 22 insertions(+)
  

Patch

diff --git a/drivers/net/mlx5/hws/mlx5dr_send.c b/drivers/net/mlx5/hws/mlx5dr_send.c
index a9958df4f2..51aaf5c8e2 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.c
+++ b/drivers/net/mlx5/hws/mlx5dr_send.c
@@ -242,11 +242,15 @@  int mlx5dr_send_wqe_fw(struct ibv_context *ibv_ctx,
 		       struct mlx5dr_wqe_gta_ctrl_seg *send_wqe_ctrl,
 		       void *send_wqe_match_data,
 		       void *send_wqe_match_tag,
+		       void *send_wqe_range_data,
+		       void *send_wqe_range_tag,
 		       bool is_jumbo,
 		       uint8_t gta_opcode)
 {
+	bool has_range = send_wqe_range_data || send_wqe_range_tag;
 	bool has_match = send_wqe_match_data || send_wqe_match_tag;
 	struct mlx5dr_wqe_gta_data_seg_ste gta_wqe_data0 = {0};
+	struct mlx5dr_wqe_gta_data_seg_ste gta_wqe_data1 = {0};
 	struct mlx5dr_wqe_gta_ctrl_seg gta_wqe_ctrl = {0};
 	struct mlx5dr_cmd_generate_wqe_attr attr = {0};
 	struct mlx5dr_wqe_ctrl_seg wqe_ctrl = {0};
@@ -278,6 +282,17 @@  int mlx5dr_send_wqe_fw(struct ibv_context *ibv_ctx,
 		attr.gta_data_0 = (uint8_t *)&gta_wqe_data0;
 	}
 
+	/* Set GTA range WQE DATA */
+	if (has_range) {
+		if (send_wqe_range_data)
+			memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1));
+		else
+			mlx5dr_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false);
+
+		gta_wqe_data1.rsvd1_definer = htobe32(send_attr->range_definer_id << 8);
+		attr.gta_data_1 = (uint8_t *)&gta_wqe_data1;
+	}
+
 	attr.pdn = pd_num;
 	attr.wqe_ctrl = (uint8_t *)&wqe_ctrl;
 	attr.gta_ctrl = (uint8_t *)&gta_wqe_ctrl;
@@ -336,6 +351,8 @@  void mlx5dr_send_stes_fw(struct mlx5dr_send_engine *queue,
 					 ste_attr->wqe_ctrl,
 					 ste_attr->wqe_data,
 					 ste_attr->wqe_tag,
+					 ste_attr->range_wqe_data,
+					 ste_attr->range_wqe_tag,
 					 ste_attr->wqe_tag_is_jumbo,
 					 ste_attr->gta_opcode);
 		if (ret)
@@ -350,6 +367,8 @@  void mlx5dr_send_stes_fw(struct mlx5dr_send_engine *queue,
 					 ste_attr->wqe_ctrl,
 					 ste_attr->wqe_data,
 					 ste_attr->wqe_tag,
+					 ste_attr->range_wqe_data,
+					 ste_attr->range_wqe_tag,
 					 ste_attr->wqe_tag_is_jumbo,
 					 ste_attr->gta_opcode);
 		if (ret)
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.h b/drivers/net/mlx5/hws/mlx5dr_send.h
index 1e845b1c7a..47bb66b3c7 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.h
+++ b/drivers/net/mlx5/hws/mlx5dr_send.h
@@ -161,6 +161,7 @@  struct mlx5dr_send_engine_post_attr {
 	uint8_t notify_hw;
 	uint8_t fence;
 	uint8_t match_definer_id;
+	uint8_t range_definer_id;
 	size_t len;
 	struct mlx5dr_rule *rule;
 	uint32_t id;
@@ -182,8 +183,10 @@  struct mlx5dr_send_ste_attr {
 	uint32_t direct_index;
 	struct mlx5dr_send_engine_post_attr send_attr;
 	struct mlx5dr_rule_match_tag *wqe_tag;
+	struct mlx5dr_rule_match_tag *range_wqe_tag;
 	struct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;
 	struct mlx5dr_wqe_gta_data_seg_ste *wqe_data;
+	struct mlx5dr_wqe_gta_data_seg_ste *range_wqe_data;
 };
 
 /**