[v2,01/16] net/mlx5/hws: support synchronous drain

Message ID 20230201072815.1329101-2-valex@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Headers
Series net/mlx5/hws: support range and partial hash matching |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-testing warning apply patch failure

Commit Message

Alex Vesker Feb. 1, 2023, 7:28 a.m. UTC
  Until now we supported asynchronous drain, triggering the queue
to start the drain. Now we add support for synchronous drain, which
assures all the work was processed on the queue.

This is useful when working with a FW command and a HW queue in parallel,
sending arguments over the HW queue and matching over the FW command,
which requires synchronization.

This also fixes an issue with shared argument sends that require more than
one WQE.

Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Alex Vesker <valex@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr.h         |  6 ++++--
 drivers/net/mlx5/hws/mlx5dr_pat_arg.c | 27 ++++-----------------------
 drivers/net/mlx5/hws/mlx5dr_send.c    | 16 ++++++++++++++--
 drivers/net/mlx5/hws/mlx5dr_send.h    |  5 +++++
 drivers/net/mlx5/mlx5_flow_hw.c       |  2 +-
 5 files changed, 28 insertions(+), 28 deletions(-)
  

Patch

diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h
index b3b2bf34f2..2b02884dc3 100644
--- a/drivers/net/mlx5/hws/mlx5dr.h
+++ b/drivers/net/mlx5/hws/mlx5dr.h
@@ -86,8 +86,10 @@  enum mlx5dr_match_template_flags {
 };
 
 enum mlx5dr_send_queue_actions {
-	/* Start executing all pending queued rules and write to HW */
-	MLX5DR_SEND_QUEUE_ACTION_DRAIN = 1 << 0,
+	/* Start executing all pending queued rules */
+	MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC = 1 << 0,
+	/* Start executing all pending queued rules wait till completion */
+	MLX5DR_SEND_QUEUE_ACTION_DRAIN_SYNC = 1 << 1,
 };
 
 struct mlx5dr_context_attr {
diff --git a/drivers/net/mlx5/hws/mlx5dr_pat_arg.c b/drivers/net/mlx5/hws/mlx5dr_pat_arg.c
index df451f1ae0..152025d302 100644
--- a/drivers/net/mlx5/hws/mlx5dr_pat_arg.c
+++ b/drivers/net/mlx5/hws/mlx5dr_pat_arg.c
@@ -306,27 +306,6 @@  void mlx5dr_arg_decapl3_write(struct mlx5dr_send_engine *queue,
 	mlx5dr_send_engine_post_end(&ctrl, &send_attr);
 }
 
-static int
-mlx5dr_arg_poll_for_comp(struct mlx5dr_context *ctx, uint16_t queue_id)
-{
-	struct rte_flow_op_result comp[1];
-	int ret;
-
-	while (true) {
-		ret = mlx5dr_send_queue_poll(ctx, queue_id, comp, 1);
-		if (ret) {
-			if (ret < 0) {
-				DR_LOG(ERR, "Failed mlx5dr_send_queue_poll");
-			} else if (comp[0].status == RTE_FLOW_OP_ERROR) {
-				DR_LOG(ERR, "Got comp with error");
-				rte_errno = ENOENT;
-			}
-			break;
-		}
-	}
-	return (ret == 1 ? 0 : ret);
-}
-
 void mlx5dr_arg_write(struct mlx5dr_send_engine *queue,
 		      void *comp_data,
 		      uint32_t arg_idx,
@@ -388,9 +367,11 @@  int mlx5dr_arg_write_inline_arg_data(struct mlx5dr_context *ctx,
 	mlx5dr_send_engine_flush_queue(queue);
 
 	/* Poll for completion */
-	ret = mlx5dr_arg_poll_for_comp(ctx, ctx->queues - 1);
+	ret = mlx5dr_send_queue_action(ctx, ctx->queues - 1,
+				       MLX5DR_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
 	if (ret)
-		DR_LOG(ERR, "Failed to get completions for shared action");
+		DR_LOG(ERR, "Failed to drain arg queue");
 
 	pthread_spin_unlock(&ctx->ctrl_lock);
 
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.c b/drivers/net/mlx5/hws/mlx5dr_send.c
index 5c8bbe6fc6..a507e5f626 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.c
+++ b/drivers/net/mlx5/hws/mlx5dr_send.c
@@ -830,18 +830,30 @@  int mlx5dr_send_queue_action(struct mlx5dr_context *ctx,
 {
 	struct mlx5dr_send_ring_sq *send_sq;
 	struct mlx5dr_send_engine *queue;
+	bool wait_comp = false;
+	int64_t polled = 0;
 
 	queue = &ctx->send_queue[queue_id];
 	send_sq = &queue->send_ring->send_sq;
 
-	if (actions == MLX5DR_SEND_QUEUE_ACTION_DRAIN) {
+	switch (actions) {
+	case MLX5DR_SEND_QUEUE_ACTION_DRAIN_SYNC:
+		wait_comp = true;
+		/* FALLTHROUGH */
+	case MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC:
 		if (send_sq->head_dep_idx != send_sq->tail_dep_idx)
 			/* Send dependent WQEs to drain the queue */
 			mlx5dr_send_all_dep_wqe(queue);
 		else
 			/* Signal on the last posted WQE */
 			mlx5dr_send_engine_flush_queue(queue);
-	} else {
+
+		/* Poll queue until empty */
+		while (wait_comp && !mlx5dr_send_engine_empty(queue))
+			mlx5dr_send_engine_poll_cqs(queue, NULL, &polled, 0);
+
+		break;
+	default:
 		rte_errno = -EINVAL;
 		return rte_errno;
 	}
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.h b/drivers/net/mlx5/hws/mlx5dr_send.h
index 8d4769495d..fcddcc6366 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.h
+++ b/drivers/net/mlx5/hws/mlx5dr_send.h
@@ -240,6 +240,11 @@  void mlx5dr_send_ste(struct mlx5dr_send_engine *queue,
 
 void mlx5dr_send_engine_flush_queue(struct mlx5dr_send_engine *queue);
 
+static inline bool mlx5dr_send_engine_empty(struct mlx5dr_send_engine *queue)
+{
+	return (queue->send_ring->send_sq.cur_post == queue->send_ring->send_cq.poll_wqe);
+}
+
 static inline bool mlx5dr_send_engine_full(struct mlx5dr_send_engine *queue)
 {
 	return queue->used_entries >= queue->th_entries;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 20c71ff7f0..7e87d589cb 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -2851,7 +2851,7 @@  flow_hw_push(struct rte_eth_dev *dev,
 
 	__flow_hw_push_action(dev, queue);
 	ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
-				       MLX5DR_SEND_QUEUE_ACTION_DRAIN);
+				       MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC);
 	if (ret) {
 		rte_flow_error_set(error, rte_errno,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,