[RFC,4/5] net/mlx5: add comprehensive send completion trace

Message ID 20230420100803.494-5-viacheslavo@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series net/mlx5: introduce Tx datapath tracing

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Slava Ovsiienko April 20, 2023, 10:08 a.m. UTC
  There is a demand to trace the send completion of
every WQE when send scheduling on time is enabled.

The patch extends the size of the completion queue and
requests a completion on every WQE issued to the send
queue. As a result, the hardware provides a CQE for
each completed WQE and the driver is able to fetch the
completion timestamp for the dedicated operation.
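For illustration, the sizing rule this patch applies in both the
Verbs and DevX queue creation paths can be restated as a standalone
helper (hypothetical, with placeholder threshold values; the real
constants live in the PMD headers):

#include <stdbool.h>
#include <stdint.h>

/* Placeholder values for illustration only; the PMD's real
 * thresholds are defined in mlx5_defs.h.
 */
#define MLX5_TX_COMP_THRESH 32
#define MLX5_TX_COMP_THRESH_INLINE_DIV 8

/* Hypothetical restatement of the cqe_n computation from
 * mlx5_txq_ibv_obj_new() and mlx5_txq_devx_obj_new() below.
 */
static inline uint32_t
tx_cqe_num(uint32_t desc, bool trace_fp_on, bool send_on_timestamp)
{
	if (trace_fp_on && send_on_timestamp)
		/* A CQE may be requested for every WQE: size the CQ so
		 * it cannot overflow; the value is chosen so that the
		 * power-of-two round-up in the DevX path stays within
		 * the UINT16_MAX hardware limit.
		 */
		return UINT16_MAX / 2 - 1;
	/* Default: one CQE per completion threshold batch. */
	return desc / MLX5_TX_COMP_THRESH +
	       1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
}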

The added code is compiled conditionally under the
RTE_ENABLE_TRACE_FP flag and does not impact release
builds.
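
This relies on DPDK's fast-path trace helper, which reduces to a
compile-time constant when RTE_ENABLE_TRACE_FP is unset, so every
branch guarded by it in the diff below is eliminated by the compiler
(sketch paraphrased from rte_trace_point.h):

#include <stdbool.h>
#include <rte_common.h>

/* Paraphrased sketch of the guard from rte_trace_point.h: without
 * RTE_ENABLE_TRACE_FP this returns a constant false, so the traced
 * code paths added by this patch compile away entirely.
 */
static __rte_always_inline bool
__rte_trace_point_fp_is_enabled(void)
{
#ifdef RTE_ENABLE_TRACE_FP
	return true;
#else
	return false;
#endif
}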

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_verbs.c |  8 +++-
 drivers/net/mlx5/mlx5_devx.c        |  8 +++-
 drivers/net/mlx5/mlx5_tx.h          | 63 +++++++++++++++++++++++++++--
 3 files changed, 71 insertions(+), 8 deletions(-)
  

Patch

diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 67a7bec22b..f3f717f17b 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -968,8 +968,12 @@  mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
-	cqe_n = desc / MLX5_TX_COMP_THRESH +
-		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
+	if (__rte_trace_point_fp_is_enabled() &&
+	    txq_data->offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP)
+		cqe_n = UINT16_MAX / 2 - 1;
+	else
+		cqe_n = desc / MLX5_TX_COMP_THRESH +
+			1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
 	txq_obj->cq = mlx5_glue->create_cq(priv->sh->cdev->ctx, cqe_n,
 					   NULL, NULL, 0);
 	if (txq_obj->cq == NULL) {
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 4369d2557e..5082a7e178 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -1465,8 +1465,12 @@  mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	MLX5_ASSERT(ppriv);
 	txq_obj->txq_ctrl = txq_ctrl;
 	txq_obj->dev = dev;
-	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
-		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
+	if (__rte_trace_point_fp_is_enabled() &&
+	    txq_data->offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP)
+		cqe_n = UINT16_MAX / 2 - 1;
+	else
+		cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
+			1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
 	log_desc_n = log2above(cqe_n);
 	cqe_n = 1UL << log_desc_n;
 	if (cqe_n > UINT16_MAX) {
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 7f624de58e..9f29df280f 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -728,6 +728,54 @@  mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
 	}
 }
 
+/**
+ * Set completion request flag for all issued WQEs.
+ * This routine is intended to be used with enabled fast path tracing
+ * and send scheduling on time to provide the detailed report in trace
+ * for send completions on every WQE.
+ *
+ * @param txq
+ *   Pointer to TX queue structure.
+ * @param loc
+ *   Pointer to burst routine local context.
+ * @param olx
+ *   Configured Tx offloads mask. It is fully defined at
+ *   compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_request_completion_trace(struct mlx5_txq_data *__rte_restrict txq,
+				 struct mlx5_txq_local *__rte_restrict loc,
+				 unsigned int olx)
+{
+	uint16_t head = txq->elts_comp;
+
+	while (txq->wqe_comp != txq->wqe_ci) {
+		volatile struct mlx5_wqe *wqe;
+		uint32_t wqe_n;
+
+		MLX5_ASSERT(loc->wqe_last);
+		wqe = txq->wqes + (txq->wqe_comp & txq->wqe_m);
+		if (wqe == loc->wqe_last) {
+			head = txq->elts_head;
+			head +=	MLX5_TXOFF_CONFIG(INLINE) ?
+				0 : loc->pkts_sent - loc->pkts_copy;
+			txq->elts_comp = head;
+		}
+		/* Completion request flag was set on cseg constructing. */
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+		txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
+			  (wqe->cseg.opcode >> 8) << 16;
+#else
+		txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
+#endif
+		/* A CQE slot must always be available. */
+		MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
+		/* Advance to the next WQE in the queue. */
+		wqe_n = rte_be_to_cpu_32(wqe->cseg.sq_ds) & 0x3F;
+		txq->wqe_comp += RTE_ALIGN(wqe_n, 4) / 4;
+	}
+}
+
 /**
  * Build the Control Segment with specified opcode:
  * - MLX5_OPCODE_SEND
@@ -754,7 +802,7 @@  mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
 		  struct mlx5_wqe *__rte_restrict wqe,
 		  unsigned int ds,
 		  unsigned int opcode,
-		  unsigned int olx __rte_unused)
+		  unsigned int olx)
 {
 	struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
 
@@ -763,8 +811,12 @@  mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
 		opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
 	cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
 	cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
-	cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
-			     MLX5_COMP_MODE_OFFSET);
+	if (MLX5_TXOFF_CONFIG(TXPP) && __rte_trace_point_fp_is_enabled())
+		cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+				     MLX5_COMP_MODE_OFFSET);
+	else
+		cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
+				     MLX5_COMP_MODE_OFFSET);
 	cs->misc = RTE_BE32(0);
 	if (__rte_trace_point_fp_is_enabled() && !loc->pkts_sent)
 		rte_pmd_mlx5_trace_tx_entry(txq->port_id, txq->idx);
@@ -3662,7 +3714,10 @@  mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
 	if (unlikely(loc.pkts_sent == loc.pkts_loop))
 		goto burst_exit;
 	/* Request CQE generation if limits are reached. */
-	mlx5_tx_request_completion(txq, &loc, olx);
+	if (MLX5_TXOFF_CONFIG(TXPP) && __rte_trace_point_fp_is_enabled())
+		mlx5_tx_request_completion_trace(txq, &loc, olx);
+	else
+		mlx5_tx_request_completion(txq, &loc, olx);
 	/*
 	 * Ring QP doorbell immediately after WQE building completion
 	 * to improve latencies. The pure software related data treatment
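
A note on the producer-index advance at the end of
mlx5_tx_request_completion_trace() above: the low 6 bits of
cseg.sq_ds carry the WQE size in 16-byte data segments, and one WQEBB
(WQE basic block) is 64 bytes, i.e. four segments, so wqe_comp moves
by ceil(ds / 4). A standalone restatement (not driver code):

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_common.h>

/* How many WQEBBs a WQE occupies, given its big-endian sq_ds word:
 * the low 6 bits hold the count of 16-byte data segments, and each
 * WQEBB holds 4 such segments.
 */
static inline uint16_t
wqe_comp_advance(rte_be32_t sq_ds)
{
	uint32_t ds = rte_be_to_cpu_32(sq_ds) & 0x3F;

	return RTE_ALIGN(ds, 4) / 4; /* ceil(ds / 4) */
}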