[v1,3/3] net/cnxk: add debug check for number of Tx descriptors

Message ID 20221117072558.3582292-3-asekhar@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Jerin Jacob
Headers
Series [v1,1/3] net/cnxk: rework no-fast-free offload handling |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/intel-Testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/github-robot: build success github build: passed
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS

Commit Message

Ashwin Sekhar T K Nov. 17, 2022, 7:25 a.m. UTC
  When SG2 descriptors are used and more than 5 segments
are present, in certain combinations of segments the
number of descriptors required will be greater than
16.

In debug builds, add an assert to capture this scenario.

Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
  

Comments

Jerin Jacob Jan. 6, 2023, 8:58 a.m. UTC | #1
On Thu, Nov 17, 2022 at 12:56 PM Ashwin Sekhar T K <asekhar@marvell.com> wrote:
>
> When SG2 descriptors are used and more than 5 segments
> are present, in certain combination of segments the
> number of descriptors required will be greater than
> 16.
>
> In debug builds, add an assert to capture this scenario.
>
> Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>


Series applied to dpdk-next-net-mrvl/for-next-net. Thanks


> ---
>  drivers/net/cnxk/cn10k_tx.h | 20 ++++++++++++++++++++
>  1 file changed, 20 insertions(+)
>
> diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
> index 3f08a8a473..09c332b2b5 100644
> --- a/drivers/net/cnxk/cn10k_tx.h
> +++ b/drivers/net/cnxk/cn10k_tx.h
> @@ -84,6 +84,22 @@ cn10k_nix_mbuf_sg_dwords(struct rte_mbuf *m)
>         return (segw + 1) / 2;
>  }
>
> +static __plt_always_inline void
> +cn10k_nix_tx_mbuf_validate(struct rte_mbuf *m, const uint32_t flags)
> +{
> +#ifdef RTE_LIBRTE_MBUF_DEBUG
> +       uint16_t segdw;
> +
> +       segdw = cn10k_nix_mbuf_sg_dwords(m);
> +       segdw += 1 + !!(flags & NIX_TX_NEED_EXT_HDR) + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
> +
> +       PLT_ASSERT(segdw <= 8);
> +#else
> +       RTE_SET_USED(m);
> +       RTE_SET_USED(flags);
> +#endif
> +}
> +
>  static __plt_always_inline void
>  cn10k_nix_vwqe_wait_fc(struct cn10k_eth_txq *txq, int64_t req)
>  {
> @@ -1307,6 +1323,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
>         }
>
>         for (i = 0; i < burst; i++) {
> +               cn10k_nix_tx_mbuf_validate(tx_pkts[i], flags);
> +
>                 /* Perform header writes for TSO, barrier at
>                  * lmt steorl will suffice.
>                  */
> @@ -1906,6 +1924,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
>                         for (j = 0; j < NIX_DESCS_PER_LOOP; j++) {
>                                 struct rte_mbuf *m = tx_pkts[j];
>
> +                               cn10k_nix_tx_mbuf_validate(m, flags);
> +
>                                 /* Get dwords based on nb_segs. */
>                                 if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F &&
>                                       flags & NIX_TX_MULTI_SEG_F))
> --
> 2.25.1
>
  

Patch

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 3f08a8a473..09c332b2b5 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -84,6 +84,22 @@  cn10k_nix_mbuf_sg_dwords(struct rte_mbuf *m)
 	return (segw + 1) / 2;
 }
 
+static __plt_always_inline void
+cn10k_nix_tx_mbuf_validate(struct rte_mbuf *m, const uint32_t flags)
+{
+#ifdef RTE_LIBRTE_MBUF_DEBUG
+	uint16_t segdw;
+
+	segdw = cn10k_nix_mbuf_sg_dwords(m);
+	segdw += 1 + !!(flags & NIX_TX_NEED_EXT_HDR) + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
+
+	PLT_ASSERT(segdw <= 8);
+#else
+	RTE_SET_USED(m);
+	RTE_SET_USED(flags);
+#endif
+}
+
 static __plt_always_inline void
 cn10k_nix_vwqe_wait_fc(struct cn10k_eth_txq *txq, int64_t req)
 {
@@ -1307,6 +1323,8 @@  cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 	}
 
 	for (i = 0; i < burst; i++) {
+		cn10k_nix_tx_mbuf_validate(tx_pkts[i], flags);
+
 		/* Perform header writes for TSO, barrier at
 		 * lmt steorl will suffice.
 		 */
@@ -1906,6 +1924,8 @@  cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 			for (j = 0; j < NIX_DESCS_PER_LOOP; j++) {
 				struct rte_mbuf *m = tx_pkts[j];
 
+				cn10k_nix_tx_mbuf_validate(m, flags);
+
 				/* Get dwords based on nb_segs. */
 				if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F &&
 				      flags & NIX_TX_MULTI_SEG_F))