[v2] event/cnxk: wait for CPT flow control on WQE path

Message ID 20230119055315.915251-1-rbhansali@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Jerin Jacob
Series: [v2] event/cnxk: wait for CPT flow control on WQE path

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/intel-Testing success Testing PASS
ci/github-robot: build success github build: passed
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-testing fail Testing issues
ci/iol-x86_64-unit-testing fail Testing issues
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-abi-testing success Testing PASS

Commit Message

Rahul Bhansali Jan. 19, 2023, 5:53 a.m. UTC
  This is to avoid a CPT queue overflow, and thereby a CPT misc
interrupt, when a CPT instruction is submitted on the WQE path.

Fixes: 1a7da795f64 ("net/cnxk: support Tx security offload on cn9k")

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: Updated commit message.

 drivers/event/cnxk/cn9k_worker.h |  1 +
 drivers/net/cnxk/cn9k_tx.h       | 10 ++++++++++
 2 files changed, 11 insertions(+)

--
2.25.1
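
A minimal standalone sketch of the flow-control wait this patch introduces is below. The struct and function names are illustrative only; the real fields are txq->cpt_desc and txq->cpt_fc in struct cn9k_eth_txq, as the diff shows.

#include <stdint.h>
#include <stdio.h>

/* Editorial sketch (not the driver code): poll a CPT flow-control counter
 * until the number of in-flight CPT instructions drops below the queue
 * depth, so one more instruction can be enqueued without overflowing the
 * CPT queue and triggering a misc interrupt. */
struct cpt_q_sketch {
	uint64_t cpt_desc; /* CPT queue depth (max outstanding instructions) */
	uint64_t *cpt_fc;  /* flow-control count kept up to date by hardware */
};

static inline void
cpt_fc_wait_one(const struct cpt_q_sketch *q)
{
	/* Busy-wait while the queue is full; a relaxed atomic load is
	 * sufficient for a loop that only polls the counter. */
	while (q->cpt_desc <= __atomic_load_n(q->cpt_fc, __ATOMIC_RELAXED))
		;
}

int main(void)
{
	uint64_t fc = 3; /* pretend 3 instructions are currently outstanding */
	struct cpt_q_sketch q = { .cpt_desc = 8, .cpt_fc = &fc };

	cpt_fc_wait_one(&q); /* returns immediately since 3 < 8 */
	printf("room for one more CPT instruction\n");
	return 0;
}

In the actual patch, the equivalent wait (cn9k_nix_sec_fc_wait_one) is placed on the event Tx path after the existing Tx queue flow-control wait and right before the CPT instruction is written to the LMT line, as the cn9k_worker.h hunk shows.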
  

Comments

Jerin Jacob Jan. 24, 2023, 8:52 a.m. UTC | #1
On Thu, Jan 19, 2023 at 11:23 AM Rahul Bhansali <rbhansali@marvell.com> wrote:
>
> This is to avoid CPT queue overflow and thereby a CPT misc
> interrupt.
>
> Fixes: 1a7da795f64 ("net/cnxk: support Tx security offload on cn9k")
>
> Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>

Acked-by: Jerin Jacob <jerinj@marvell.com>


> ---
> Changes in v2: Updated commit message.
>
>  drivers/event/cnxk/cn9k_worker.h |  1 +
>  drivers/net/cnxk/cn9k_tx.h       | 10 ++++++++++
>  2 files changed, 11 insertions(+)
>
> diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
> index 54213db3b4..1ce4b044e8 100644
> --- a/drivers/event/cnxk/cn9k_worker.h
> +++ b/drivers/event/cnxk/cn9k_worker.h
> @@ -730,6 +730,7 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
>
>         rte_io_wmb();
>         cn9k_sso_txq_fc_wait(txq);
> +       cn9k_nix_sec_fc_wait_one(txq);
>
>         /* Write CPT instruction to lmt line */
>         vst1q_u64(lmt_addr, cmd01);
> diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
> index 17bbdce3a0..b4ef45d65c 100644
> --- a/drivers/net/cnxk/cn9k_tx.h
> +++ b/drivers/net/cnxk/cn9k_tx.h
> @@ -411,6 +411,16 @@ cn9k_nix_xmit_prep_lmt(uint64_t *cmd, void *lmt_addr, const uint32_t flags)
>         roc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags));
>  }
>
> +static __rte_always_inline void
> +cn9k_nix_sec_fc_wait_one(const struct cn9k_eth_txq *txq)
> +{
> +       uint64_t nb_desc = txq->cpt_desc;
> +       uint64_t *fc = txq->cpt_fc;
> +
> +       while (nb_desc <= __atomic_load_n(fc, __ATOMIC_RELAXED))
> +               ;
> +}
> +
>  static __rte_always_inline uint64_t
>  cn9k_nix_xmit_submit_lmt(const rte_iova_t io_addr)
>  {
> --
> 2.25.1
>
  
Jerin Jacob Jan. 24, 2023, 8:53 a.m. UTC | #2
On Tue, Jan 24, 2023 at 2:22 PM Jerin Jacob <jerinjacobk@gmail.com> wrote:
>
> On Thu, Jan 19, 2023 at 11:23 AM Rahul Bhansali <rbhansali@marvell.com> wrote:
> >
> > This is to avoid CPT queue overflow and thereby a CPT misc
> > interrupt.
> >
> > Fixes: 1a7da795f64 ("net/cnxk: support Tx security offload on cn9k")
> >
> > Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
>
> Acked-by: Jerin Jacob <jerinj@marvell.com>

Applied to dpdk-next-net-eventdev/for-main. Thanks


  

Patch

diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 54213db3b4..1ce4b044e8 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -730,6 +730,7 @@  cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,

 	rte_io_wmb();
 	cn9k_sso_txq_fc_wait(txq);
+	cn9k_nix_sec_fc_wait_one(txq);

 	/* Write CPT instruction to lmt line */
 	vst1q_u64(lmt_addr, cmd01);
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index 17bbdce3a0..b4ef45d65c 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -411,6 +411,16 @@  cn9k_nix_xmit_prep_lmt(uint64_t *cmd, void *lmt_addr, const uint32_t flags)
 	roc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags));
 }

+static __rte_always_inline void
+cn9k_nix_sec_fc_wait_one(const struct cn9k_eth_txq *txq)
+{
+	uint64_t nb_desc = txq->cpt_desc;
+	uint64_t *fc = txq->cpt_fc;
+
+	while (nb_desc <= __atomic_load_n(fc, __ATOMIC_RELAXED))
+		;
+}
+
 static __rte_always_inline uint64_t
 cn9k_nix_xmit_submit_lmt(const rte_iova_t io_addr)
 {