[10/15] net/cnxk: aura handle for fastpath Rx queues

Message ID 20230303081013.589868-10-ndabilpuram@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Jerin Jacob
Series [01/15] net/cnxk: resolve segfault caused during transmit completion

Checks

Context | Check | Description
ci/checkpatch | success | coding style OK

Commit Message

Nithin Dabilpuram March 3, 2023, 8:10 a.m. UTC
  From: Rahul Bhansali <rbhansali@marvell.com>

The meta aura for RQs is created during the queue enable step, so the
aura handle for the fastpath Rx queues must be updated only after that
step has completed.
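
For context, below is a minimal, self-contained sketch of the ordering this
patch enforces; it is not the cnxk driver code, and all types and names in it
are simplified stand-ins. The meta aura handle only becomes valid once the RQ
is enabled, so the fastpath queue copies it afterwards and falls back to the
normal packet pool aura when no meta aura was set up.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the RQ and the fastpath Rx queue state. */
struct rq {
	uint64_t meta_aura_handle; /* valid only after the RQ is enabled */
};

struct rxq {
	uint64_t meta_aura; /* fastpath copy used on Rx */
	uint64_t pool_aura; /* normal packet pool aura, used as fallback */
};

/* Queue enable is where the meta aura gets created. */
static void
rq_enable(struct rq *q)
{
	q->meta_aura_handle = 0xabcdULL; /* pretend a handle is allocated here */
}

/* Run after queue enable: copy the handle, or fall back to the pool aura. */
static void
rxq_meta_aura_update(struct rxq *r, const struct rq *q)
{
	r->meta_aura = q->meta_aura_handle ? q->meta_aura_handle : r->pool_aura;
}

int
main(void)
{
	struct rq q = { 0 };
	struct rxq r = { .meta_aura = 0, .pool_aura = 0x1234ULL };

	/* Reading q.meta_aura_handle at rx_queue_setup time would still see 0. */
	rq_enable(&q);                /* queue enable creates the meta aura */
	rxq_meta_aura_update(&r, &q); /* so the fastpath copy happens after it */

	printf("rxq meta_aura = 0x%" PRIx64 "\n", r.meta_aura);
	return 0;
}

In the actual patch the update is performed from cn10k_nix_dev_start(), and
only when NIX_RX_OFFLOAD_SECURITY_F is set, since the meta aura is only used
with the security offload.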

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cn10k_ethdev.c | 33 ++++++++++++++++++++++++++-------
 1 file changed, 26 insertions(+), 7 deletions(-)
  

Patch

diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index cb88bd2dc1..2dbca698af 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -283,7 +283,6 @@  cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			 struct rte_mempool *mp)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	struct cnxk_eth_rxq_sp *rxq_sp;
 	struct cn10k_eth_rxq *rxq;
 	struct roc_nix_rq *rq;
 	struct roc_nix_cq *cq;
@@ -335,17 +334,34 @@  cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		rxq->lmt_base = dev->nix.lmt_base;
 		rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix,
 							   dev->inb.inl_dev);
+	}
+
+	/* Lookup mem */
+	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
+	return 0;
+}
+
+static void
+cn10k_nix_rx_queue_meta_aura_update(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cnxk_eth_rxq_sp *rxq_sp;
+	struct cn10k_eth_rxq *rxq;
+	struct roc_nix_rq *rq;
+	int i;
+
+	/* Update Aura handle for fastpath rx queues */
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		rq = &dev->rqs[i];
+		rxq = eth_dev->data->rx_queues[i];
 		rxq->meta_aura = rq->meta_aura_handle;
-		rxq_sp = cnxk_eth_rxq_to_sp(rxq);
 		/* Assume meta packet from normal aura if meta aura is not setup
 		 */
-		if (!rxq->meta_aura)
+		if (!rxq->meta_aura) {
+			rxq_sp = cnxk_eth_rxq_to_sp(rxq);
 			rxq->meta_aura = rxq_sp->qconf.mp->pool_id;
+		}
 	}
-
-	/* Lookup mem */
-	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
-	return 0;
 }
 
 static int
@@ -557,6 +573,9 @@  cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
 	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
 	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
 
+	if (dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F)
+		cn10k_nix_rx_queue_meta_aura_update(eth_dev);
+
 	cn10k_eth_set_tx_function(eth_dev);
 	cn10k_eth_set_rx_function(eth_dev);
 	return 0;