[RFC,v2,4/9] net/gve: support queue release and stop for DQO

Message ID: 20230130062642.3337239-5-junfeng.guo@intel.com (mailing list archive)
State: Superseded, archived
Delegated to: Ferruh Yigit
Series: gve PMD enhancement

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Junfeng Guo Jan. 30, 2023, 6:26 a.m. UTC
  Add support for queue operations:
 - gve_tx_queue_release_dqo
 - gve_rx_queue_release_dqo
 - gve_stop_tx_queues_dqo
 - gve_stop_rx_queues_dqo

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Jordan Kimbrough <jrkim@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.c | 18 +++++++++---
 drivers/net/gve/gve_ethdev.h | 12 ++++++++
 drivers/net/gve/gve_rx.c     |  3 ++
 drivers/net/gve/gve_rx_dqo.c | 57 ++++++++++++++++++++++++++++++++++++
 drivers/net/gve/gve_tx.c     |  3 ++
 drivers/net/gve/gve_tx_dqo.c | 55 ++++++++++++++++++++++++++++++++++
 6 files changed, 144 insertions(+), 4 deletions(-)
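
The new callbacks are not invoked by applications directly; they are reached through the generic ethdev API once gve_eth_dev_ops_override() has installed the DQO variants. Below is a minimal, illustrative sketch (the port id, helper name and error handling are assumptions for illustration, not part of this patch) of the application-side calls that end up in the stop/release paths added here:

  #include <stdio.h>
  #include <rte_ethdev.h>

  /* Illustrative teardown: rte_eth_dev_stop() reaches gve_stop_tx/rx_queues(),
   * which dispatch to the *_dqo variants on non-GQI devices; rte_eth_dev_close()
   * then frees every queue through the overridden tx/rx_queue_release callbacks.
   */
  static void
  teardown_port(uint16_t port_id)
  {
      int ret;

      ret = rte_eth_dev_stop(port_id);
      if (ret != 0)
          printf("port %d: stop failed (%d)\n", port_id, ret);

      ret = rte_eth_dev_close(port_id);
      if (ret != 0)
          printf("port %d: close failed (%d)\n", port_id, ret);
  }

On a GQI device the same calls go through the existing gve_tx_queue_release()/gve_rx_queue_release() paths, which is exactly the branch added to gve_dev_close() in the patch below.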
  

Patch

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 3543378978..7c4be3a1cb 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -292,11 +292,19 @@  gve_dev_close(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(ERR, "Failed to stop dev.");
 	}
 
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		gve_tx_queue_release(dev, i);
+	if (gve_is_gqi(priv)) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++)
+			gve_tx_queue_release(dev, i);
+
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			gve_rx_queue_release(dev, i);
+	} else {
+		for (i = 0; i < dev->data->nb_tx_queues; i++)
+			gve_tx_queue_release_dqo(dev, i);
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		gve_rx_queue_release(dev, i);
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			gve_rx_queue_release_dqo(dev, i);
+	}
 
 	gve_free_qpls(priv);
 	rte_free(priv->adminq);
@@ -408,6 +416,8 @@  gve_eth_dev_ops_override(struct eth_dev_ops *local_eth_dev_ops)
 	/* override eth_dev ops for DQO */
 	local_eth_dev_ops->tx_queue_setup = gve_tx_queue_setup_dqo;
 	local_eth_dev_ops->rx_queue_setup = gve_rx_queue_setup_dqo;
+	local_eth_dev_ops->tx_queue_release = gve_tx_queue_release_dqo;
+	local_eth_dev_ops->rx_queue_release = gve_rx_queue_release_dqo;
 }
 
 static void
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 0adfc90554..93314f2db3 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -353,4 +353,16 @@  gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 		       uint16_t nb_desc, unsigned int socket_id,
 		       const struct rte_eth_txconf *conf);
 
+void
+gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
+
+void
+gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
+
+void
+gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);
+
+void
+gve_stop_rx_queues_dqo(struct rte_eth_dev *dev);
+
 #endif /* _GVE_ETHDEV_H_ */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 518c9d109c..9ba975c9b4 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -343,6 +343,9 @@  gve_stop_rx_queues(struct rte_eth_dev *dev)
 	uint16_t i;
 	int err;
 
+	if (!gve_is_gqi(hw))
+		return gve_stop_rx_queues_dqo(dev);
+
 	err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
 	if (err != 0)
 		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index e8a6d575fc..aca6f8ea2d 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -5,6 +5,38 @@ 
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+static inline void
+gve_release_rxq_mbufs_dqo(struct gve_rx_queue *rxq)
+{
+	uint16_t i;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		if (rxq->sw_ring[i]) {
+			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+			rxq->sw_ring[i] = NULL;
+		}
+	}
+
+	rxq->nb_avail = rxq->nb_rx_desc;
+}
+
+void
+gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid)
+{
+	struct gve_rx_queue *q = dev->data->rx_queues[qid];
+
+	if (q == NULL)
+		return;
+
+	gve_release_rxq_mbufs_dqo(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->compl_ring_mz);
+	rte_memzone_free(q->mz);
+	rte_memzone_free(q->qres_mz);
+	q->qres = NULL;
+	rte_free(q);
+}
+
 static void
 gve_reset_rxq_dqo(struct gve_rx_queue *rxq)
 {
@@ -54,6 +86,12 @@  gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	}
 	nb_desc = hw->rx_desc_cnt;
 
+	/* Free memory if needed */
+	if (dev->data->rx_queues[queue_id]) {
+		gve_rx_queue_release_dqo(dev, queue_id);
+		dev->data->rx_queues[queue_id] = NULL;
+	}
+
 	/* Allocate the RX queue data structure. */
 	rxq = rte_zmalloc_socket("gve rxq",
 				 sizeof(struct gve_rx_queue),
@@ -146,3 +184,22 @@  gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	rte_free(rxq);
 	return err;
 }
+
+void
+gve_stop_rx_queues_dqo(struct rte_eth_dev *dev)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	struct gve_rx_queue *rxq;
+	uint16_t i;
+	int err;
+
+	err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
+	if (err != 0)
+		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		gve_release_rxq_mbufs_dqo(rxq);
+		gve_reset_rxq_dqo(rxq);
+	}
+}
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index bf4e8fea2c..0eb42b1216 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -658,6 +658,9 @@  gve_stop_tx_queues(struct rte_eth_dev *dev)
 	uint16_t i;
 	int err;
 
+	if (!gve_is_gqi(hw))
+		return gve_stop_tx_queues_dqo(dev);
+
 	err = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues);
 	if (err != 0)
 		PMD_DRV_LOG(WARNING, "failed to destroy txqs");
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 4f8bad31bb..e2e4153f27 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -5,6 +5,36 @@ 
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+static inline void
+gve_release_txq_mbufs_dqo(struct gve_tx_queue *txq)
+{
+	uint16_t i;
+
+	for (i = 0; i < txq->sw_size; i++) {
+		if (txq->sw_ring[i]) {
+			rte_pktmbuf_free_seg(txq->sw_ring[i]);
+			txq->sw_ring[i] = NULL;
+		}
+	}
+}
+
+void
+gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid)
+{
+	struct gve_tx_queue *q = dev->data->tx_queues[qid];
+
+	if (q == NULL)
+		return;
+
+	gve_release_txq_mbufs_dqo(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_memzone_free(q->compl_ring_mz);
+	rte_memzone_free(q->qres_mz);
+	q->qres = NULL;
+	rte_free(q);
+}
+
 static int
 check_tx_thresh_dqo(uint16_t nb_desc, uint16_t tx_rs_thresh,
 		    uint16_t tx_free_thresh)
@@ -90,6 +120,12 @@  gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	}
 	nb_desc = hw->tx_desc_cnt;
 
+	/* Free memory if needed. */
+	if (dev->data->tx_queues[queue_id]) {
+		gve_tx_queue_release_dqo(dev, queue_id);
+		dev->data->tx_queues[queue_id] = NULL;
+	}
+
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("gve txq",
 				 sizeof(struct gve_tx_queue),
@@ -176,3 +212,22 @@  gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	rte_free(txq);
 	return err;
 }
+
+void
+gve_stop_tx_queues_dqo(struct rte_eth_dev *dev)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	struct gve_tx_queue *txq;
+	uint16_t i;
+	int err;
+
+	err = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues);
+	if (err != 0)
+		PMD_DRV_LOG(WARNING, "failed to destroy txqs");
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		gve_release_txq_mbufs_dqo(txq);
+		gve_reset_txq_dqo(txq);
+	}
+}
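
As a usage note, the "Free memory if needed" blocks added to gve_rx_queue_setup_dqo() and gve_tx_queue_setup_dqo() make it safe to call queue setup again on a queue id that was already configured: the old ring is released before the new one is allocated. A hedged sketch (the queue id, descriptor count, helper name and mempool arguments are illustrative assumptions) of re-running RX queue setup on a stopped port:

  #include <rte_ethdev.h>
  #include <rte_lcore.h>
  #include <rte_mempool.h>

  /* Illustrative only: the second setup call on the same queue id relies on
   * the release-before-reallocate path added above; the PMD frees the first
   * ring (sw_ring plus descriptor/completion memzones) before building the
   * new one. Note the gve PMD overrides the requested descriptor count with
   * the device's own ring size.
   */
  static int
  reconfigure_rxq0(uint16_t port_id, struct rte_mempool *mp_old,
                   struct rte_mempool *mp_new)
  {
      int ret;

      ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
                                   NULL, mp_old);
      if (ret != 0)
          return ret;

      /* Re-run setup on the same queue id; no explicit release call is needed. */
      return rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
                                    NULL, mp_new);
  }

The same pattern applies to rte_eth_tx_queue_setup() and the TX setup path.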