From patchwork Tue May  9 03:07:29 2023
X-Patchwork-Submitter: Junfeng Guo <junfeng.guo@intel.com>
X-Patchwork-Id: 126779
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Junfeng Guo <junfeng.guo@intel.com>
To: qi.z.zhang@intel.com, ferruh.yigit@amd.com, rushilg@google.com
Cc: dev@dpdk.org, joshwash@google.com, jeroendb@google.com,
 Junfeng Guo <junfeng.guo@intel.com>
Subject: [PATCH] net/gve: support queue start and stop operations
Date: Tue, 9 May 2023 11:07:29 +0800
Message-Id: <20230509030729.2680451-1-junfeng.guo@intel.com>

Add support for queue operations for GQI:
 - gve_rx_queue_start
 - gve_tx_queue_start
 - gve_rx_queue_stop
 - gve_tx_queue_stop

Add support for queue operations for DQO:
 - gve_rx_queue_start_dqo
 - gve_tx_queue_start_dqo
 - gve_rx_queue_stop_dqo
 - gve_tx_queue_stop_dqo

Also move the rxq mbuf allocation helpers into the corresponding Rx
source files.
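
Once these callbacks are registered, individual queues can be controlled
through the generic ethdev API. A minimal sketch (the port and queue ids
below are only illustrative):

	int ret;

	/* Stop Rx queue 0 on port 0: the PMD releases the queue's
	 * mbufs and resets the ring.
	 */
	ret = rte_eth_dev_rx_queue_stop(0, 0);
	if (ret == 0)
		/* Restart it: the PMD re-allocates mbufs and rearms
		 * the queue doorbell.
		 */
		ret = rte_eth_dev_rx_queue_start(0, 0);

Tx queues work the same way through rte_eth_dev_tx_queue_stop() and
rte_eth_dev_tx_queue_start().
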
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Tested-by: Rushil Gupta <rushilg@google.com>
---
 drivers/net/gve/gve_ethdev.c | 166 +++++++++++------------------------
 drivers/net/gve/gve_ethdev.h |  36 ++++++++
 drivers/net/gve/gve_rx.c     |  96 ++++++++++++++++++--
 drivers/net/gve/gve_rx_dqo.c |  97 ++++++++++++++++++--
 drivers/net/gve/gve_tx.c     |  54 ++++++++++--
 drivers/net/gve/gve_tx_dqo.c |  54 ++++++++++--
 6 files changed, 364 insertions(+), 139 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 8b6861a24f..1dcb3b3a01 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -104,81 +104,6 @@ gve_dev_configure(struct rte_eth_dev *dev)
 	return 0;
 }
 
-static int
-gve_refill_pages(struct gve_rx_queue *rxq)
-{
-	struct rte_mbuf *nmb;
-	uint16_t i;
-	int diag;
-
-	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
-	if (diag < 0) {
-		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
-			nmb = rte_pktmbuf_alloc(rxq->mpool);
-			if (!nmb)
-				break;
-			rxq->sw_ring[i] = nmb;
-		}
-		if (i < rxq->nb_rx_desc - 1)
-			return -ENOMEM;
-	}
-	rxq->nb_avail = 0;
-	rxq->next_avail = rxq->nb_rx_desc - 1;
-
-	for (i = 0; i < rxq->nb_rx_desc; i++) {
-		if (rxq->is_gqi_qpl) {
-			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(i * PAGE_SIZE);
-		} else {
-			if (i == rxq->nb_rx_desc - 1)
-				break;
-			nmb = rxq->sw_ring[i];
-			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
-		}
-	}
-
-	rte_write32(rte_cpu_to_be_32(rxq->next_avail), rxq->qrx_tail);
-
-	return 0;
-}
-
-static int
-gve_refill_dqo(struct gve_rx_queue *rxq)
-{
-	struct rte_mbuf *nmb;
-	uint16_t i;
-	int diag;
-
-	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
-	if (diag < 0) {
-		rxq->stats.no_mbufs_bulk++;
-		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
-			nmb = rte_pktmbuf_alloc(rxq->mpool);
-			if (!nmb)
-				break;
-			rxq->sw_ring[i] = nmb;
-		}
-		if (i < rxq->nb_rx_desc - 1) {
-			rxq->stats.no_mbufs += rxq->nb_rx_desc - 1 - i;
-			return -ENOMEM;
-		}
-	}
-
-	for (i = 0; i < rxq->nb_rx_desc; i++) {
-		if (i == rxq->nb_rx_desc - 1)
-			break;
-		nmb = rxq->sw_ring[i];
-		rxq->rx_ring[i].buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
-		rxq->rx_ring[i].buf_id = rte_cpu_to_le_16(i);
-	}
-
-	rxq->nb_rx_hold = 0;
-	rxq->bufq_tail = rxq->nb_rx_desc - 1;
-
-	rte_write32(rxq->bufq_tail, rxq->qrx_tail);
-
-	return 0;
-}
-
 static int
 gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 {
@@ -208,65 +133,68 @@ gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 }
 
 static int
-gve_dev_start(struct rte_eth_dev *dev)
+gve_start_queues(struct rte_eth_dev *dev)
 {
-	uint16_t num_queues = dev->data->nb_tx_queues;
 	struct gve_priv *priv = dev->data->dev_private;
-	struct gve_tx_queue *txq;
-	struct gve_rx_queue *rxq;
+	uint16_t num_queues;
 	uint16_t i;
-	int err;
+	int ret;
 
+	num_queues = dev->data->nb_tx_queues;
 	priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
-	err = gve_adminq_create_tx_queues(priv, num_queues);
-	if (err) {
-		PMD_DRV_LOG(ERR, "failed to create %u tx queues.", num_queues);
-		return err;
-	}
-	for (i = 0; i < num_queues; i++) {
-		txq = priv->txqs[i];
-		txq->qtx_tail =
-			&priv->db_bar2[rte_be_to_cpu_32(txq->qres->db_index)];
-		txq->qtx_head =
-			&priv->cnt_array[rte_be_to_cpu_32(txq->qres->counter_index)];
-
-		rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
-	}
+	ret = gve_adminq_create_tx_queues(priv, num_queues);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to create %u tx queues.", num_queues);
+		return ret;
+	}
+	for (i = 0; i < num_queues; i++)
+		if (gve_tx_queue_start(dev, i) != 0) {
+			PMD_DRV_LOG(ERR, "Failed to start Tx queue %d", i);
+			goto err_tx;
+		}
 
 	num_queues = dev->data->nb_rx_queues;
 	priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
-	err = gve_adminq_create_rx_queues(priv, num_queues);
-	if (err) {
-		PMD_DRV_LOG(ERR, "failed to create %u rx queues.", num_queues);
+	ret = gve_adminq_create_rx_queues(priv, num_queues);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to create %u rx queues.", num_queues);
 		goto err_tx;
 	}
 	for (i = 0; i < num_queues; i++) {
-		rxq = priv->rxqs[i];
-		rxq->qrx_tail =
-			&priv->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];
-
-		rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);
-
 		if (gve_is_gqi(priv))
-			err = gve_refill_pages(rxq);
+			ret = gve_rx_queue_start(dev, i);
 		else
-			err = gve_refill_dqo(rxq);
-		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to refill for RX");
+			ret = gve_rx_queue_start_dqo(dev, i);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to start Rx queue %d", i);
 			goto err_rx;
 		}
 	}
 
-	dev->data->dev_started = 1;
-	gve_link_update(dev, 0);
-
 	return 0;
 
 err_rx:
 	gve_stop_rx_queues(dev);
 err_tx:
 	gve_stop_tx_queues(dev);
-	return err;
+	return ret;
+}
+
+static int
+gve_dev_start(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	ret = gve_start_queues(dev);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to start queues");
+		return ret;
+	}
+
+	dev->data->dev_started = 1;
+	gve_link_update(dev, 0);
+
+	return 0;
 }
 
 static int
@@ -573,6 +501,10 @@ static const struct eth_dev_ops gve_eth_dev_ops = {
 	.tx_queue_setup = gve_tx_queue_setup,
 	.rx_queue_release = gve_rx_queue_release,
 	.tx_queue_release = gve_tx_queue_release,
+	.rx_queue_start = gve_rx_queue_start,
+	.tx_queue_start = gve_tx_queue_start,
+	.rx_queue_stop = gve_rx_queue_stop,
+	.tx_queue_stop = gve_tx_queue_stop,
 	.link_update = gve_link_update,
 	.stats_get = gve_dev_stats_get,
 	.stats_reset = gve_dev_stats_reset,
@@ -591,6 +523,10 @@ static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
 	.tx_queue_setup = gve_tx_queue_setup_dqo,
 	.rx_queue_release = gve_rx_queue_release_dqo,
 	.tx_queue_release = gve_tx_queue_release_dqo,
+	.rx_queue_start = gve_rx_queue_start_dqo,
+	.tx_queue_start = gve_tx_queue_start_dqo,
+	.rx_queue_stop = gve_rx_queue_stop_dqo,
+	.tx_queue_stop = gve_tx_queue_stop_dqo,
 	.link_update = gve_link_update,
 	.stats_get = gve_dev_stats_get,
 	.stats_reset = gve_dev_stats_reset,
@@ -877,12 +813,12 @@ gve_dev_init(struct rte_eth_dev *eth_dev)
 
 	if (gve_is_gqi(priv)) {
 		eth_dev->dev_ops = &gve_eth_dev_ops;
-		eth_dev->rx_pkt_burst = gve_rx_burst;
-		eth_dev->tx_pkt_burst = gve_tx_burst;
+		gve_set_rx_function(eth_dev);
+		gve_set_tx_function(eth_dev);
 	} else {
 		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
-		eth_dev->rx_pkt_burst = gve_rx_burst_dqo;
-		eth_dev->tx_pkt_burst = gve_tx_burst_dqo;
+		gve_set_rx_function_dqo(eth_dev);
+		gve_set_tx_function_dqo(eth_dev);
 	}
 
 	eth_dev->data->mac_addrs = &priv->dev_addr;
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 53a75044c5..cd62debd22 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -367,6 +367,18 @@ gve_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void
 gve_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
+int
+gve_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int
+gve_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int
+gve_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int
+gve_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
 void
 gve_stop_tx_queues(struct rte_eth_dev *dev);
 
@@ -379,6 +391,12 @@ gve_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 uint16_t
 gve_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
+void
+gve_set_rx_function(struct rte_eth_dev *dev);
+
+void
+gve_set_tx_function(struct rte_eth_dev *dev);
+
 /* Below functions are used for DQO */
 
 int
@@ -397,6 +415,18 @@ gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
 void
 gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
 
+int
+gve_rx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int
+gve_tx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int
+gve_rx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int
+gve_tx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 void
 gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);
 
@@ -409,4 +439,10 @@ gve_rx_burst_dqo(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 uint16_t
 gve_tx_burst_dqo(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
+void
+gve_set_rx_function_dqo(struct rte_eth_dev *dev);
+
+void
+gve_set_tx_function_dqo(struct rte_eth_dev *dev);
+
 #endif /* _GVE_ETHDEV_H_ */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index f2f6202404..b8c92ccda0 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -414,11 +414,91 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	return err;
 }
 
+static int
+gve_rxq_mbufs_alloc(struct gve_rx_queue *rxq)
+{
+	struct rte_mbuf *nmb;
+	uint16_t i;
+	int diag;
+
+	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
+	if (diag < 0) {
+		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
+			nmb = rte_pktmbuf_alloc(rxq->mpool);
+			if (!nmb)
+				break;
+			rxq->sw_ring[i] = nmb;
+		}
+		if (i < rxq->nb_rx_desc - 1)
+			return -ENOMEM;
+	}
+	rxq->nb_avail = 0;
+	rxq->next_avail = rxq->nb_rx_desc - 1;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		if (rxq->is_gqi_qpl) {
+			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(i * PAGE_SIZE);
+		} else {
+			if (i == rxq->nb_rx_desc - 1)
+				break;
+			nmb = rxq->sw_ring[i];
+			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
+		}
+	}
+
+	rte_write32(rte_cpu_to_be_32(rxq->next_avail), rxq->qrx_tail);
+
+	return 0;
+}
+
+int
+gve_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	struct gve_rx_queue *rxq;
+	int ret;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	rxq->qrx_tail = &hw->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];
+
+	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);
+
+	ret = gve_rxq_mbufs_alloc(rxq);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to alloc Rx queue mbuf");
+		return ret;
+	}
+
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+int
+gve_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct gve_rx_queue *rxq;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	gve_release_rxq_mbufs(rxq);
+	gve_reset_rxq(rxq);
+
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
 void
 gve_stop_rx_queues(struct rte_eth_dev *dev)
 {
 	struct gve_priv *hw = dev->data->dev_private;
-	struct gve_rx_queue *rxq;
 	uint16_t i;
 	int err;
 
@@ -429,9 +509,13 @@ gve_stop_rx_queues(struct rte_eth_dev *dev)
 	if (err != 0)
 		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxq = dev->data->rx_queues[i];
-		gve_release_rxq_mbufs(rxq);
-		gve_reset_rxq(rxq);
-	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		if (gve_rx_queue_stop(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Failed to stop Rx queue %d", i);
+}
+
+void
+gve_set_rx_function(struct rte_eth_dev *dev)
+{
+	dev->rx_pkt_burst = gve_rx_burst;
 }
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index 1d6b21359c..236aefd2a8 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -333,11 +333,92 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	return err;
 }
 
+static int
+gve_rxq_mbufs_alloc_dqo(struct gve_rx_queue *rxq)
+{
+	struct rte_mbuf *nmb;
+	uint16_t i;
+	int diag;
+
+	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
+	if (diag < 0) {
+		rxq->stats.no_mbufs_bulk++;
+		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
+			nmb = rte_pktmbuf_alloc(rxq->mpool);
+			if (!nmb)
+				break;
+			rxq->sw_ring[i] = nmb;
+		}
+		if (i < rxq->nb_rx_desc - 1) {
+			rxq->stats.no_mbufs += rxq->nb_rx_desc - 1 - i;
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		if (i == rxq->nb_rx_desc - 1)
+			break;
+		nmb = rxq->sw_ring[i];
+		rxq->rx_ring[i].buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+		rxq->rx_ring[i].buf_id = rte_cpu_to_le_16(i);
+	}
+
+	rxq->nb_rx_hold = 0;
+	rxq->bufq_tail = rxq->nb_rx_desc - 1;
+
+	rte_write32(rxq->bufq_tail, rxq->qrx_tail);
+
+	return 0;
+}
+
+int
+gve_rx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	struct gve_rx_queue *rxq;
+	int ret;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	rxq->qrx_tail = &hw->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];
+
+	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);
+
+	ret = gve_rxq_mbufs_alloc_dqo(rxq);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to alloc Rx queue mbuf");
+		return ret;
+	}
+
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+int
+gve_rx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct gve_rx_queue *rxq;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	gve_release_rxq_mbufs_dqo(rxq);
+	gve_reset_rxq_dqo(rxq);
+
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
 void
 gve_stop_rx_queues_dqo(struct rte_eth_dev *dev)
 {
 	struct gve_priv *hw = dev->data->dev_private;
-	struct gve_rx_queue *rxq;
 	uint16_t i;
 	int err;
 
@@ -345,9 +426,13 @@
 	if (err != 0)
 		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxq = dev->data->rx_queues[i];
-		gve_release_rxq_mbufs_dqo(rxq);
-		gve_reset_rxq_dqo(rxq);
-	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		if (gve_rx_queue_stop_dqo(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Failed to stop Rx queue %d", i);
+}
+
+void
+gve_set_rx_function_dqo(struct rte_eth_dev *dev)
+{
+	dev->rx_pkt_burst = gve_rx_burst_dqo;
 }
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index 13dc807623..2e0d001109 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -664,11 +664,49 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	return err;
 }
 
+int
+gve_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	struct gve_tx_queue *txq;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	txq->qtx_tail = &hw->db_bar2[rte_be_to_cpu_32(txq->qres->db_index)];
+	txq->qtx_head =
+		&hw->cnt_array[rte_be_to_cpu_32(txq->qres->counter_index)];
+
+	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
+
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+int
+gve_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct gve_tx_queue *txq;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	gve_release_txq_mbufs(txq);
+	gve_reset_txq(txq);
+
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
 void
 gve_stop_tx_queues(struct rte_eth_dev *dev)
 {
 	struct gve_priv *hw = dev->data->dev_private;
-	struct gve_tx_queue *txq;
 	uint16_t i;
 	int err;
 
@@ -679,9 +717,13 @@
 	if (err != 0)
 		PMD_DRV_LOG(WARNING, "failed to destroy txqs");
 
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txq = dev->data->tx_queues[i];
-		gve_release_txq_mbufs(txq);
-		gve_reset_txq(txq);
-	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		if (gve_tx_queue_stop(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Failed to stop Tx queue %d", i);
+}
+
+void
+gve_set_tx_function(struct rte_eth_dev *dev)
+{
+	dev->tx_pkt_burst = gve_tx_burst;
 }
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index b38eeaea4b..e0d144835b 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -373,11 +373,49 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	return err;
 }
 
+int
+gve_tx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	struct gve_tx_queue *txq;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	txq->qtx_tail = &hw->db_bar2[rte_be_to_cpu_32(txq->qres->db_index)];
+	txq->qtx_head =
+		&hw->cnt_array[rte_be_to_cpu_32(txq->qres->counter_index)];
+
+	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
+
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+int
+gve_tx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct gve_tx_queue *txq;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	gve_release_txq_mbufs_dqo(txq);
+	gve_reset_txq_dqo(txq);
+
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
 void
 gve_stop_tx_queues_dqo(struct rte_eth_dev *dev)
 {
 	struct gve_priv *hw = dev->data->dev_private;
-	struct gve_tx_queue *txq;
 	uint16_t i;
 	int err;
 
@@ -385,9 +423,13 @@
 	if (err != 0)
 		PMD_DRV_LOG(WARNING, "failed to destroy txqs");
 
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txq = dev->data->tx_queues[i];
-		gve_release_txq_mbufs_dqo(txq);
-		gve_reset_txq_dqo(txq);
-	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		if (gve_tx_queue_stop_dqo(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Failed to stop Tx queue %d", i);
+}
+
+void
+gve_set_tx_function_dqo(struct rte_eth_dev *dev)
+{
+	dev->tx_pkt_burst = gve_tx_burst_dqo;
 }
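
For reference, one quick way to exercise the new per-queue ops by hand is
testpmd's runtime per-queue start/stop commands (assuming the gve device
is probed as port 0):

	testpmd> port 0 rxq 0 stop
	testpmd> port 0 rxq 0 start
	testpmd> port 0 txq 0 stop
	testpmd> port 0 txq 0 start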