From patchwork Thu Apr 13 06:16:41 2023
X-Patchwork-Submitter: Junfeng Guo
X-Patchwork-Id: 125981
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Junfeng Guo
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst
Subject: [PATCH 01/10] net/gve: add Tx queue setup for DQO
Date: Thu, 13 Apr 2023 14:16:41 +0800
Message-Id: <20230413061650.796940-2-junfeng.guo@intel.com>
In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com>
References: <20230413061650.796940-1-junfeng.guo@intel.com>
List-Id: DPDK patches and discussions

Add support for the tx_queue_setup_dqo ops. The DQO format has a submission and a completion queue pair for each Tx/Rx queue. Note that with the DQO format, all descriptors, doorbells, and counters are written in little-endian.
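To illustrate the pairing described above, a minimal sketch of a DQO-style submission/completion pair follows; the struct and function names are hypothetical (not the layouts this series imports), but it shows the two invariants the commit message calls out: the CPU writes only the submission ring and reads only the completion ring, and every device-visible field is converted to little-endian.

    /*
     * Hypothetical sketch only; not the driver's real descriptor layouts.
     */
    #include <stdint.h>
    #include <rte_byteorder.h>

    struct sketch_tx_desc {         /* submission ring: CPU writes, device reads */
            rte_le64_t buf_addr;    /* stored little-endian for the device */
            rte_le16_t compl_tag;   /* echoed back on completion */
    };

    struct sketch_tx_compl {        /* completion ring: device writes, CPU reads */
            uint8_t generation;     /* flips each time the ring wraps */
            rte_le16_t completion_tag;
    };

    static inline void
    sketch_post(struct sketch_tx_desc *ring, uint16_t tail,
                uint64_t iova, uint16_t tag)
    {
            ring[tail].buf_addr = rte_cpu_to_le_64(iova);
            ring[tail].compl_tag = rte_cpu_to_le_16(tag);
    }

    static inline int
    sketch_reap(const struct sketch_tx_compl *ring, uint16_t head,
                uint8_t cur_gen, uint16_t *tag)
    {
            if (ring[head].generation != cur_gen)
                    return 0;       /* device has not written this slot yet */
            *tag = rte_le_to_cpu_16(ring[head].completion_tag);
            return 1;
    }

The generation bit lets the CPU poll the completion ring without a separate head pointer from the device, which is the scheme the data-path patches later in this series rely on.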
Signed-off-by: Junfeng Guo Signed-off-by: Rushil Gupta Signed-off-by: Joshua Washington Signed-off-by: Jeroen de Borst --- drivers/net/gve/gve_ethdev.c | 21 +++- drivers/net/gve/gve_ethdev.h | 27 ++++- drivers/net/gve/gve_tx_dqo.c | 185 +++++++++++++++++++++++++++++++++++ drivers/net/gve/meson.build | 1 + 4 files changed, 230 insertions(+), 4 deletions(-) create mode 100644 drivers/net/gve/gve_tx_dqo.c diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c index cf28a4a3b7..90345b193d 100644 --- a/drivers/net/gve/gve_ethdev.c +++ b/drivers/net/gve/gve_ethdev.c @@ -298,6 +298,7 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->default_txconf = (struct rte_eth_txconf) { .tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = GVE_DEFAULT_TX_RS_THRESH, .offloads = 0, }; @@ -528,6 +529,21 @@ static const struct eth_dev_ops gve_eth_dev_ops = { .xstats_get_names = gve_xstats_get_names, }; +static const struct eth_dev_ops gve_eth_dev_ops_dqo = { + .dev_configure = gve_dev_configure, + .dev_start = gve_dev_start, + .dev_stop = gve_dev_stop, + .dev_close = gve_dev_close, + .dev_infos_get = gve_dev_info_get, + .tx_queue_setup = gve_tx_queue_setup_dqo, + .link_update = gve_link_update, + .stats_get = gve_dev_stats_get, + .stats_reset = gve_dev_stats_reset, + .mtu_set = gve_dev_mtu_set, + .xstats_get = gve_xstats_get, + .xstats_get_names = gve_xstats_get_names, +}; + static void gve_free_counter_array(struct gve_priv *priv) { @@ -770,8 +786,6 @@ gve_dev_init(struct rte_eth_dev *eth_dev) rte_be32_t *db_bar; int err; - eth_dev->dev_ops = &gve_eth_dev_ops; - if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; @@ -807,10 +821,11 @@ gve_dev_init(struct rte_eth_dev *eth_dev) return err; if (gve_is_gqi(priv)) { + eth_dev->dev_ops = &gve_eth_dev_ops; eth_dev->rx_pkt_burst = gve_rx_burst; eth_dev->tx_pkt_burst = gve_tx_burst; } else { - PMD_DRV_LOG(ERR, "DQO_RDA is not implemented and will be added in the future"); + eth_dev->dev_ops = &gve_eth_dev_ops_dqo; } eth_dev->data->mac_addrs = &priv->dev_addr; diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h index 0b825113f6..6c6defa045 100644 --- a/drivers/net/gve/gve_ethdev.h +++ b/drivers/net/gve/gve_ethdev.h @@ -28,7 +28,8 @@ #define PCI_MSIX_FLAGS_QSIZE 0x07FF /* Table size */ #define GVE_DEFAULT_RX_FREE_THRESH 512 -#define GVE_DEFAULT_TX_FREE_THRESH 256 +#define GVE_DEFAULT_TX_FREE_THRESH 32 +#define GVE_DEFAULT_TX_RS_THRESH 32 #define GVE_TX_MAX_FREE_SZ 512 #define GVE_MIN_BUF_SIZE 1024 @@ -53,6 +54,13 @@ union gve_tx_desc { struct gve_tx_seg_desc seg; /* subsequent descs for a packet */ }; +/* Tx desc for DQO format */ +union gve_tx_desc_dqo { + struct gve_tx_pkt_desc_dqo pkt; + struct gve_tx_tso_context_desc_dqo tso_ctx; + struct gve_tx_general_context_desc_dqo general_ctx; +}; + /* Offload features */ union gve_tx_offload { uint64_t data; @@ -100,8 +108,10 @@ struct gve_tx_queue { uint32_t tx_tail; uint16_t nb_tx_desc; uint16_t nb_free; + uint16_t nb_used; uint32_t next_to_clean; uint16_t free_thresh; + uint16_t rs_thresh; /* Only valid for DQO_QPL queue format */ uint16_t sw_tail; @@ -128,7 +138,15 @@ struct gve_tx_queue { struct gve_queue_resources *qres; /* newly added for DQO */ + volatile union gve_tx_desc_dqo *tx_ring; + struct gve_tx_compl_desc *compl_ring; + const struct rte_memzone *compl_ring_mz; uint64_t compl_ring_phys_addr; + uint32_t complq_tail; + uint16_t sw_size; + uint8_t cur_gen_bit; + uint32_t last_desc_cleaned; + void **txqs; /* Only valid for 
DQO_RDA queue format */ struct gve_tx_queue *complq; @@ -342,4 +360,11 @@ gve_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); uint16_t gve_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +/* Below functions are used for DQO */ + +int +gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *conf); + #endif /* _GVE_ETHDEV_H_ */ diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c new file mode 100644 index 0000000000..22d20ff16f --- /dev/null +++ b/drivers/net/gve/gve_tx_dqo.c @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2022-2023 Google LLC + * Copyright (c) 2022-2023 Intel Corporation + */ + +#include "gve_ethdev.h" +#include "base/gve_adminq.h" + +static int +check_tx_thresh_dqo(uint16_t nb_desc, uint16_t tx_rs_thresh, + uint16_t tx_free_thresh) +{ + if (tx_rs_thresh >= (nb_desc - 2)) { + PMD_DRV_LOG(ERR, "tx_rs_thresh (%u) must be less than the " + "number of TX descriptors (%u) minus 2", + tx_rs_thresh, nb_desc); + return -EINVAL; + } + if (tx_free_thresh >= (nb_desc - 3)) { + PMD_DRV_LOG(ERR, "tx_free_thresh (%u) must be less than the " + "number of TX descriptors (%u) minus 3.", + tx_free_thresh, nb_desc); + return -EINVAL; + } + if (tx_rs_thresh > tx_free_thresh) { + PMD_DRV_LOG(ERR, "tx_rs_thresh (%u) must be less than or " + "equal to tx_free_thresh (%u).", + tx_rs_thresh, tx_free_thresh); + return -EINVAL; + } + if ((nb_desc % tx_rs_thresh) != 0) { + PMD_DRV_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the " + "number of TX descriptors (%u).", + tx_rs_thresh, nb_desc); + return -EINVAL; + } + + return 0; +} + +static void +gve_reset_txq_dqo(struct gve_tx_queue *txq) +{ + struct rte_mbuf **sw_ring; + uint32_t size, i; + + if (txq == NULL) { + PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); + return; + } + + size = txq->nb_tx_desc * sizeof(union gve_tx_desc_dqo); + for (i = 0; i < size; i++) + ((volatile char *)txq->tx_ring)[i] = 0; + + size = txq->sw_size * sizeof(struct gve_tx_compl_desc); + for (i = 0; i < size; i++) + ((volatile char *)txq->compl_ring)[i] = 0; + + sw_ring = txq->sw_ring; + for (i = 0; i < txq->sw_size; i++) + sw_ring[i] = NULL; + + txq->tx_tail = 0; + txq->nb_used = 0; + + txq->last_desc_cleaned = 0; + txq->sw_tail = 0; + txq->nb_free = txq->nb_tx_desc - 1; + + txq->complq_tail = 0; + txq->cur_gen_bit = 1; +} + +int +gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *conf) +{ + struct gve_priv *hw = dev->data->dev_private; + const struct rte_memzone *mz; + struct gve_tx_queue *txq; + uint16_t free_thresh; + uint16_t rs_thresh; + uint16_t sw_size; + int err = 0; + + if (nb_desc != hw->tx_desc_cnt) { + PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.", + hw->tx_desc_cnt); + } + nb_desc = hw->tx_desc_cnt; + + /* Allocate the TX queue data structure. */ + txq = rte_zmalloc_socket("gve txq", + sizeof(struct gve_tx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for tx queue structure"); + return -ENOMEM; + } + + /* need to check free_thresh here */ + free_thresh = conf->tx_free_thresh ? + conf->tx_free_thresh : GVE_DEFAULT_TX_FREE_THRESH; + rs_thresh = conf->tx_rs_thresh ? 
+ conf->tx_rs_thresh : GVE_DEFAULT_TX_RS_THRESH; + if (check_tx_thresh_dqo(nb_desc, rs_thresh, free_thresh)) + return -EINVAL; + + txq->nb_tx_desc = nb_desc; + txq->free_thresh = free_thresh; + txq->rs_thresh = rs_thresh; + txq->queue_id = queue_id; + txq->port_id = dev->data->port_id; + txq->ntfy_id = queue_id; + txq->hw = hw; + txq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[txq->ntfy_id].id)]; + + /* Allocate software ring */ + sw_size = nb_desc * DQO_TX_MULTIPLIER; + txq->sw_ring = rte_zmalloc_socket("gve tx sw ring", + sw_size * sizeof(struct rte_mbuf *), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq->sw_ring == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring"); + err = -ENOMEM; + goto free_txq; + } + txq->sw_size = sw_size; + + /* Allocate TX hardware ring descriptors. */ + mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id, + nb_desc * sizeof(union gve_tx_desc_dqo), + PAGE_SIZE, socket_id); + if (mz == NULL) { + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX"); + err = -ENOMEM; + goto free_txq_sw_ring; + } + txq->tx_ring = (union gve_tx_desc_dqo *)mz->addr; + txq->tx_ring_phys_addr = mz->iova; + txq->mz = mz; + + /* Allocate TX completion ring descriptors. */ + mz = rte_eth_dma_zone_reserve(dev, "tx_compl_ring", queue_id, + sw_size * sizeof(struct gve_tx_compl_desc), + PAGE_SIZE, socket_id); + if (mz == NULL) { + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX completion queue"); + err = -ENOMEM; + goto free_txq_mz; + } + txq->compl_ring = (struct gve_tx_compl_desc *)mz->addr; + txq->compl_ring_phys_addr = mz->iova; + txq->compl_ring_mz = mz; + txq->txqs = dev->data->tx_queues; + + mz = rte_eth_dma_zone_reserve(dev, "txq_res", queue_id, + sizeof(struct gve_queue_resources), + PAGE_SIZE, socket_id); + if (mz == NULL) { + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX resource"); + err = -ENOMEM; + goto free_txq_cq_mz; + } + txq->qres = (struct gve_queue_resources *)mz->addr; + txq->qres_mz = mz; + + gve_reset_txq_dqo(txq); + + dev->data->tx_queues[queue_id] = txq; + + return 0; + +free_txq_cq_mz: + rte_memzone_free(txq->compl_ring_mz); +free_txq_mz: + rte_memzone_free(txq->mz); +free_txq_sw_ring: + rte_free(txq->sw_ring); +free_txq: + rte_free(txq); + return err; +} diff --git a/drivers/net/gve/meson.build b/drivers/net/gve/meson.build index af0010c01c..2ddb0cbf9e 100644 --- a/drivers/net/gve/meson.build +++ b/drivers/net/gve/meson.build @@ -11,6 +11,7 @@ sources = files( 'base/gve_adminq.c', 'gve_rx.c', 'gve_tx.c', + 'gve_tx_dqo.c', 'gve_ethdev.c', ) includes += include_directories('base') From patchwork Thu Apr 13 06:16:42 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junfeng Guo X-Patchwork-Id: 125982 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 13EB74292F; Thu, 13 Apr 2023 08:17:24 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 087A442BB1; Thu, 13 Apr 2023 08:17:18 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 765C141143 for ; Thu, 13 Apr 2023 08:17:16 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1681366636; x=1712902636; 
h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=RQG/88s8hOEVmZKfxuuQuFLStPOq9493+z/0+uWH9F8=; b=JJem639JfHnK7TiWATIUthQJ2+hdzBGFxdcdEe2odkmZG7yJSHg1YSao GEjcD8bVnRJ0WeHnijc5R2AoIplrAq8cZ0F1aFTY5rc1HHscjqxMrFVJ1 9XzJo0M9bmFVsg1zk99dk4lKT1oVDhF6fonsNa+0MOl8bjk2Tnf73VxXz kzubaiFY8RIlTaLbgb2RDkCqE//e7uDt0IqzCFwIfJBipSUXY3Dny93Yv jQo/m9qEYz2+5SyPqz22U45yi5OmariJRy6lCRpdLW7gPEbX/elOWpU5Z iehRSSksO53xatzlvUY2el4+FGwxZAGEmTJykmJBBKpHnsAe3+SuB4u2q w==; X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="341595346" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="341595346" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Apr 2023 23:17:16 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="691824259" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="691824259" Received: from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104]) by fmsmga007.fm.intel.com with ESMTP; 12 Apr 2023 23:17:13 -0700 From: Junfeng Guo To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com Cc: dev@dpdk.org, Junfeng Guo , Rushil Gupta , Joshua Washington , Jeroen de Borst Subject: [PATCH 02/10] net/gve: add Rx queue setup for DQO Date: Thu, 13 Apr 2023 14:16:42 +0800 Message-Id: <20230413061650.796940-3-junfeng.guo@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com> References: <20230413061650.796940-1-junfeng.guo@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add support for rx_queue_setup_dqo ops. 
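The setup path added below reserves three DMA regions per queue: the descriptor (buffer) ring, the completion ring, and the queue-resources page. As context, here is a condensed sketch of that recurring reserve-and-record step, with a hypothetical helper name; it assumes the driver headers pulled in via "gve_ethdev.h" (which provide rte_eth_dma_zone_reserve() and PAGE_SIZE), and error unwinding is left to the caller, as in the patch:

    #include "gve_ethdev.h"

    /* Hypothetical helper mirroring the per-ring pattern used below. */
    static const struct rte_memzone *
    sketch_ring_reserve(struct rte_eth_dev *dev, const char *name,
                        uint16_t qid, size_t size, int socket_id,
                        void **addr, uint64_t *iova)
    {
            const struct rte_memzone *mz;

            /* PAGE_SIZE alignment keeps the ring within device DMA rules. */
            mz = rte_eth_dma_zone_reserve(dev, name, qid, size,
                                          PAGE_SIZE, socket_id);
            if (mz == NULL)
                    return NULL;    /* caller frees earlier allocations */
            *addr = mz->addr;       /* CPU virtual address used by the PMD */
            *iova = mz->iova;       /* bus address programmed into the NIC */
            return mz;
    }

Recording both mz->addr and mz->iova matters because the device only ever sees the IOVA, while the PMD initializes and polls the rings through the virtual mapping.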
Signed-off-by: Junfeng Guo Signed-off-by: Rushil Gupta Signed-off-by: Joshua Washington Signed-off-by: Jeroen de Borst --- drivers/net/gve/gve_ethdev.c | 1 + drivers/net/gve/gve_ethdev.h | 11 +++ drivers/net/gve/gve_rx_dqo.c | 156 +++++++++++++++++++++++++++++++++++ drivers/net/gve/meson.build | 1 + 4 files changed, 169 insertions(+) create mode 100644 drivers/net/gve/gve_rx_dqo.c diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c index 90345b193d..d387d7154b 100644 --- a/drivers/net/gve/gve_ethdev.c +++ b/drivers/net/gve/gve_ethdev.c @@ -535,6 +535,7 @@ static const struct eth_dev_ops gve_eth_dev_ops_dqo = { .dev_stop = gve_dev_stop, .dev_close = gve_dev_close, .dev_infos_get = gve_dev_info_get, + .rx_queue_setup = gve_rx_queue_setup_dqo, .tx_queue_setup = gve_tx_queue_setup_dqo, .link_update = gve_link_update, .stats_get = gve_dev_stats_get, diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h index 6c6defa045..cb8cd62886 100644 --- a/drivers/net/gve/gve_ethdev.h +++ b/drivers/net/gve/gve_ethdev.h @@ -167,6 +167,7 @@ struct gve_rx_queue { uint16_t nb_rx_desc; uint16_t expected_seqno; /* the next expected seqno */ uint16_t free_thresh; + uint16_t nb_rx_hold; uint32_t next_avail; uint32_t nb_avail; @@ -189,7 +190,12 @@ struct gve_rx_queue { uint16_t rx_buf_len; /* newly added for DQO */ + volatile struct gve_rx_desc_dqo *rx_ring; + struct gve_rx_compl_desc_dqo *compl_ring; + const struct rte_memzone *compl_ring_mz; uint64_t compl_ring_phys_addr; + uint8_t cur_gen_bit; + uint16_t bufq_tail; /* Only valid for DQO_RDA queue format */ struct gve_rx_queue *bufq; @@ -362,6 +368,11 @@ gve_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); /* Below functions are used for DQO */ +int +gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *conf, + struct rte_mempool *pool); int gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc, unsigned int socket_id, diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c new file mode 100644 index 0000000000..c419c4dd2f --- /dev/null +++ b/drivers/net/gve/gve_rx_dqo.c @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2022-2023 Google LLC + * Copyright (c) 2022-2023 Intel Corporation + */ + + +#include "gve_ethdev.h" +#include "base/gve_adminq.h" + +static void +gve_reset_rxq_dqo(struct gve_rx_queue *rxq) +{ + struct rte_mbuf **sw_ring; + uint32_t size, i; + + if (rxq == NULL) { + PMD_DRV_LOG(ERR, "pointer to rxq is NULL"); + return; + } + + size = rxq->nb_rx_desc * sizeof(struct gve_rx_desc_dqo); + for (i = 0; i < size; i++) + ((volatile char *)rxq->rx_ring)[i] = 0; + + size = rxq->nb_rx_desc * sizeof(struct gve_rx_compl_desc_dqo); + for (i = 0; i < size; i++) + ((volatile char *)rxq->compl_ring)[i] = 0; + + sw_ring = rxq->sw_ring; + for (i = 0; i < rxq->nb_rx_desc; i++) + sw_ring[i] = NULL; + + rxq->bufq_tail = 0; + rxq->next_avail = 0; + rxq->nb_rx_hold = rxq->nb_rx_desc - 1; + + rxq->rx_tail = 0; + rxq->cur_gen_bit = 1; +} + +int +gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *conf, + struct rte_mempool *pool) +{ + struct gve_priv *hw = dev->data->dev_private; + const struct rte_memzone *mz; + struct gve_rx_queue *rxq; + uint16_t free_thresh; + int err = 0; + + if (nb_desc != hw->rx_desc_cnt) { + PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc 
config, use hw nb_desc %u.", + hw->rx_desc_cnt); + } + nb_desc = hw->rx_desc_cnt; + + /* Allocate the RX queue data structure. */ + rxq = rte_zmalloc_socket("gve rxq", + sizeof(struct gve_rx_queue), + RTE_CACHE_LINE_SIZE, + socket_id); + if (rxq == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for rx queue structure"); + return -ENOMEM; + } + + /* check free_thresh here */ + free_thresh = conf->rx_free_thresh ? + conf->rx_free_thresh : GVE_DEFAULT_RX_FREE_THRESH; + if (free_thresh >= nb_desc) { + PMD_DRV_LOG(ERR, "rx_free_thresh (%u) must be less than nb_desc (%u).", + free_thresh, rxq->nb_rx_desc); + err = -EINVAL; + goto free_rxq; + } + + rxq->nb_rx_desc = nb_desc; + rxq->free_thresh = free_thresh; + rxq->queue_id = queue_id; + rxq->port_id = dev->data->port_id; + rxq->ntfy_id = hw->num_ntfy_blks / 2 + queue_id; + + rxq->mpool = pool; + rxq->hw = hw; + rxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[rxq->ntfy_id].id)]; + + rxq->rx_buf_len = + rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM; + + /* Allocate software ring */ + rxq->sw_ring = rte_zmalloc_socket("gve rx sw ring", + nb_desc * sizeof(struct rte_mbuf *), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq->sw_ring == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for SW RX ring"); + err = -ENOMEM; + goto free_rxq; + } + + /* Allocate RX buffer queue */ + mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id, + nb_desc * sizeof(struct gve_rx_desc_dqo), + PAGE_SIZE, socket_id); + if (mz == NULL) { + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue"); + err = -ENOMEM; + goto free_rxq_sw_ring; + } + rxq->rx_ring = (struct gve_rx_desc_dqo *)mz->addr; + rxq->rx_ring_phys_addr = mz->iova; + rxq->mz = mz; + + /* Allocate RX completion queue */ + mz = rte_eth_dma_zone_reserve(dev, "compl_ring", queue_id, + nb_desc * sizeof(struct gve_rx_compl_desc_dqo), + PAGE_SIZE, socket_id); + if (mz == NULL) { + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX completion queue"); + err = -ENOMEM; + goto free_rxq_mz; + } + /* Zero all the descriptors in the ring */ + memset(mz->addr, 0, nb_desc * sizeof(struct gve_rx_compl_desc_dqo)); + rxq->compl_ring = (struct gve_rx_compl_desc_dqo *)mz->addr; + rxq->compl_ring_phys_addr = mz->iova; + rxq->compl_ring_mz = mz; + + mz = rte_eth_dma_zone_reserve(dev, "rxq_res", queue_id, + sizeof(struct gve_queue_resources), + PAGE_SIZE, socket_id); + if (mz == NULL) { + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX resource"); + err = -ENOMEM; + goto free_rxq_cq_mz; + } + rxq->qres = (struct gve_queue_resources *)mz->addr; + rxq->qres_mz = mz; + + gve_reset_rxq_dqo(rxq); + + dev->data->rx_queues[queue_id] = rxq; + + return 0; + +free_rxq_cq_mz: + rte_memzone_free(rxq->compl_ring_mz); +free_rxq_mz: + rte_memzone_free(rxq->mz); +free_rxq_sw_ring: + rte_free(rxq->sw_ring); +free_rxq: + rte_free(rxq); + return err; +} diff --git a/drivers/net/gve/meson.build b/drivers/net/gve/meson.build index 2ddb0cbf9e..c9d87903f9 100644 --- a/drivers/net/gve/meson.build +++ b/drivers/net/gve/meson.build @@ -11,6 +11,7 @@ sources = files( 'base/gve_adminq.c', 'gve_rx.c', 'gve_tx.c', + 'gve_rx_dqo.c', 'gve_tx_dqo.c', 'gve_ethdev.c', ) From patchwork Thu Apr 13 06:16:43 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junfeng Guo X-Patchwork-Id: 125983 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from 
mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id CADBB4292F; Thu, 13 Apr 2023 08:17:29 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 3226941153; Thu, 13 Apr 2023 08:17:21 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 2FB1F41143 for ; Thu, 13 Apr 2023 08:17:19 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1681366639; x=1712902639; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=vBVu0pjpFzuU2jfEm57liWrA35tgkh5FFU1B2bfPR+g=; b=hd2heL4cYyRnkVMFw9ZH/heX5rsFd6M1cr9S20ZMXaXpIKav3zeNitoQ iY8Xs9QVKCNOzcyQ9M4p8V7qbptVPJyVjve3q6OcU1yAz5EkBuVoExQvw HiPPYENeNzU4YyNPxJ6gcK8MGiXqogOyZ4oaP681UyJz+RF02A6jBovp0 OgcGe/3Kgf+aaVRezgFWJT67Ju+Keg10hIY+HYtadGS1tv9xsSgZUU13y cdD1vPdtOEj96kstH3I5RZs0lDu6CCeXgSHmQR/LcL2iZGQyR6GqmZiqi hYzv0Fg4z1sQHDoGQHqU4qLO/tJmMpLtswKeznFvongXGZxTyNslDbhg7 Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="341595352" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="341595352" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Apr 2023 23:17:18 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="691824264" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="691824264" Received: from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104]) by fmsmga007.fm.intel.com with ESMTP; 12 Apr 2023 23:17:16 -0700 From: Junfeng Guo To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com Cc: dev@dpdk.org, Junfeng Guo , Rushil Gupta , Joshua Washington , Jeroen de Borst Subject: [PATCH 03/10] net/gve: support device start and close for DQO Date: Thu, 13 Apr 2023 14:16:43 +0800 Message-Id: <20230413061650.796940-4-junfeng.guo@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com> References: <20230413061650.796940-1-junfeng.guo@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add device start and close support for DQO. 
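The start path below must fully post the Rx buffer queue before enabling traffic. As context, a condensed, happy-path-only sketch of the refill this patch adds as gve_refill_dqo() (the patch itself also falls back to per-mbuf allocation when the bulk allocation fails); note that one ring slot is deliberately left unposted and the doorbell is written once at the end:

    /* Condensed sketch of the dev_start Rx refill (happy path only). */
    static int
    sketch_refill_dqo(struct gve_rx_queue *rxq)
    {
            uint16_t nb_post = rxq->nb_rx_desc - 1; /* keep one slot empty */
            uint16_t i;

            if (rte_pktmbuf_alloc_bulk(rxq->mpool, rxq->sw_ring,
                                       rxq->nb_rx_desc) != 0)
                    return -ENOMEM;

            for (i = 0; i < nb_post; i++) {
                    rxq->rx_ring[i].buf_addr = rte_cpu_to_le_64(
                            rte_mbuf_data_iova_default(rxq->sw_ring[i]));
                    /* buf_id maps the completion back to sw_ring[i] */
                    rxq->rx_ring[i].buf_id = rte_cpu_to_le_16(i);
            }

            rxq->nb_rx_hold = 0;
            rxq->bufq_tail = nb_post;
            /* one doorbell write after all descriptors are in place */
            rte_write32(rxq->bufq_tail, rxq->qrx_tail);
            return 0;
    }

A common reason for leaving one slot unposted, as the patch does, is to keep a full ring distinguishable from an empty one without extra bookkeeping.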
Signed-off-by: Junfeng Guo Signed-off-by: Rushil Gupta Signed-off-by: Joshua Washington Signed-off-by: Jeroen de Borst --- drivers/net/gve/gve_ethdev.c | 43 +++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c index d387d7154b..fc60db63c5 100644 --- a/drivers/net/gve/gve_ethdev.c +++ b/drivers/net/gve/gve_ethdev.c @@ -78,6 +78,9 @@ gve_free_qpls(struct gve_priv *priv) uint16_t nb_rxqs = priv->max_nb_rxq; uint32_t i; + if (priv->queue_format != GVE_GQI_QPL_FORMAT) + return; + for (i = 0; i < nb_txqs + nb_rxqs; i++) { if (priv->qpl[i].mz != NULL) rte_memzone_free(priv->qpl[i].mz); @@ -138,6 +141,41 @@ gve_refill_pages(struct gve_rx_queue *rxq) return 0; } +static int +gve_refill_dqo(struct gve_rx_queue *rxq) +{ + struct rte_mbuf *nmb; + uint16_t i; + int diag; + + diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc); + if (diag < 0) { + for (i = 0; i < rxq->nb_rx_desc - 1; i++) { + nmb = rte_pktmbuf_alloc(rxq->mpool); + if (!nmb) + break; + rxq->sw_ring[i] = nmb; + } + if (i < rxq->nb_rx_desc - 1) + return -ENOMEM; + } + + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (i == rxq->nb_rx_desc - 1) + break; + nmb = rxq->sw_ring[i]; + rxq->rx_ring[i].buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxq->rx_ring[i].buf_id = rte_cpu_to_le_16(i); + } + + rxq->nb_rx_hold = 0; + rxq->bufq_tail = rxq->nb_rx_desc - 1; + + rte_write32(rxq->bufq_tail, rxq->qrx_tail); + + return 0; +} + static int gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) { @@ -206,7 +244,10 @@ gve_dev_start(struct rte_eth_dev *dev) rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr); - err = gve_refill_pages(rxq); + if (gve_is_gqi(priv)) + err = gve_refill_pages(rxq); + else + err = gve_refill_dqo(rxq); if (err) { PMD_DRV_LOG(ERR, "Failed to refill for RX"); goto err_rx; From patchwork Thu Apr 13 06:16:44 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junfeng Guo X-Patchwork-Id: 125984 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id E70574292F; Thu, 13 Apr 2023 08:17:35 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9BF5E42C76; Thu, 13 Apr 2023 08:17:24 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 2513342B71 for ; Thu, 13 Apr 2023 08:17:21 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1681366642; x=1712902642; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=Wf9eX5u2eiVCB/cUctwUs0dl4M/TuYWnuQ78akWkP4M=; b=cXjhOmFhrE8fc0jc4Aj4x+vMmOcpMf6LaMQYLvH3rTgXCLLvIG7poyQq mfOeqtG0KTO9tFGNMuF9yFac2AF/3Zu+kpAB2IORXSkRG1McVIshgLiwO LqYVq4o7h3o0XmzKKYSDsvtSvKt7w8SsmrOSVz1nsTAmVYfBY++7IkBPV 6joY0q63l11UWpnXzA9MO/eboQYRd/vjvPohMyTiXA4DPDZIByPHgwvMT M9IcKIEYkdbrbb7aTUmcURh2XEKBdd0Ps25MG4DLBPxbsVQ2lLEYRYJS2 nzC0jX2yYUuZ/G338+b/KftWi/gSoR2Ra4oJuGrvyy7GZ33xJyau/eeLQ Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="341595357" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="341595357" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by 
fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Apr 2023 23:17:21 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="691824269" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="691824269" Received: from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104]) by fmsmga007.fm.intel.com with ESMTP; 12 Apr 2023 23:17:19 -0700 From: Junfeng Guo To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com Cc: dev@dpdk.org, Junfeng Guo , Rushil Gupta , Joshua Washington , Jeroen de Borst Subject: [PATCH 04/10] net/gve: support queue release and stop for DQO Date: Thu, 13 Apr 2023 14:16:44 +0800 Message-Id: <20230413061650.796940-5-junfeng.guo@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com> References: <20230413061650.796940-1-junfeng.guo@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add support for queue operations: - gve_tx_queue_release_dqo - gve_rx_queue_release_dqo - gve_stop_tx_queues_dqo - gve_stop_rx_queues_dqo Signed-off-by: Junfeng Guo Signed-off-by: Rushil Gupta Signed-off-by: Joshua Washington Signed-off-by: Jeroen de Borst --- drivers/net/gve/gve_ethdev.c | 18 +++++++++--- drivers/net/gve/gve_ethdev.h | 12 ++++++++ drivers/net/gve/gve_rx.c | 3 ++ drivers/net/gve/gve_rx_dqo.c | 57 ++++++++++++++++++++++++++++++++++++ drivers/net/gve/gve_tx.c | 3 ++ drivers/net/gve/gve_tx_dqo.c | 55 ++++++++++++++++++++++++++++++++++ 6 files changed, 144 insertions(+), 4 deletions(-) diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c index fc60db63c5..340315a1a3 100644 --- a/drivers/net/gve/gve_ethdev.c +++ b/drivers/net/gve/gve_ethdev.c @@ -292,11 +292,19 @@ gve_dev_close(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "Failed to stop dev."); } - for (i = 0; i < dev->data->nb_tx_queues; i++) - gve_tx_queue_release(dev, i); + if (gve_is_gqi(priv)) { + for (i = 0; i < dev->data->nb_tx_queues; i++) + gve_tx_queue_release(dev, i); + + for (i = 0; i < dev->data->nb_rx_queues; i++) + gve_rx_queue_release(dev, i); + } else { + for (i = 0; i < dev->data->nb_tx_queues; i++) + gve_tx_queue_release_dqo(dev, i); - for (i = 0; i < dev->data->nb_rx_queues; i++) - gve_rx_queue_release(dev, i); + for (i = 0; i < dev->data->nb_rx_queues; i++) + gve_rx_queue_release_dqo(dev, i); + } gve_free_qpls(priv); rte_free(priv->adminq); @@ -578,6 +586,8 @@ static const struct eth_dev_ops gve_eth_dev_ops_dqo = { .dev_infos_get = gve_dev_info_get, .rx_queue_setup = gve_rx_queue_setup_dqo, .tx_queue_setup = gve_tx_queue_setup_dqo, + .rx_queue_release = gve_rx_queue_release_dqo, + .tx_queue_release = gve_tx_queue_release_dqo, .link_update = gve_link_update, .stats_get = gve_dev_stats_get, .stats_reset = gve_dev_stats_reset, diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h index cb8cd62886..c8e1dd1435 100644 --- a/drivers/net/gve/gve_ethdev.h +++ b/drivers/net/gve/gve_ethdev.h @@ -378,4 +378,16 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc, unsigned int socket_id, const struct rte_eth_txconf *conf); +void +gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid); + +void +gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid); + +void +gve_stop_tx_queues_dqo(struct rte_eth_dev *dev); + +void 
+gve_stop_rx_queues_dqo(struct rte_eth_dev *dev); + #endif /* _GVE_ETHDEV_H_ */ diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c index 8d8f94efff..3dd3f578f9 100644 --- a/drivers/net/gve/gve_rx.c +++ b/drivers/net/gve/gve_rx.c @@ -359,6 +359,9 @@ gve_stop_rx_queues(struct rte_eth_dev *dev) uint16_t i; int err; + if (!gve_is_gqi(hw)) + return gve_stop_rx_queues_dqo(dev); + err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues); if (err != 0) PMD_DRV_LOG(WARNING, "failed to destroy rxqs"); diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c index c419c4dd2f..7f58844839 100644 --- a/drivers/net/gve/gve_rx_dqo.c +++ b/drivers/net/gve/gve_rx_dqo.c @@ -7,6 +7,38 @@ #include "gve_ethdev.h" #include "base/gve_adminq.h" +static inline void +gve_release_rxq_mbufs_dqo(struct gve_rx_queue *rxq) +{ + uint16_t i; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i]) { + rte_pktmbuf_free_seg(rxq->sw_ring[i]); + rxq->sw_ring[i] = NULL; + } + } + + rxq->nb_avail = rxq->nb_rx_desc; +} + +void +gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid) +{ + struct gve_rx_queue *q = dev->data->rx_queues[qid]; + + if (q == NULL) + return; + + gve_release_rxq_mbufs_dqo(q); + rte_free(q->sw_ring); + rte_memzone_free(q->compl_ring_mz); + rte_memzone_free(q->mz); + rte_memzone_free(q->qres_mz); + q->qres = NULL; + rte_free(q); +} + static void gve_reset_rxq_dqo(struct gve_rx_queue *rxq) { @@ -56,6 +88,12 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id, } nb_desc = hw->rx_desc_cnt; + /* Free memory if needed */ + if (dev->data->rx_queues[queue_id]) { + gve_rx_queue_release_dqo(dev, queue_id); + dev->data->rx_queues[queue_id] = NULL; + } + /* Allocate the RX queue data structure. */ rxq = rte_zmalloc_socket("gve rxq", sizeof(struct gve_rx_queue), @@ -154,3 +192,22 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id, rte_free(rxq); return err; } + +void +gve_stop_rx_queues_dqo(struct rte_eth_dev *dev) +{ + struct gve_priv *hw = dev->data->dev_private; + struct gve_rx_queue *rxq; + uint16_t i; + int err; + + err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues); + if (err != 0) + PMD_DRV_LOG(WARNING, "failed to destroy rxqs"); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + gve_release_rxq_mbufs_dqo(rxq); + gve_reset_rxq_dqo(rxq); + } +} diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c index fee3b939c7..13dc807623 100644 --- a/drivers/net/gve/gve_tx.c +++ b/drivers/net/gve/gve_tx.c @@ -672,6 +672,9 @@ gve_stop_tx_queues(struct rte_eth_dev *dev) uint16_t i; int err; + if (!gve_is_gqi(hw)) + return gve_stop_tx_queues_dqo(dev); + err = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues); if (err != 0) PMD_DRV_LOG(WARNING, "failed to destroy txqs"); diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c index 22d20ff16f..ea6d5ff85e 100644 --- a/drivers/net/gve/gve_tx_dqo.c +++ b/drivers/net/gve/gve_tx_dqo.c @@ -6,6 +6,36 @@ #include "gve_ethdev.h" #include "base/gve_adminq.h" +static inline void +gve_release_txq_mbufs_dqo(struct gve_tx_queue *txq) +{ + uint16_t i; + + for (i = 0; i < txq->sw_size; i++) { + if (txq->sw_ring[i]) { + rte_pktmbuf_free_seg(txq->sw_ring[i]); + txq->sw_ring[i] = NULL; + } + } +} + +void +gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid) +{ + struct gve_tx_queue *q = dev->data->tx_queues[qid]; + + if (q == NULL) + return; + + gve_release_txq_mbufs_dqo(q); + rte_free(q->sw_ring); + 
rte_memzone_free(q->mz); + rte_memzone_free(q->compl_ring_mz); + rte_memzone_free(q->qres_mz); + q->qres = NULL; + rte_free(q); +} + static int check_tx_thresh_dqo(uint16_t nb_desc, uint16_t tx_rs_thresh, uint16_t tx_free_thresh) @@ -91,6 +121,12 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id, } nb_desc = hw->tx_desc_cnt; + /* Free memory if needed. */ + if (dev->data->tx_queues[queue_id]) { + gve_tx_queue_release_dqo(dev, queue_id); + dev->data->tx_queues[queue_id] = NULL; + } + /* Allocate the TX queue data structure. */ txq = rte_zmalloc_socket("gve txq", sizeof(struct gve_tx_queue), @@ -183,3 +219,22 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id, rte_free(txq); return err; } + +void +gve_stop_tx_queues_dqo(struct rte_eth_dev *dev) +{ + struct gve_priv *hw = dev->data->dev_private; + struct gve_tx_queue *txq; + uint16_t i; + int err; + + err = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues); + if (err != 0) + PMD_DRV_LOG(WARNING, "failed to destroy txqs"); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + gve_release_txq_mbufs_dqo(txq); + gve_reset_txq_dqo(txq); + } +} From patchwork Thu Apr 13 06:16:45 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junfeng Guo X-Patchwork-Id: 125985 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 52F7B4292F; Thu, 13 Apr 2023 08:17:41 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E380942C24; Thu, 13 Apr 2023 08:17:25 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 0AC8C42D10 for ; Thu, 13 Apr 2023 08:17:24 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1681366645; x=1712902645; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=iG5YKTsgb0A5CVyco8WDn7iyjyrferfL8aiE05n6b8w=; b=B0Y9FDgia4cvxaJd75zXjQoo7Q6qHIZskLq+MYcPLVB929VUgfpsYYiP wiVR2L1voFKAIAu7K0HscDrnomm+Dy8CZxUN4PfC4PA6bT120vVkfesTW uJLuSFNh40x6w4Awxp0rMVheOHIN49cJuckITfuv8r5AFIqr0OeUOzc+b wvAZnHsh1H1sbcUaJ+c0GSsx+e4h3avajrCpmZ+c7VthblAl1Sri2caMm IqAe/O0/vgMxTgWXHywUiiIhZiXc90ncyD6sudvQUKawzMTNLKQwqDpKg cDQpLXDYvl6DyIlQt9vynnUV+y3y9BE1E9kkVtq/IB5yEk1MYGEyKrkOv g==; X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="341595364" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="341595364" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Apr 2023 23:17:24 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="691824279" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="691824279" Received: from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104]) by fmsmga007.fm.intel.com with ESMTP; 12 Apr 2023 23:17:21 -0700 From: Junfeng Guo To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com Cc: dev@dpdk.org, Junfeng Guo , Rushil Gupta , Joshua Washington , Jeroen de Borst Subject: [PATCH 05/10] net/gve: support basic Tx data path for DQO Date: Thu, 13 Apr 2023 14:16:45 +0800 Message-Id: <20230413061650.796940-6-junfeng.guo@intel.com> X-Mailer: git-send-email 
2.34.1 In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com> References: <20230413061650.796940-1-junfeng.guo@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add basic Tx data path support for DQO. Signed-off-by: Junfeng Guo Signed-off-by: Rushil Gupta Signed-off-by: Joshua Washington Signed-off-by: Jeroen de Borst --- drivers/net/gve/gve_ethdev.c | 1 + drivers/net/gve/gve_ethdev.h | 4 + drivers/net/gve/gve_tx_dqo.c | 141 +++++++++++++++++++++++++++++++++++ 3 files changed, 146 insertions(+) diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c index 340315a1a3..37bd8da12d 100644 --- a/drivers/net/gve/gve_ethdev.c +++ b/drivers/net/gve/gve_ethdev.c @@ -878,6 +878,7 @@ gve_dev_init(struct rte_eth_dev *eth_dev) eth_dev->tx_pkt_burst = gve_tx_burst; } else { eth_dev->dev_ops = &gve_eth_dev_ops_dqo; + eth_dev->tx_pkt_burst = gve_tx_burst_dqo; } eth_dev->data->mac_addrs = &priv->dev_addr; diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h index c8e1dd1435..1b8f511668 100644 --- a/drivers/net/gve/gve_ethdev.h +++ b/drivers/net/gve/gve_ethdev.h @@ -147,6 +147,7 @@ struct gve_tx_queue { uint8_t cur_gen_bit; uint32_t last_desc_cleaned; void **txqs; + uint16_t re_cnt; /* Only valid for DQO_RDA queue format */ struct gve_tx_queue *complq; @@ -390,4 +391,7 @@ gve_stop_tx_queues_dqo(struct rte_eth_dev *dev); void gve_stop_rx_queues_dqo(struct rte_eth_dev *dev); +uint16_t +gve_tx_burst_dqo(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); + #endif /* _GVE_ETHDEV_H_ */ diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c index ea6d5ff85e..2ea38a8f8e 100644 --- a/drivers/net/gve/gve_tx_dqo.c +++ b/drivers/net/gve/gve_tx_dqo.c @@ -6,6 +6,147 @@ #include "gve_ethdev.h" #include "base/gve_adminq.h" +static inline void +gve_tx_clean_dqo(struct gve_tx_queue *txq) +{ + struct gve_tx_compl_desc *compl_ring; + struct gve_tx_compl_desc *compl_desc; + struct gve_tx_queue *aim_txq; + uint16_t nb_desc_clean; + struct rte_mbuf *txe; + uint16_t compl_tag; + uint16_t next; + + next = txq->complq_tail; + compl_ring = txq->compl_ring; + compl_desc = &compl_ring[next]; + + if (compl_desc->generation != txq->cur_gen_bit) + return; + + compl_tag = rte_le_to_cpu_16(compl_desc->completion_tag); + + aim_txq = txq->txqs[compl_desc->id]; + + switch (compl_desc->type) { + case GVE_COMPL_TYPE_DQO_DESC: + /* need to clean Descs from last_cleaned to compl_tag */ + if (aim_txq->last_desc_cleaned > compl_tag) + nb_desc_clean = aim_txq->nb_tx_desc - aim_txq->last_desc_cleaned + + compl_tag; + else + nb_desc_clean = compl_tag - aim_txq->last_desc_cleaned; + aim_txq->nb_free += nb_desc_clean; + aim_txq->last_desc_cleaned = compl_tag; + break; + case GVE_COMPL_TYPE_DQO_REINJECTION: + PMD_DRV_LOG(DEBUG, "GVE_COMPL_TYPE_DQO_REINJECTION !!!"); + /* FALLTHROUGH */ + case GVE_COMPL_TYPE_DQO_PKT: + txe = aim_txq->sw_ring[compl_tag]; + if (txe != NULL) { + rte_pktmbuf_free_seg(txe); + txe = NULL; + } + break; + case GVE_COMPL_TYPE_DQO_MISS: + rte_delay_us_sleep(1); + PMD_DRV_LOG(DEBUG, "GVE_COMPL_TYPE_DQO_MISS ignored !!!"); + break; + default: + PMD_DRV_LOG(ERR, "unknown completion type."); + return; + } + + next++; + if (next == txq->nb_tx_desc * DQO_TX_MULTIPLIER) { + next = 0; + txq->cur_gen_bit ^= 1; + } + + txq->complq_tail = next; +} + +uint16_t +gve_tx_burst_dqo(void 
*tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct gve_tx_queue *txq = tx_queue; + volatile union gve_tx_desc_dqo *txr; + volatile union gve_tx_desc_dqo *txd; + struct rte_mbuf **sw_ring; + struct rte_mbuf *tx_pkt; + uint16_t mask, sw_mask; + uint16_t nb_to_clean; + uint16_t nb_tx = 0; + uint16_t nb_used; + uint16_t tx_id; + uint16_t sw_id; + + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + + mask = txq->nb_tx_desc - 1; + sw_mask = txq->sw_size - 1; + tx_id = txq->tx_tail; + sw_id = txq->sw_tail; + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + tx_pkt = tx_pkts[nb_tx]; + + if (txq->nb_free <= txq->free_thresh) { + nb_to_clean = DQO_TX_MULTIPLIER * txq->rs_thresh; + while (nb_to_clean--) + gve_tx_clean_dqo(txq); + } + + if (txq->nb_free < tx_pkt->nb_segs) + break; + + nb_used = tx_pkt->nb_segs; + + do { + txd = &txr[tx_id]; + + sw_ring[sw_id] = tx_pkt; + + /* fill Tx descriptor */ + txd->pkt.buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt)); + txd->pkt.dtype = GVE_TX_PKT_DESC_DTYPE_DQO; + txd->pkt.compl_tag = rte_cpu_to_le_16(sw_id); + txd->pkt.buf_size = RTE_MIN(tx_pkt->data_len, GVE_TX_MAX_BUF_SIZE_DQO); + + /* size of desc_ring and sw_ring could be different */ + tx_id = (tx_id + 1) & mask; + sw_id = (sw_id + 1) & sw_mask; + + tx_pkt = tx_pkt->next; + } while (tx_pkt); + + /* fill the last descriptor with End of Packet (EOP) bit */ + txd->pkt.end_of_packet = 1; + + txq->nb_free -= nb_used; + txq->nb_used += nb_used; + } + + /* update the tail pointer if any packets were processed */ + if (nb_tx > 0) { + /* Request a descriptor completion on the last descriptor */ + txq->re_cnt += nb_tx; + if (txq->re_cnt >= GVE_TX_MIN_RE_INTERVAL) { + txd = &txr[(tx_id - 1) & mask]; + txd->pkt.report_event = true; + txq->re_cnt = 0; + } + + rte_write32(tx_id, txq->qtx_tail); + txq->tx_tail = tx_id; + txq->sw_tail = sw_id; + } + + return nb_tx; +} + static inline void gve_release_txq_mbufs_dqo(struct gve_tx_queue *txq) { From patchwork Thu Apr 13 06:16:46 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junfeng Guo X-Patchwork-Id: 125986 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 6D0AF4292F; Thu, 13 Apr 2023 08:17:49 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id D185A42BFE; Thu, 13 Apr 2023 08:17:29 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id D3F1842D20 for ; Thu, 13 Apr 2023 08:17:27 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1681366648; x=1712902648; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=Jitc29KGyoOBGrDwPOGPEOauaQqPZB3mlvHQGQto5WA=; b=NiraoZ4ZPWiIth7VeX8KiagrehAWUfysncDUHw73bdPN53N0xvxrqBbH nPGgeRGeLdO7+PfpL1ZqX3QeLvGu6W9ZUxZ3loTR4rBq3/X/n3XVWmb/I 9vp5JuSlTw6+i/rpCnvr8VsOxUuFuhL9Cto0El4ti+0sZEJWplZCd5Frd CDlQXpYTv+DVgn4jeIit/zPkLBixIBTHzAX4Gd6qyPT4jAvsXS5DH8Bux AGYPw6nMLZFA5MApglO6tBkVs9LwduozKjfvuvnXzUlmz239xf21kyt2o 7HF91VJXqRFzi2w4WvO4140Ok44OkxHQ/rlXLnk48TmrEs6UtjjzNrlm4 Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="341595377" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="341595377" Received: from 
fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Apr 2023 23:17:27 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="691824293" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="691824293" Received: from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104]) by fmsmga007.fm.intel.com with ESMTP; 12 Apr 2023 23:17:24 -0700 From: Junfeng Guo To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com Cc: dev@dpdk.org, Junfeng Guo , Rushil Gupta , Joshua Washington , Jeroen de Borst Subject: [PATCH 06/10] net/gve: support basic Rx data path for DQO Date: Thu, 13 Apr 2023 14:16:46 +0800 Message-Id: <20230413061650.796940-7-junfeng.guo@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com> References: <20230413061650.796940-1-junfeng.guo@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add basic Rx data path support for DQO. Signed-off-by: Junfeng Guo Signed-off-by: Rushil Gupta Signed-off-by: Joshua Washington Signed-off-by: Jeroen de Borst --- drivers/net/gve/gve_ethdev.c | 1 + drivers/net/gve/gve_ethdev.h | 3 + drivers/net/gve/gve_rx_dqo.c | 128 +++++++++++++++++++++++++++++++++++ 3 files changed, 132 insertions(+) diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c index 37bd8da12d..a532b8a93a 100644 --- a/drivers/net/gve/gve_ethdev.c +++ b/drivers/net/gve/gve_ethdev.c @@ -878,6 +878,7 @@ gve_dev_init(struct rte_eth_dev *eth_dev) eth_dev->tx_pkt_burst = gve_tx_burst; } else { eth_dev->dev_ops = &gve_eth_dev_ops_dqo; + eth_dev->rx_pkt_burst = gve_rx_burst_dqo; eth_dev->tx_pkt_burst = gve_tx_burst_dqo; } diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h index 1b8f511668..617bb55a85 100644 --- a/drivers/net/gve/gve_ethdev.h +++ b/drivers/net/gve/gve_ethdev.h @@ -391,6 +391,9 @@ gve_stop_tx_queues_dqo(struct rte_eth_dev *dev); void gve_stop_rx_queues_dqo(struct rte_eth_dev *dev); +uint16_t +gve_rx_burst_dqo(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + uint16_t gve_tx_burst_dqo(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c index 7f58844839..d0eaea9c24 100644 --- a/drivers/net/gve/gve_rx_dqo.c +++ b/drivers/net/gve/gve_rx_dqo.c @@ -7,6 +7,134 @@ #include "gve_ethdev.h" #include "base/gve_adminq.h" +static inline void +gve_rx_refill_dqo(struct gve_rx_queue *rxq) +{ + volatile struct gve_rx_desc_dqo *rx_buf_ring; + volatile struct gve_rx_desc_dqo *rx_buf_desc; + struct rte_mbuf *nmb[rxq->free_thresh]; + uint16_t nb_refill = rxq->free_thresh; + uint16_t nb_desc = rxq->nb_rx_desc; + uint16_t next_avail = rxq->bufq_tail; + struct rte_eth_dev *dev; + uint64_t dma_addr; + uint16_t delta; + int i; + + if (rxq->nb_rx_hold < rxq->free_thresh) + return; + + rx_buf_ring = rxq->rx_ring; + delta = nb_desc - next_avail; + if (unlikely(delta < nb_refill)) { + if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, delta) == 0)) { + for (i = 0; i < delta; i++) { + rx_buf_desc = &rx_buf_ring[next_avail + i]; + rxq->sw_ring[next_avail + i] = nmb[i]; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i])); + rx_buf_desc->header_buf_addr = 0; + rx_buf_desc->buf_addr = dma_addr; + } + nb_refill -= delta; + next_avail 
= 0; + rxq->nb_rx_hold -= delta; + } else { + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail; + PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u", + rxq->port_id, rxq->queue_id); + return; + } + } + + if (nb_desc - next_avail >= nb_refill) { + if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, nb_refill) == 0)) { + for (i = 0; i < nb_refill; i++) { + rx_buf_desc = &rx_buf_ring[next_avail + i]; + rxq->sw_ring[next_avail + i] = nmb[i]; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i])); + rx_buf_desc->header_buf_addr = 0; + rx_buf_desc->buf_addr = dma_addr; + } + next_avail += nb_refill; + rxq->nb_rx_hold -= nb_refill; + } else { + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail; + PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u", + rxq->port_id, rxq->queue_id); + } + } + + rte_write32(next_avail, rxq->qrx_tail); + + rxq->bufq_tail = next_avail; +} + +uint16_t +gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + volatile struct gve_rx_compl_desc_dqo *rx_compl_ring; + volatile struct gve_rx_compl_desc_dqo *rx_desc; + struct gve_rx_queue *rxq; + struct rte_mbuf *rxm; + uint16_t rx_id_bufq; + uint16_t pkt_len; + uint16_t rx_id; + uint16_t nb_rx; + + nb_rx = 0; + rxq = rx_queue; + rx_id = rxq->rx_tail; + rx_id_bufq = rxq->next_avail; + rx_compl_ring = rxq->compl_ring; + + while (nb_rx < nb_pkts) { + rx_desc = &rx_compl_ring[rx_id]; + + /* check status */ + if (rx_desc->generation != rxq->cur_gen_bit) + break; + + if (unlikely(rx_desc->rx_error)) + continue; + + pkt_len = rx_desc->packet_len; + + rx_id++; + if (rx_id == rxq->nb_rx_desc) { + rx_id = 0; + rxq->cur_gen_bit ^= 1; + } + + rxm = rxq->sw_ring[rx_id_bufq]; + rx_id_bufq++; + if (rx_id_bufq == rxq->nb_rx_desc) + rx_id_bufq = 0; + rxq->nb_rx_hold++; + + rxm->pkt_len = pkt_len; + rxm->data_len = pkt_len; + rxm->port = rxq->port_id; + rxm->ol_flags = 0; + + rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; + rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash); + + rx_pkts[nb_rx++] = rxm; + } + + if (nb_rx > 0) { + rxq->rx_tail = rx_id; + if (rx_id_bufq != rxq->next_avail) + rxq->next_avail = rx_id_bufq; + + gve_rx_refill_dqo(rxq); + } + + return nb_rx; +} + static inline void gve_release_rxq_mbufs_dqo(struct gve_rx_queue *rxq) { From patchwork Thu Apr 13 06:16:47 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junfeng Guo X-Patchwork-Id: 125987 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id A93684292F; Thu, 13 Apr 2023 08:17:55 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2EFD142D31; Thu, 13 Apr 2023 08:17:32 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id AB4E442D31 for ; Thu, 13 Apr 2023 08:17:30 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1681366650; x=1712902650; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=eATX7Vyrl0jFsLsEHqazHHkFPgveqgXRPsGFYiBgccY=; b=FDiOPSYFmjF+ORdu4sADcEEBPPREJ6iceoRHGGe9+buU1Iq4eXPjUnbL 
iK66tTFvO0vi5jFyyHEy97dJzpn5mG3SgES0xbeDiOwbEWJpVWiVYqcQI aIcyQG49grQaePy1JJyl+M3QHIDgkBLg8HeHBfUkZ2rxefigSGnjt1uNL V7po8juzaQ9NBReda4NDP28PFwdeG1kxwrGoI83+gXK1r+DOLAF/2Qqa8 UeBAEbwPXKZhFGMdlcokvS+bhLfhqqXVrKAm5cCGrwR1WuzfDyNByWohJ Ja+K0fj7EhGk49qf9JQwnJN38+7UlE5Gu6E90RIAaIMQ7KrLUC5AuOSvU w==; X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="341595383" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="341595383" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Apr 2023 23:17:30 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="691824300" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="691824300" Received: from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104]) by fmsmga007.fm.intel.com with ESMTP; 12 Apr 2023 23:17:27 -0700 From: Junfeng Guo To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com Cc: dev@dpdk.org, Junfeng Guo , Rushil Gupta , Joshua Washington , Jeroen de Borst Subject: [PATCH 07/10] net/gve: support basic stats for DQO Date: Thu, 13 Apr 2023 14:16:47 +0800 Message-Id: <20230413061650.796940-8-junfeng.guo@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com> References: <20230413061650.796940-1-junfeng.guo@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add basic stats support for DQO. Signed-off-by: Junfeng Guo Signed-off-by: Rushil Gupta Signed-off-by: Joshua Washington Signed-off-by: Jeroen de Borst --- drivers/net/gve/gve_ethdev.c | 5 ++++- drivers/net/gve/gve_rx_dqo.c | 14 +++++++++++++- drivers/net/gve/gve_tx_dqo.c | 7 +++++++ 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c index a532b8a93a..8b6861a24f 100644 --- a/drivers/net/gve/gve_ethdev.c +++ b/drivers/net/gve/gve_ethdev.c @@ -150,14 +150,17 @@ gve_refill_dqo(struct gve_rx_queue *rxq) diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc); if (diag < 0) { + rxq->stats.no_mbufs_bulk++; for (i = 0; i < rxq->nb_rx_desc - 1; i++) { nmb = rte_pktmbuf_alloc(rxq->mpool); if (!nmb) break; rxq->sw_ring[i] = nmb; } - if (i < rxq->nb_rx_desc - 1) + if (i < rxq->nb_rx_desc - 1) { + rxq->stats.no_mbufs += rxq->nb_rx_desc - 1 - i; return -ENOMEM; + } } for (i = 0; i < rxq->nb_rx_desc; i++) { diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c index d0eaea9c24..1d6b21359c 100644 --- a/drivers/net/gve/gve_rx_dqo.c +++ b/drivers/net/gve/gve_rx_dqo.c @@ -39,6 +39,8 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq) next_avail = 0; rxq->nb_rx_hold -= delta; } else { + rxq->stats.no_mbufs_bulk++; + rxq->stats.no_mbufs += nb_desc - next_avail; dev = &rte_eth_devices[rxq->port_id]; dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail; PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u", @@ -59,6 +61,8 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq) next_avail += nb_refill; rxq->nb_rx_hold -= nb_refill; } else { + rxq->stats.no_mbufs_bulk++; + rxq->stats.no_mbufs += nb_desc - next_avail; dev = &rte_eth_devices[rxq->port_id]; dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail; PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u", @@ -82,7 +86,9 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf 
**rx_pkts, uint16_t nb_pkts) uint16_t pkt_len; uint16_t rx_id; uint16_t nb_rx; + uint64_t bytes; + bytes = 0; nb_rx = 0; rxq = rx_queue; rx_id = rxq->rx_tail; @@ -96,8 +102,10 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if (rx_desc->generation != rxq->cur_gen_bit) break; - if (unlikely(rx_desc->rx_error)) + if (unlikely(rx_desc->rx_error)) { + rxq->stats.errors++; continue; + } pkt_len = rx_desc->packet_len; @@ -122,6 +130,7 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash); rx_pkts[nb_rx++] = rxm; + bytes += pkt_len; } if (nb_rx > 0) { @@ -130,6 +139,9 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxq->next_avail = rx_id_bufq; gve_rx_refill_dqo(rxq); + + rxq->stats.packets += nb_rx; + rxq->stats.bytes += bytes; } return nb_rx; diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c index 2ea38a8f8e..578a409616 100644 --- a/drivers/net/gve/gve_tx_dqo.c +++ b/drivers/net/gve/gve_tx_dqo.c @@ -81,10 +81,12 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) uint16_t nb_used; uint16_t tx_id; uint16_t sw_id; + uint64_t bytes; sw_ring = txq->sw_ring; txr = txq->tx_ring; + bytes = 0; mask = txq->nb_tx_desc - 1; sw_mask = txq->sw_size - 1; tx_id = txq->tx_tail; @@ -119,6 +121,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) tx_id = (tx_id + 1) & mask; sw_id = (sw_id + 1) & sw_mask; + bytes += tx_pkt->pkt_len; tx_pkt = tx_pkt->next; } while (tx_pkt); @@ -142,6 +145,10 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) rte_write32(tx_id, txq->qtx_tail); txq->tx_tail = tx_id; txq->sw_tail = sw_id; + + txq->stats.packets += nb_tx; + txq->stats.bytes += bytes; + txq->stats.errors += nb_pkts - nb_tx; } return nb_tx; From patchwork Thu Apr 13 06:16:48 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Junfeng Guo X-Patchwork-Id: 125988 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 3014C4292F; Thu, 13 Apr 2023 08:18:02 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id BE8CE42D36; Thu, 13 Apr 2023 08:17:34 +0200 (CEST) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 5676042D39 for ; Thu, 13 Apr 2023 08:17:33 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1681366653; x=1712902653; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=Ka50OVYICO3H0ccRSyg3wHWWdaW7/2rj1nTiKpIzHtA=; b=JT/uY4fWKWV3pM9PPXo4CKnb2smvLZ38YFCPJVsMRDFaZ6niCUTCUuLk Im6JpkhPLNFZWGIyShCWKpWUgjEZxm5F0nLV2P9JMAO796ExThS8qnZOX A6PBefpZVf9N5uoel/4IewRmWUjg1/s03SVjU2yUbKuMAn7UyLLH7E8Yv lwdA8+SoO7q8HQQGLLptNslnV42NthETEFNLuE7RxYuc4vp6BfBS8gbZr eJltjFsUIwrgL1vWCh4QHRWJVcRrk5Zz/jtdz1DPGCTr8IGAx7tYy5wuT K/idDn933+RSkRp6k4Ez9zF1BoPEyRqbhd2s6Ma3FLdjoxC8m47iIVG6b Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10678"; a="341595392" X-IronPort-AV: E=Sophos;i="5.98,339,1673942400"; d="scan'208";a="341595392" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga102.fm.intel.com with 
From patchwork Thu Apr 13 06:16:48 2023
X-Patchwork-Submitter: Junfeng Guo
X-Patchwork-Id: 125988
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Junfeng Guo
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst
Subject: [PATCH 08/10] net/gve: enable Tx checksum offload for DQO
Date: Thu, 13 Apr 2023 14:16:48 +0800
Message-Id: <20230413061650.796940-9-junfeng.guo@intel.com>
In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com>
References: <20230413061650.796940-1-junfeng.guo@intel.com>

Enable Tx checksum offload whenever any L4 checksum flag is set on the
packet.

Signed-off-by: Junfeng Guo
Signed-off-by: Rushil Gupta
Signed-off-by: Joshua Washington
Signed-off-by: Jeroen de Borst
---
 drivers/net/gve/gve_ethdev.h | 4 ++++
 drivers/net/gve/gve_tx_dqo.c | 5 +++++
 2 files changed, 9 insertions(+)

diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 617bb55a85..4a0e860afa 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -38,6 +38,10 @@
 #define GVE_MAX_MTU    RTE_ETHER_MTU
 #define GVE_MIN_MTU    RTE_ETHER_MIN_MTU

+#define GVE_TX_CKSUM_OFFLOAD_MASK ( \
+        RTE_MBUF_F_TX_L4_MASK |     \
+        RTE_MBUF_F_TX_TCP_SEG)
+
 /* A list of pages registered with the device during setup and used by a queue
  * as buffers
  */
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 578a409616..b38eeaea4b 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -78,6 +78,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
     uint16_t mask, sw_mask;
     uint16_t nb_to_clean;
     uint16_t nb_tx = 0;
+    uint64_t ol_flags;
     uint16_t nb_used;
     uint16_t tx_id;
     uint16_t sw_id;
@@ -104,6 +105,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
         if (txq->nb_free < tx_pkt->nb_segs)
             break;

+        ol_flags = tx_pkt->ol_flags;
         nb_used = tx_pkt->nb_segs;

         do {
@@ -128,6 +130,9 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
         /* fill the last descriptor with End of Packet (EOP) bit */
         txd->pkt.end_of_packet = 1;

+        if (ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK)
+            txd->pkt.checksum_offload_enable = 1;
+
         txq->nb_free -= nb_used;
         txq->nb_used += nb_used;
     }
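On the application side, a packet only matches GVE_TX_CKSUM_OFFLOAD_MASK if
its mbuf carries an L4 checksum (or TSO) flag. A minimal sketch of preparing
an IPv4/TCP mbuf for this offload, assuming the port was configured with
RTE_ETH_TX_OFFLOAD_TCP_CKSUM and the header lengths are already known:

#include <rte_mbuf.h>

/* Mark an already-built IPv4/TCP frame for hardware L4 checksum.
 * RTE_MBUF_F_TX_TCP_CKSUM is part of RTE_MBUF_F_TX_L4_MASK, so this
 * causes checksum_offload_enable to be set in the DQO Tx descriptor.
 */
static void
request_tx_tcp_cksum(struct rte_mbuf *m, uint16_t l2_len, uint16_t l3_len)
{
    m->l2_len = l2_len;   /* e.g. 14 for a plain Ethernet header */
    m->l3_len = l3_len;   /* e.g. 20 for an IPv4 header without options */
    m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM;
}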
From patchwork Thu Apr 13 06:16:49 2023
X-Patchwork-Submitter: Junfeng Guo
X-Patchwork-Id: 125989
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Junfeng Guo
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo, Rushil Gupta
Subject: [PATCH 09/10] net/gve: add maintainers for GVE
Date: Thu, 13 Apr 2023 14:16:49 +0800
Message-Id: <20230413061650.796940-10-junfeng.guo@intel.com>
In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com>
References: <20230413061650.796940-1-junfeng.guo@intel.com>

Add maintainers from Google for GVE.
Signed-off-by: Junfeng Guo
Signed-off-by: Rushil Gupta
---
 MAINTAINERS | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 8df23e5099..08001751b0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -713,6 +713,9 @@ F: doc/guides/nics/features/enic.ini
 Google Virtual Ethernet
 M: Junfeng Guo
+M: Jeroen de Borst
+M: Rushil Gupta
+M: Joshua Washington
 F: drivers/net/gve/
 F: doc/guides/nics/gve.rst
 F: doc/guides/nics/features/gve.ini

From patchwork Thu Apr 13 06:16:50 2023
X-Patchwork-Submitter: Junfeng Guo
X-Patchwork-Id: 125990
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Junfeng Guo
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst
Subject: [PATCH 10/10] net/gve: support jumbo frame for GQI
Date: Thu, 13 Apr 2023 14:16:50 +0800
Message-Id: <20230413061650.796940-11-junfeng.guo@intel.com>
In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com>
References: <20230413061650.796940-1-junfeng.guo@intel.com>

Add multi-segment Rx support to enable jumbo frames for the GQI queue
format.
Signed-off-by: Rushil Gupta
Signed-off-by: Joshua Washington
Signed-off-by: Junfeng Guo
Signed-off-by: Jeroen de Borst
---
 drivers/net/gve/gve_ethdev.h |   8 ++
 drivers/net/gve/gve_rx.c     | 137 +++++++++++++++++++++++++----------
 2 files changed, 108 insertions(+), 37 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 4a0e860afa..53a75044c5 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -159,6 +159,13 @@ struct gve_tx_queue {
     uint8_t is_gqi_qpl;
 };

+struct gve_rx_ctx {
+    struct rte_mbuf *mbuf_head;
+    struct rte_mbuf *mbuf_tail;
+    uint16_t total_frags;
+    bool drop_pkt;
+};
+
 struct gve_rx_queue {
     volatile struct gve_rx_desc *rx_desc_ring;
     volatile union gve_rx_data_slot *rx_data_ring;
@@ -167,6 +174,7 @@ struct gve_rx_queue {
     uint64_t rx_ring_phys_addr;
     struct rte_mbuf **sw_ring;
     struct rte_mempool *mpool;
+    struct gve_rx_ctx ctx;

     uint16_t rx_tail;
     uint16_t nb_rx_desc;
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 3dd3f578f9..f2f6202404 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -5,6 +5,8 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"

+#define GVE_PKT_CONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
+
 static inline void
 gve_rx_refill(struct gve_rx_queue *rxq)
 {
@@ -87,43 +89,72 @@ gve_rx_refill(struct gve_rx_queue *rxq)
     }
 }

-uint16_t
-gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+/*
+ * This method processes a single rte_mbuf and handles packet segmentation.
+ * In QPL mode it copies data from the queue page list into the mbuf.
+ */
+static void
+gve_rx_mbuf(struct gve_rx_queue *rxq, struct rte_mbuf *rxe, uint16_t len,
+            uint16_t rx_id)
 {
-    volatile struct gve_rx_desc *rxr, *rxd;
-    struct gve_rx_queue *rxq = rx_queue;
-    uint16_t rx_id = rxq->rx_tail;
-    struct rte_mbuf *rxe;
-    uint16_t nb_rx, len;
-    uint64_t bytes = 0;
+    uint16_t padding = 0;
     uint64_t addr;
-    uint16_t i;
-
-    rxr = rxq->rx_desc_ring;
-    nb_rx = 0;
-
-    for (i = 0; i < nb_pkts; i++) {
-        rxd = &rxr[rx_id];
-        if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
-            break;
-
-        if (rxd->flags_seq & GVE_RXF_ERR) {
-            rxq->stats.errors++;
-            continue;
-        }
-
-        len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
-        rxe = rxq->sw_ring[rx_id];
-        if (rxq->is_gqi_qpl) {
-            addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + GVE_RX_PAD;
-            rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
-                       (void *)(size_t)addr, len);
-        }
+    rxe->data_len = len;
+    if (!rxq->ctx.mbuf_head) {
+        rxq->ctx.mbuf_head = rxe;
+        rxq->ctx.mbuf_tail = rxe;
+        rxe->nb_segs = 1;
         rxe->pkt_len = len;
         rxe->data_len = len;
         rxe->port = rxq->port_id;
         rxe->ol_flags = 0;
+        padding = GVE_RX_PAD;
+    } else {
+        rxq->ctx.mbuf_head->pkt_len += len;
+        rxq->ctx.mbuf_head->nb_segs += 1;
+        rxq->ctx.mbuf_tail->next = rxe;
+        rxq->ctx.mbuf_tail = rxe;
+    }
+    if (rxq->is_gqi_qpl) {
+        addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + padding;
+        rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
+                   (void *)(size_t)addr, len);
+    }
+}
+
+/*
+ * This method processes a single packet fragment associated with the
+ * passed packet descriptor.
+ * It returns whether the fragment is the last fragment of a packet.
+ */
+static bool
+gve_rx(struct gve_rx_queue *rxq, volatile struct gve_rx_desc *rxd, uint16_t rx_id)
+{
+    bool is_last_frag = !GVE_PKT_CONT_BIT_IS_SET(rxd->flags_seq);
+    uint16_t frag_size = rte_be_to_cpu_16(rxd->len);
+    struct gve_rx_ctx *ctx = &rxq->ctx;
+    bool is_first_frag = ctx->total_frags == 0;
+    struct rte_mbuf *rxe;
+
+    if (ctx->drop_pkt)
+        goto finish_frag;
+
+    if (rxd->flags_seq & GVE_RXF_ERR) {
+        ctx->drop_pkt = true;
+        rxq->stats.errors++;
+        goto finish_frag;
+    }
+
+    if (is_first_frag)
+        frag_size -= GVE_RX_PAD;
+
+    rxe = rxq->sw_ring[rx_id];
+    gve_rx_mbuf(rxq, rxe, frag_size, rx_id);
+    rxq->stats.bytes += frag_size;
+
+    if (is_first_frag) {
         if (rxd->flags_seq & GVE_RXF_TCP)
             rxe->packet_type |= RTE_PTYPE_L4_TCP;
         if (rxd->flags_seq & GVE_RXF_UDP)
@@ -137,28 +168,60 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
             rxe->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
             rxe->hash.rss = rte_be_to_cpu_32(rxd->rss_hash);
         }
+    }

-        rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
+finish_frag:
+    ctx->total_frags++;
+    return is_last_frag;
+}
+
+static void
+gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+    ctx->mbuf_head = NULL;
+    ctx->mbuf_tail = NULL;
+    ctx->drop_pkt = false;
+    ctx->total_frags = 0;
+}
+
+uint16_t
+gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+    volatile struct gve_rx_desc *rxr, *rxd;
+    struct gve_rx_queue *rxq = rx_queue;
+    struct gve_rx_ctx *ctx = &rxq->ctx;
+    uint16_t rx_id = rxq->rx_tail;
+    uint16_t nb_rx;
+
+    rxr = rxq->rx_desc_ring;
+    nb_rx = 0;
+
+    while (nb_rx < nb_pkts) {
+        rxd = &rxr[rx_id];
+        if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
+            break;
+
+        if (gve_rx(rxq, rxd, rx_id)) {
+            if (!ctx->drop_pkt)
+                rx_pkts[nb_rx++] = ctx->mbuf_head;
+            rxq->nb_avail += ctx->total_frags;
+            gve_rx_ctx_clear(ctx);
+        }

         rx_id++;
         if (rx_id == rxq->nb_rx_desc)
             rx_id = 0;

-        rx_pkts[nb_rx] = rxe;
-        bytes += len;
-        nb_rx++;
+        rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
     }

-    rxq->nb_avail += nb_rx;
     rxq->rx_tail = rx_id;

     if (rxq->nb_avail > rxq->free_thresh)
         gve_rx_refill(rxq);

-    if (nb_rx) {
+    if (nb_rx)
         rxq->stats.packets += nb_rx;
-        rxq->stats.bytes += bytes;
-    }

     return nb_rx;
 }
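A jumbo frame received through this path arrives as a chain of mbufs:
gve_rx_mbuf() links each fragment via next and keeps pkt_len and nb_segs
up to date on the head segment, while each segment's data_len covers only
its own buffer. A sketch of a consumer that flattens such a chain into one
contiguous buffer (an illustrative helper, not part of the driver):

#include <rte_mbuf.h>
#include <rte_memcpy.h>

/* Copy a possibly multi-segment frame into one contiguous buffer.
 * Returns the frame length, or 0 if the destination is too small.
 */
static uint32_t
flatten_frame(const struct rte_mbuf *m, uint8_t *dst, uint32_t cap)
{
    uint32_t off = 0;

    if (m->pkt_len > cap)
        return 0;

    /* Walk the next chain built by gve_rx_mbuf(); the per-segment
     * data_len values sum to the head segment's pkt_len.
     */
    for (; m != NULL; m = m->next) {
        rte_memcpy(dst + off,
                   rte_pktmbuf_mtod(m, const uint8_t *),
                   m->data_len);
        off += m->data_len;
    }
    return off;
}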