From patchwork Thu Apr 13 06:16:47 2023
X-Patchwork-Submitter: Junfeng Guo
X-Patchwork-Id: 125987
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Junfeng Guo
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com,
 beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo, Rushil Gupta, Joshua Washington,
 Jeroen de Borst
Subject: [PATCH 07/10] net/gve: support basic stats for DQO
Date: Thu, 13 Apr 2023 14:16:47 +0800
Message-Id: <20230413061650.796940-8-junfeng.guo@intel.com>
In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com>
References: <20230413061650.796940-1-junfeng.guo@intel.com>
List-Id: DPDK patches and discussions

Add basic stats support for DQO: accumulate per-queue packet, byte,
error and mbuf-allocation-failure counters in the DQO Rx/Tx burst and
refill paths.
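[Illustration only, not part of this patch: the counters updated below are
kept per queue. Assuming the stats_get callback added elsewhere in this
series simply sums them into the generic ethdev counters, the aggregation
could look roughly like the sketch below. The callback name and exact field
mapping are assumptions; the rxq->stats/txq->stats member names are taken
from the diff.]

/*
 * Hypothetical aggregation sketch (names illustrative): sum the per-queue
 * counters updated by this patch into struct rte_eth_stats.
 */
static int
gve_dev_stats_get_sketch(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct gve_rx_queue *rxq = dev->data->rx_queues[i];

		if (rxq == NULL)
			continue;
		stats->ipackets += rxq->stats.packets;
		stats->ibytes += rxq->stats.bytes;
		stats->ierrors += rxq->stats.errors;
		stats->rx_nombuf += rxq->stats.no_mbufs;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct gve_tx_queue *txq = dev->data->tx_queues[i];

		if (txq == NULL)
			continue;
		stats->opackets += txq->stats.packets;
		stats->obytes += txq->stats.bytes;
		stats->oerrors += txq->stats.errors;
	}

	return 0;
}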
Signed-off-by: Junfeng Guo
Signed-off-by: Rushil Gupta
Signed-off-by: Joshua Washington
Signed-off-by: Jeroen de Borst
---
 drivers/net/gve/gve_ethdev.c |  5 ++++-
 drivers/net/gve/gve_rx_dqo.c | 14 +++++++++++++-
 drivers/net/gve/gve_tx_dqo.c |  7 +++++++
 3 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index a532b8a93a..8b6861a24f 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -150,14 +150,17 @@ gve_refill_dqo(struct gve_rx_queue *rxq)
 
 	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
 	if (diag < 0) {
+		rxq->stats.no_mbufs_bulk++;
 		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
 			nmb = rte_pktmbuf_alloc(rxq->mpool);
 			if (!nmb)
 				break;
 			rxq->sw_ring[i] = nmb;
 		}
-		if (i < rxq->nb_rx_desc - 1)
+		if (i < rxq->nb_rx_desc - 1) {
+			rxq->stats.no_mbufs += rxq->nb_rx_desc - 1 - i;
 			return -ENOMEM;
+		}
 	}
 
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index d0eaea9c24..1d6b21359c 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -39,6 +39,8 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq)
 			next_avail = 0;
 			rxq->nb_rx_hold -= delta;
 		} else {
+			rxq->stats.no_mbufs_bulk++;
+			rxq->stats.no_mbufs += nb_desc - next_avail;
 			dev = &rte_eth_devices[rxq->port_id];
 			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
 			PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
@@ -59,6 +61,8 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq)
 			next_avail += nb_refill;
 			rxq->nb_rx_hold -= nb_refill;
 		} else {
+			rxq->stats.no_mbufs_bulk++;
+			rxq->stats.no_mbufs += nb_desc - next_avail;
 			dev = &rte_eth_devices[rxq->port_id];
 			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
 			PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
@@ -82,7 +86,9 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	uint16_t pkt_len;
 	uint16_t rx_id;
 	uint16_t nb_rx;
+	uint64_t bytes;
 
+	bytes = 0;
 	nb_rx = 0;
 	rxq = rx_queue;
 	rx_id = rxq->rx_tail;
@@ -96,8 +102,10 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if (rx_desc->generation != rxq->cur_gen_bit)
 			break;
 
-		if (unlikely(rx_desc->rx_error))
+		if (unlikely(rx_desc->rx_error)) {
+			rxq->stats.errors++;
 			continue;
+		}
 
 		pkt_len = rx_desc->packet_len;
 
@@ -122,6 +130,7 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash);
 
 		rx_pkts[nb_rx++] = rxm;
+		bytes += pkt_len;
 	}
 
 	if (nb_rx > 0) {
@@ -130,6 +139,9 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxq->next_avail = rx_id_bufq;
 
 		gve_rx_refill_dqo(rxq);
+
+		rxq->stats.packets += nb_rx;
+		rxq->stats.bytes += bytes;
 	}
 
 	return nb_rx;
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 2ea38a8f8e..578a409616 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -81,10 +81,12 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	uint16_t nb_used;
 	uint16_t tx_id;
 	uint16_t sw_id;
+	uint64_t bytes;
 
 	sw_ring = txq->sw_ring;
 	txr = txq->tx_ring;
 
+	bytes = 0;
 	mask = txq->nb_tx_desc - 1;
 	sw_mask = txq->sw_size - 1;
 	tx_id = txq->tx_tail;
@@ -119,6 +121,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 			tx_id = (tx_id + 1) & mask;
 			sw_id = (sw_id + 1) & sw_mask;
+			bytes += tx_pkt->pkt_len;
 
 			tx_pkt = tx_pkt->next;
 		} while (tx_pkt);
@@ -142,6 +145,10 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		rte_write32(tx_id, txq->qtx_tail);
 		txq->tx_tail = tx_id;
 		txq->sw_tail = sw_id;
+
+		txq->stats.packets += nb_tx;
+		txq->stats.bytes += bytes;
+		txq->stats.errors += nb_pkts - nb_tx;
 	}
 
 	return nb_tx;
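
[Usage note, not part of the patch: once the per-queue counters above are
reported through the driver's stats_get callback, an application can read
them with the standard ethdev API. rte_eth_stats_get() and the struct
rte_eth_stats fields are the stock DPDK API; the helper name below is
illustrative only.]

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Illustrative helper: print the basic counters for one port. */
static void
print_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	printf("rx: %" PRIu64 " pkts, %" PRIu64 " bytes, %" PRIu64 " errors, %" PRIu64 " nombuf\n",
	       stats.ipackets, stats.ibytes, stats.ierrors, stats.rx_nombuf);
	printf("tx: %" PRIu64 " pkts, %" PRIu64 " bytes, %" PRIu64 " errors\n",
	       stats.opackets, stats.obytes, stats.oerrors);
}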