From patchwork Fri Sep 23 09:38:29 2022
X-Patchwork-Submitter: Junfeng Guo <junfeng.guo@intel.com>
X-Patchwork-Id: 116733
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Junfeng Guo <junfeng.guo@intel.com>
To: qi.z.zhang@intel.com, jingjing.wu@intel.com
Cc: ferruh.yigit@xilinx.com, dev@dpdk.org, xiaoyun.li@intel.com,
 awogbemila@google.com, bruce.richardson@intel.com, xueqin.lin@intel.com,
 junfeng.guo@intel.com
Subject: [PATCH v3 9/9] net/gve: add stats support
Date: Fri, 23 Sep 2022 17:38:29 +0800
Message-Id: <20220923093829.3019525-10-junfeng.guo@intel.com>
In-Reply-To: <20220923093829.3019525-1-junfeng.guo@intel.com>
References: <20220829084127.934183-11-junfeng.guo@intel.com>
 <20220923093829.3019525-1-junfeng.guo@intel.com>

Update stats: add support for the stats_get and stats_reset dev_ops, so
basic and per-queue statistics can be queried and cleared through the
generic ethdev API.
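For context (not part of the commit itself), a minimal sketch of how an
application would exercise these new callbacks through the generic ethdev
API; the port id and output format below are illustrative only:

#include <inttypes.h>
#include <stdio.h>

#include <rte_ethdev.h>

/* Illustrative only: read and then clear the basic stats of one port.
 * rte_eth_stats_get()/rte_eth_stats_reset() end up calling the
 * stats_get/stats_reset dev_ops added by this patch. */
static void
show_and_reset_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0) {
		printf("port %u: failed to get stats\n", port_id);
		return;
	}

	printf("port %u: ipackets=%" PRIu64 " opackets=%" PRIu64
	       " ibytes=%" PRIu64 " obytes=%" PRIu64
	       " ierrors=%" PRIu64 " oerrors=%" PRIu64
	       " rx_nombuf=%" PRIu64 "\n",
	       port_id, stats.ipackets, stats.opackets,
	       stats.ibytes, stats.obytes,
	       stats.ierrors, stats.oerrors, stats.rx_nombuf);

	rte_eth_stats_reset(port_id);
}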
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 doc/guides/nics/features/gve.ini |  2 +
 drivers/net/gve/gve_ethdev.c     | 71 ++++++++++++++++++++++++++++++++
 drivers/net/gve/gve_ethdev.h     | 12 ++++++
 drivers/net/gve/gve_rx.c         | 15 ++++++-
 drivers/net/gve/gve_tx.c         | 13 ++++++
 5 files changed, 111 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/features/gve.ini b/doc/guides/nics/features/gve.ini
index cdc46b08a3..180408aa80 100644
--- a/doc/guides/nics/features/gve.ini
+++ b/doc/guides/nics/features/gve.ini
@@ -10,6 +10,8 @@ MTU update           = Y
 TSO                  = Y
 RSS hash             = Y
 L4 checksum offload  = Y
+Basic stats          = Y
+Stats per queue      = Y
 Linux                = Y
 x86-32               = Y
 x86-64               = Y
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index e3195376c4..7730835ed5 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -328,6 +328,75 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	return 0;
 }
 
+static int
+gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct gve_tx_queue *txq = dev->data->tx_queues[i];
+		if (txq == NULL)
+			continue;
+
+		stats->opackets += txq->packets;
+		stats->obytes += txq->bytes;
+		stats->oerrors += txq->errors;
+
+		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+			stats->q_opackets[i] = txq->packets;
+			stats->q_obytes[i] = txq->bytes;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+		if (rxq == NULL)
+			continue;
+
+		stats->ipackets += rxq->packets;
+		stats->ibytes += rxq->bytes;
+		stats->ierrors += rxq->errors;
+		stats->rx_nombuf += rxq->no_mbufs;
+
+		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+			stats->q_ipackets[i] = rxq->packets;
+			stats->q_ibytes[i] = rxq->bytes;
+			stats->q_errors[i] = rxq->errors;
+		}
+	}
+
+	return 0;
+}
+
+static int
+gve_dev_stats_reset(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct gve_tx_queue *txq = dev->data->tx_queues[i];
+		if (txq == NULL)
+			continue;
+
+		txq->packets = 0;
+		txq->bytes = 0;
+		txq->errors = 0;
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+		if (rxq == NULL)
+			continue;
+
+		rxq->packets = 0;
+		rxq->bytes = 0;
+		rxq->no_mbufs = 0;
+		rxq->errors = 0;
+	}
+
+	return 0;
+}
+
 static int
 gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
@@ -365,6 +434,8 @@ static const struct eth_dev_ops gve_eth_dev_ops = {
 	.rx_queue_setup       = gve_rx_queue_setup,
 	.tx_queue_setup       = gve_tx_queue_setup,
 	.link_update          = gve_link_update,
+	.stats_get            = gve_dev_stats_get,
+	.stats_reset          = gve_dev_stats_reset,
 	.mtu_set              = gve_dev_mtu_set,
 };
 
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 0624085517..a07c438b5d 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -76,6 +76,11 @@ struct gve_tx_queue {
 	struct gve_queue_page_list *qpl;
 	struct gve_tx_iovec *iov_ring;
 
+	/* Stats */
+	uint64_t errors;
+	uint64_t packets;
+	uint64_t bytes;
+
 	uint16_t port_id;
 	uint16_t queue_id;
 
@@ -114,6 +119,12 @@ struct gve_rx_queue {
 	/* only valid for GQI_QPL queue format */
 	struct gve_queue_page_list *qpl;
 
+	/* stats */
+	uint64_t no_mbufs;
+	uint64_t errors;
+	uint64_t packets;
+	uint64_t bytes;
+
 	struct gve_priv *hw;
 	const struct rte_memzone *qres_mz;
 	struct gve_queue_resources *qres;
@@ -125,6 +136,7 @@ struct gve_rx_queue {
 
 	/* Only valid for DQO_RDA queue format */
 	struct gve_rx_queue *bufq;
+	uint8_t is_gqi_qpl;
 };
 
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index e29f979a4e..8d3ee35472 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -26,8 +26,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
 				break;
 			rxq->sw_ring[idx + i] = nmb;
 		}
-		if (i != nb_alloc)
+		if (i != nb_alloc) {
+			rxq->no_mbufs += nb_alloc - i;
 			nb_alloc = i;
+		}
 	}
 	rxq->nb_avail -= nb_alloc;
 	next_avail += nb_alloc;
@@ -88,6 +90,7 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	uint16_t rx_id = rxq->rx_tail;
 	struct rte_mbuf *rxe;
 	uint16_t nb_rx, len;
+	uint64_t bytes = 0;
 	uint64_t addr;
 
 	rxr = rxq->rx_desc_ring;
@@ -97,8 +100,10 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
 			break;
 
-		if (rxd->flags_seq & GVE_RXF_ERR)
+		if (rxd->flags_seq & GVE_RXF_ERR) {
+			rxq->errors++;
 			continue;
+		}
 
 		len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
 		rxe = rxq->sw_ring[rx_id];
@@ -137,6 +142,7 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rx_id = 0;
 
 		rx_pkts[nb_rx] = rxe;
+		bytes += len;
 	}
 
 	rxq->nb_avail += nb_rx;
@@ -145,6 +151,11 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	if (rxq->nb_avail > rxq->free_thresh)
 		gve_rx_refill(rxq);
 
+	if (nb_rx) {
+		rxq->packets += nb_rx;
+		rxq->bytes += bytes;
+	}
+
 	return nb_rx;
 }
 
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index 6196c29e24..81778840cf 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -260,6 +260,7 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct rte_mbuf *tx_pkt, *first;
 	uint16_t sw_id = txq->sw_tail;
 	uint16_t nb_used, i;
+	uint64_t bytes = 0;
 	uint16_t nb_tx = 0;
 	uint32_t hlen;
 
@@ -355,6 +356,8 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		txq->nb_free -= nb_used;
 		txq->sw_nb_free -= first->nb_segs;
 		tx_tail += nb_used;
+
+		bytes += first->pkt_len;
 	}
 
 end_of_tx:
@@ -362,6 +365,10 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		rte_write32(rte_cpu_to_be_32(tx_tail), txq->qtx_tail);
 		txq->tx_tail = tx_tail;
 		txq->sw_tail = sw_id;
+
+		txq->errors += nb_pkts - nb_tx;
+		txq->packets += nb_tx;
+		txq->bytes += bytes;
 	}
 
 	return nb_tx;
@@ -380,6 +387,7 @@ gve_tx_burst_ra(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct rte_mbuf *tx_pkt, *first;
 	uint16_t nb_used, hlen, i;
 	uint64_t ol_flags, addr;
+	uint64_t bytes = 0;
 	uint16_t nb_tx = 0;
 
 	txr = txq->tx_desc_ring;
@@ -438,12 +446,17 @@ gve_tx_burst_ra(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		txq->nb_free -= nb_used;
 		tx_tail += nb_used;
+
+		bytes += first->pkt_len;
 	}
 
 end_of_tx:
 	if (nb_tx) {
 		rte_write32(rte_cpu_to_be_32(tx_tail), txq->qtx_tail);
 		txq->tx_tail = tx_tail;
+
+		txq->packets += nb_tx;
+		txq->bytes += bytes;
 	}
 
 	return nb_tx;
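
Reviewer aside (not part of the patch): the q_* per-queue counters filled in
gve_dev_stats_get() are only reported for the first RTE_ETHDEV_QUEUE_STAT_CNTRS
queues, which is the generic ethdev limit; queues beyond that are still counted
in the aggregate fields. A minimal sketch of reading the per-queue counters
from an application, with the port id and queue counts as placeholders:

#include <inttypes.h>
#include <stdio.h>

#include <rte_ethdev.h>

/* Illustrative only: dump per-queue RX/TX counters for one port.
 * The bound on RTE_ETHDEV_QUEUE_STAT_CNTRS mirrors the checks in
 * gve_dev_stats_get(). */
static void
dump_per_queue_stats(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_stats stats;
	uint16_t i;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	for (i = 0; i < nb_rxq && i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++)
		printf("rxq %u: packets=%" PRIu64 " bytes=%" PRIu64
		       " errors=%" PRIu64 "\n",
		       i, stats.q_ipackets[i], stats.q_ibytes[i],
		       stats.q_errors[i]);

	for (i = 0; i < nb_txq && i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++)
		printf("txq %u: packets=%" PRIu64 " bytes=%" PRIu64 "\n",
		       i, stats.q_opackets[i], stats.q_obytes[i]);
}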