From patchwork Fri May 17 17:35:16 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Stephen Hemminger
X-Patchwork-Id: 140181
X-Patchwork-Delegate: thomas@monjalon.net
From: Stephen Hemminger <stephen@networkplumber.org>
To: dev@dpdk.org
Cc: Stephen Hemminger, Tetsuya Mukawa
Subject: [PATCH v7 9/9] net/null: use generic SW stats
Date: Fri, 17 May 2024 10:35:16 -0700
Message-ID: <20240517174044.90952-10-stephen@networkplumber.org>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240517174044.90952-1-stephen@networkplumber.org>
References: <20240510050507.14381-1-stephen@networkplumber.org>
 <20240517174044.90952-1-stephen@networkplumber.org>
MIME-Version: 1.0

Use the new common code for statistics. This also fixes the bug that
this driver was not accounting for bytes.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 80 +++++++--------------------------
 1 file changed, 17 insertions(+), 63 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index f4ed3b8a7f..7786982732 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -37,8 +38,8 @@ struct null_queue {
 	struct rte_mempool *mb_pool;
 	struct rte_mbuf *dummy_packet;
 
-	RTE_ATOMIC(uint64_t) rx_pkts;
-	RTE_ATOMIC(uint64_t) tx_pkts;
+	struct rte_eth_counters tx_stats;
+	struct rte_eth_counters rx_stats;
 };
 
 struct pmd_options {
@@ -99,11 +100,9 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		bufs[i]->data_len = (uint16_t)packet_size;
 		bufs[i]->pkt_len = packet_size;
 		bufs[i]->port = h->internals->port_id;
+		rte_eth_count_mbuf(&h->rx_stats, bufs[i]);
 	}
 
-	/* NOTE: review for potential ordering optimization */
-	rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
-
 	return i;
 }
 
@@ -127,11 +126,9 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		bufs[i]->data_len = (uint16_t)packet_size;
 		bufs[i]->pkt_len = packet_size;
 		bufs[i]->port = h->internals->port_id;
+		rte_eth_count_mbuf(&h->rx_stats, bufs[i]);
 	}
 
-	/* NOTE: review for potential ordering optimization */
-	rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
-
 	return i;
 }
 
@@ -151,11 +148,10 @@ eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	if ((q == NULL) || (bufs == NULL))
 		return 0;
 
-	for (i = 0; i < nb_bufs; i++)
+	for (i = 0; i < nb_bufs; i++) {
+		rte_eth_count_mbuf(&h->tx_stats, bufs[i]);
 		rte_pktmbuf_free(bufs[i]);
-
-	/* NOTE: review for potential ordering optimization */
-	rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
+	}
 
 	return i;
 }
 
@@ -174,12 +170,10 @@ eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	for (i = 0; i < nb_bufs; i++) {
 		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
 			packet_size);
+		rte_eth_count_mbuf(&h->tx_stats, bufs[i]);
 		rte_pktmbuf_free(bufs[i]);
 	}
 
-	/* NOTE: review for potential ordering optimization */
-	rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
-
 	return i;
 }
 
@@ -322,60 +316,20 @@ eth_dev_info(struct rte_eth_dev *dev,
 }
 
 static int
-eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-	unsigned int i, num_stats;
-	unsigned long rx_total = 0, tx_total = 0;
-	const struct pmd_internals *internal;
-
-	if ((dev == NULL) || (igb_stats == NULL))
-		return -EINVAL;
-
-	internal = dev->data->dev_private;
-	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
-			RTE_MIN(dev->data->nb_rx_queues,
-				RTE_DIM(internal->rx_null_queues)));
-	for (i = 0; i < num_stats; i++) {
-		/* NOTE: review for atomic access */
-		igb_stats->q_ipackets[i] =
-			internal->rx_null_queues[i].rx_pkts;
-		rx_total += igb_stats->q_ipackets[i];
-	}
-
-	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
-			RTE_MIN(dev->data->nb_tx_queues,
-				RTE_DIM(internal->tx_null_queues)));
-	for (i = 0; i < num_stats; i++) {
-		/* NOTE: review for atomic access */
-		igb_stats->q_opackets[i] =
-			internal->tx_null_queues[i].tx_pkts;
-		tx_total += igb_stats->q_opackets[i];
-	}
-
-	igb_stats->ipackets = rx_total;
-	igb_stats->opackets = tx_total;
-
-	return 0;
+	return rte_eth_counters_stats_get(dev,
+					  offsetof(struct null_queue, tx_stats),
+					  offsetof(struct null_queue, rx_stats),
+					  stats);
 }
 
 static int
 eth_stats_reset(struct rte_eth_dev *dev)
 {
-	unsigned int i;
-	struct pmd_internals *internal;
-
-	if (dev == NULL)
-		return -EINVAL;
-
-	internal = dev->data->dev_private;
-	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
-		/* NOTE: review for atomic access */
-		internal->rx_null_queues[i].rx_pkts = 0;
-	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
-		/* NOTE: review for atomic access */
-		internal->tx_null_queues[i].tx_pkts = 0;
-
-	return 0;
+	return rte_eth_counters_reset(dev,
+				      offsetof(struct null_queue, tx_stats),
+				      offsetof(struct null_queue, rx_stats));
 }
 
 static void
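
Editor's note, for readers not following the whole series: below is a minimal,
illustrative sketch (not part of the patch) of how a per-queue structure plugs
into the generic SW counters used above. The names demo_queue, demo_count_rx,
demo_stats_get and demo_stats_reset are hypothetical; the counters helpers are
taken as-is from the diff, but the header that declares them is the include
added in the first hunk, whose name is not visible in this archived copy, so
no specific include is shown for it here.

/* Illustrative only -- mirrors how this patch wires up struct null_queue.
 * Assumption: the generic SW counters header added by this series declares
 * struct rte_eth_counters, rte_eth_count_mbuf(), rte_eth_counters_stats_get()
 * and rte_eth_counters_reset(). */
#include <stddef.h>		/* offsetof() */
#include <rte_mbuf.h>
#include <rte_ethdev.h>

/* Hypothetical per-queue state, analogous to struct null_queue. */
struct demo_queue {
	struct rte_eth_counters tx_stats;
	struct rte_eth_counters rx_stats;
};

/* RX (and, symmetrically, TX) burst paths count each mbuf; the helper
 * records both packets and bytes, which is the bug fix mentioned above. */
static inline void
demo_count_rx(struct demo_queue *q, struct rte_mbuf *m)
{
	rte_eth_count_mbuf(&q->rx_stats, m);
}

/* The stats callbacks collapse to a single helper call each, keyed by the
 * offsets of the counters inside the per-queue structure. */
static int
demo_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	return rte_eth_counters_stats_get(dev,
					  offsetof(struct demo_queue, tx_stats),
					  offsetof(struct demo_queue, rx_stats),
					  stats);
}

static int
demo_stats_reset(struct rte_eth_dev *dev)
{
	return rte_eth_counters_reset(dev,
				      offsetof(struct demo_queue, tx_stats),
				      offsetof(struct demo_queue, rx_stats));
}

The offsets must point at the counters actually embedded in the driver's own
queue structs, which is why the patch passes offsetof(struct null_queue, ...)
directly rather than keeping any per-driver aggregation code.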