From patchwork Wed May 15 23:40:58 2024
X-Patchwork-Submitter: Stephen Hemminger <stephen@networkplumber.org>
X-Patchwork-Id: 140134
X-Patchwork-Delegate: thomas@monjalon.net
From: Stephen Hemminger <stephen@networkplumber.org>
To: dev@dpdk.org
Cc: Stephen Hemminger <stephen@networkplumber.org>, Bruce Richardson
Subject: [PATCH v4 6/8] net/ring: use generic SW stats
Date: Wed, 15 May 2024 16:40:58 -0700
Message-ID: <20240515234234.5015-7-stephen@networkplumber.org>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240515234234.5015-1-stephen@networkplumber.org>
References: <20240510050507.14381-1-stephen@networkplumber.org>
 <20240515234234.5015-1-stephen@networkplumber.org>
List-Id: DPDK patches and discussions <dev.dpdk.org>

Use the generic per-queue counter infrastructure. This also fixes a bug
where the ring driver was not accounting for bytes.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/ring/rte_eth_ring.c | 71 +++++++++++++--------------------
 1 file changed, 28 insertions(+), 43 deletions(-)

diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 48953dd7a0..85f14dd679 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -7,6 +7,7 @@
 #include "rte_eth_ring.h"
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -44,8 +45,8 @@ enum dev_action {
 
 struct ring_queue {
 	struct rte_ring *rng;
-	uint64_t rx_pkts;
-	uint64_t tx_pkts;
+
+	struct rte_eth_counters stats;
 };
 
 struct pmd_internals {
@@ -77,12 +78,13 @@ eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
 	void **ptrs = (void *)&bufs[0];
 	struct ring_queue *r = q;
-	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
-			ptrs, nb_bufs, NULL);
-	if (r->rng->flags & RING_F_SC_DEQ)
-		r->rx_pkts += nb_rx;
-	else
-		__atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_RELAXED);
+	uint16_t i, nb_rx;
+
+	nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng, ptrs, nb_bufs, NULL);
+
+	for (i = 0; i < nb_rx; i++)
+		rte_eth_count_mbuf(&r->stats, bufs[i]);
+
 	return nb_rx;
 }
 
@@ -90,13 +92,20 @@ static uint16_t
 eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
 	void **ptrs = (void *)&bufs[0];
+	uint32_t *sizes;
 	struct ring_queue *r = q;
-	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
-			ptrs, nb_bufs, NULL);
-	if (r->rng->flags & RING_F_SP_ENQ)
-		r->tx_pkts += nb_tx;
-	else
-		__atomic_fetch_add(&r->tx_pkts, nb_tx, __ATOMIC_RELAXED);
+	uint16_t i, nb_tx;
+
+	sizes = alloca(sizeof(uint32_t) * nb_bufs);
+
+	for (i = 0; i < nb_bufs; i++)
+		sizes[i] = rte_pktmbuf_pkt_len(bufs[i]);
+
+	nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng, ptrs, nb_bufs, NULL);
+
+	for (i = 0; i < nb_tx; i++)
+		rte_eth_count_packet(&r->stats, sizes[i]);
+
 	return nb_tx;
 }
 
@@ -193,40 +202,16 @@ eth_dev_info(struct rte_eth_dev *dev,
 static int
 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-	unsigned int i;
-	unsigned long rx_total = 0, tx_total = 0;
-	const struct pmd_internals *internal = dev->data->dev_private;
-
-	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
-	     i < dev->data->nb_rx_queues; i++) {
-		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts;
-		rx_total += stats->q_ipackets[i];
-	}
-
-	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
-	     i < dev->data->nb_tx_queues; i++) {
-		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts;
-		tx_total += stats->q_opackets[i];
-	}
-
-	stats->ipackets = rx_total;
-	stats->opackets = tx_total;
-
-	return 0;
+	return rte_eth_counters_stats_get(dev, offsetof(struct ring_queue, stats),
+					  offsetof(struct ring_queue, stats),
+					  stats);
 }
 
 static int
 eth_stats_reset(struct rte_eth_dev *dev)
 {
-	unsigned int i;
-	struct pmd_internals *internal = dev->data->dev_private;
-
-	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		internal->rx_ring_queues[i].rx_pkts = 0;
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		internal->tx_ring_queues[i].tx_pkts = 0;
-
-	return 0;
+	return rte_eth_counters_reset(dev, offsetof(struct ring_queue, stats),
+				      offsetof(struct ring_queue, stats));
 }
 
 static void
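
For reviewers unfamiliar with the new helpers: the standalone sketch below
illustrates the per-queue counting pattern this patch adopts. It is an
approximation, not the ethdev implementation from the earlier patch in this
series; struct sw_counters, struct queue, sw_count_packet() and
sw_stats_get() are hypothetical stand-ins for rte_eth_counters,
rte_eth_count_packet() and rte_eth_counters_stats_get(), and only the usage
pattern (packet/byte accumulation at burst time, aggregation via offsetof())
mirrors the driver changes above.

    /* Standalone approximation of the per-queue SW counter pattern.
     * All names here are illustrative; the real helpers live in the
     * ethdev library patch of this series. */
    #include <stddef.h>
    #include <stdint.h>

    struct sw_counters {            /* stand-in for rte_eth_counters */
    	uint64_t packets;
    	uint64_t bytes;
    };

    struct queue {                  /* stand-in for struct ring_queue */
    	void *ring;
    	struct sw_counters stats;
    };

    /* Count one packet of pkt_len bytes, as the TX path above does per mbuf. */
    static inline void
    sw_count_packet(struct sw_counters *c, uint32_t pkt_len)
    {
    	c->packets += 1;
    	c->bytes += pkt_len;
    }

    /* Aggregate all queues of a port; the stats field is located through
     * its byte offset inside the queue structure, which is why the driver
     * passes offsetof(struct ring_queue, stats) to the generic helpers. */
    static void
    sw_stats_get(struct queue *queues, unsigned int nb_queues,
    	     size_t stats_offset, uint64_t *total_packets, uint64_t *total_bytes)
    {
    	unsigned int i;

    	*total_packets = 0;
    	*total_bytes = 0;
    	for (i = 0; i < nb_queues; i++) {
    		const struct sw_counters *c =
    			(const void *)((const char *)&queues[i] + stats_offset);

    		*total_packets += c->packets;
    		*total_bytes += c->bytes;
    	}
    }

    /* Example: aggregate RX queues the way eth_stats_get() does above. */
    static void
    example_get(struct queue *rxq, unsigned int n, uint64_t *pkts, uint64_t *bytes)
    {
    	sw_stats_get(rxq, n, offsetof(struct queue, stats), pkts, bytes);
    }

Because every queue embeds the same counters structure, one generic
aggregation routine keyed by offsetof() can serve both the RX and TX queue
arrays, and byte counts come for free, which is the bug fix noted in the
commit message.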