From patchwork Fri May 17 00:12:07 2024
X-Patchwork-Submitter: Stephen Hemminger
X-Patchwork-Id: 140155
X-Patchwork-Delegate: thomas@monjalon.net
From: Stephen Hemminger
To: dev@dpdk.org
Cc: Stephen Hemminger, Bruce Richardson
Subject: [PATCH v6 7/9] net/ring: use generic SW stats
Date: Thu, 16 May 2024 17:12:07 -0700
Message-ID: <20240517001302.65514-8-stephen@networkplumber.org>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240517001302.65514-1-stephen@networkplumber.org>
References: <20240510050507.14381-1-stephen@networkplumber.org>
 <20240517001302.65514-1-stephen@networkplumber.org>

Use the generic per-queue statistics infrastructure. This also fixes a
bug where the ring code was not accounting for bytes.

Signed-off-by: Stephen Hemminger
---
 drivers/net/ring/rte_eth_ring.c | 71 +++++++++++++--------------------
 1 file changed, 28 insertions(+), 43 deletions(-)

diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 48953dd7a0..85f14dd679 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -7,6 +7,7 @@
 #include "rte_eth_ring.h"
 #include
 #include
+#include
 #include
 #include
 #include
@@ -44,8 +45,8 @@ enum dev_action {
 
 struct ring_queue {
 	struct rte_ring *rng;
-	uint64_t rx_pkts;
-	uint64_t tx_pkts;
+
+	struct rte_eth_counters stats;
 };
 
 struct pmd_internals {
@@ -77,12 +78,13 @@ eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
 	void **ptrs = (void *)&bufs[0];
 	struct ring_queue *r = q;
-	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
-			ptrs, nb_bufs, NULL);
-	if (r->rng->flags & RING_F_SC_DEQ)
-		r->rx_pkts += nb_rx;
-	else
-		__atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_RELAXED);
+	uint16_t i, nb_rx;
+
+	nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng, ptrs, nb_bufs, NULL);
+
+	for (i = 0; i < nb_rx; i++)
+		rte_eth_count_mbuf(&r->stats, bufs[i]);
+
 	return nb_rx;
 }
 
@@ -90,13 +92,20 @@ static uint16_t
 eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
 	void **ptrs = (void *)&bufs[0];
+	uint32_t *sizes;
 	struct ring_queue *r = q;
-	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
-			ptrs, nb_bufs, NULL);
-	if (r->rng->flags & RING_F_SP_ENQ)
-		r->tx_pkts += nb_tx;
-	else
-		__atomic_fetch_add(&r->tx_pkts, nb_tx, __ATOMIC_RELAXED);
+	uint16_t i, nb_tx;
+
+	sizes = alloca(sizeof(uint32_t) * nb_bufs);
+
+	for (i = 0; i < nb_bufs; i++)
+		sizes[i] = rte_pktmbuf_pkt_len(bufs[i]);
+
+	nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng, ptrs, nb_bufs, NULL);
+
+	for (i = 0; i < nb_tx; i++)
+		rte_eth_count_packet(&r->stats, sizes[i]);
+
 	return nb_tx;
 }
 
@@ -193,40 +202,16 @@ eth_dev_info(struct rte_eth_dev *dev,
 static int
 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-	unsigned int i;
-	unsigned long rx_total = 0, tx_total = 0;
-	const struct pmd_internals *internal = dev->data->dev_private;
-
-	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
-	     i < dev->data->nb_rx_queues; i++) {
-		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts;
-		rx_total += stats->q_ipackets[i];
-	}
-
-	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
-	     i < dev->data->nb_tx_queues; i++) {
-		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts;
-		tx_total += stats->q_opackets[i];
-	}
-
-	stats->ipackets = rx_total;
-	stats->opackets = tx_total;
-
-	return 0;
+	return rte_eth_counters_stats_get(dev, offsetof(struct ring_queue, stats),
+					  offsetof(struct ring_queue, stats),
+					  stats);
 }
 
 static int
 eth_stats_reset(struct rte_eth_dev *dev)
 {
-	unsigned int i;
-	struct pmd_internals *internal = dev->data->dev_private;
-
-	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		internal->rx_ring_queues[i].rx_pkts = 0;
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		internal->tx_ring_queues[i].tx_pkts = 0;
-
-	return 0;
+	return rte_eth_counters_reset(dev, offsetof(struct ring_queue, stats),
+				      offsetof(struct ring_queue, stats));
 }
 
 static void
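
For readers new to the generic SW stats helpers used above, here is a
minimal sketch of the pattern this patch converts the ring PMD to. The
counter type and helper names (struct rte_eth_counters,
rte_eth_count_mbuf, rte_eth_count_packet, rte_eth_counters_stats_get,
rte_eth_counters_reset) are taken from the diff; the include shown and
the demo_* queue structure are illustrative assumptions, not the exact
driver code.

/*
 * Sketch only: how a PMD datapath is expected to use the generic
 * per-queue counters from this series.  The swstats header name is an
 * assumption; the helper names come from the diff above.
 */
#include <stddef.h>           /* offsetof() */
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_ring.h>
#include <ethdev_driver.h>
#include <ethdev_swstats.h>   /* assumed location of struct rte_eth_counters */

struct demo_queue {                     /* hypothetical queue structure */
	struct rte_ring *rng;
	struct rte_eth_counters stats;  /* per-queue packet and byte counts */
};

static uint16_t
demo_rx_burst(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct demo_queue *r = q;
	uint16_t i, nb_rx;

	nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng, ptrs, nb_bufs, NULL);

	/* one helper call per mbuf updates both packets and bytes,
	 * which is what the old rx_pkts-only counter missed */
	for (i = 0; i < nb_rx; i++)
		rte_eth_count_mbuf(&r->stats, bufs[i]);

	return nb_rx;
}

/* stats_get/reset collapse to a single call each; the offsets locate
 * the counters inside the TX and RX queue structures */
static int
demo_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	return rte_eth_counters_stats_get(dev,
					  offsetof(struct demo_queue, stats),
					  offsetof(struct demo_queue, stats),
					  stats);
}

static int
demo_stats_reset(struct rte_eth_dev *dev)
{
	return rte_eth_counters_reset(dev,
				      offsetof(struct demo_queue, stats),
				      offsetof(struct demo_queue, stats));
}

Because the ring PMD uses the same struct ring_queue for both RX and TX
queues, the same offsetof() value is passed for both offsets in the
patch; a driver with distinct RX and TX queue structures would pass two
different offsets.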