From patchwork Fri Jul 23 13:15:14 2021
X-Patchwork-Submitter: Andrew Rybchenko
X-Patchwork-Id: 96257
X-Patchwork-Delegate: david.marchand@redhat.com
From: Andrew Rybchenko
To: dev@dpdk.org
Cc: David Marchand, Ivan Ilchenko, Andy Moreton
Date: Fri, 23 Jul 2021 16:15:14 +0300
Message-Id: <20210723131515.2317168-11-andrew.rybchenko@oktetlabs.ru>
In-Reply-To: <20210723131515.2317168-1-andrew.rybchenko@oktetlabs.ru>
References: <20210604144225.287678-1-andrew.rybchenko@oktetlabs.ru>
 <20210723131515.2317168-1-andrew.rybchenko@oktetlabs.ru>
Subject: [dpdk-dev] [PATCH v3 10/11] net/sfc: add xstats for Rx/Tx doorbells

From: Ivan Ilchenko

Rx/Tx doorbells statistics are collected in software and available per
queue. These stats are useful for performance investigation.
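
For reference, below is a minimal sketch (not part of the patch) of how an
application could read the new counters through the generic ethdev xstats
API. The port id and the "dbells" substring filter are illustrative; the
names themselves follow the rx_dbells/tx_dbells (accumulated across queues)
and rx_qN_dbells/tx_qN_dbells (per queue) pattern built by
sfc_sw_stat_get_name().

/* Illustrative sketch only, not part of the patch. */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>

static void
dump_doorbell_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *xstats = NULL;
	int nb, i;

	/* First call with NULL to query the number of supported xstats. */
	nb = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (nb <= 0)
		return;

	names = calloc(nb, sizeof(*names));
	xstats = calloc(nb, sizeof(*xstats));
	if (names == NULL || xstats == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, nb) < 0 ||
	    rte_eth_xstats_get(port_id, xstats, nb) < 0)
		goto out;

	/* Print only the software doorbell counters added by this patch. */
	for (i = 0; i < nb; i++) {
		if (strstr(names[xstats[i].id].name, "dbells") != NULL)
			printf("%s: %" PRIu64 "\n",
			       names[xstats[i].id].name, xstats[i].value);
	}
out:
	free(names);
	free(xstats);
}
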
Signed-off-by: Ivan Ilchenko Signed-off-by: Andrew Rybchenko Reviewed-by: Andy Moreton --- drivers/net/sfc/meson.build | 1 + drivers/net/sfc/sfc.c | 16 + drivers/net/sfc/sfc.h | 9 + drivers/net/sfc/sfc_dp.h | 10 + drivers/net/sfc/sfc_ef10.h | 3 +- drivers/net/sfc/sfc_ef100_rx.c | 1 + drivers/net/sfc/sfc_ef100_tx.c | 1 + drivers/net/sfc/sfc_ef10_essb_rx.c | 3 +- drivers/net/sfc/sfc_ef10_rx.c | 3 +- drivers/net/sfc/sfc_ef10_tx.c | 1 + drivers/net/sfc/sfc_ethdev.c | 124 +++++-- drivers/net/sfc/sfc_port.c | 10 +- drivers/net/sfc/sfc_rx.c | 1 + drivers/net/sfc/sfc_sw_stats.c | 572 +++++++++++++++++++++++++++++ drivers/net/sfc/sfc_sw_stats.h | 49 +++ drivers/net/sfc/sfc_tx.c | 4 +- 16 files changed, 772 insertions(+), 36 deletions(-) create mode 100644 drivers/net/sfc/sfc_sw_stats.c create mode 100644 drivers/net/sfc/sfc_sw_stats.h diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build index 4625859077..a912cdccfa 100644 --- a/drivers/net/sfc/meson.build +++ b/drivers/net/sfc/meson.build @@ -70,6 +70,7 @@ sources = files( 'sfc.c', 'sfc_mcdi.c', 'sfc_sriov.c', + 'sfc_sw_stats.c', 'sfc_intr.c', 'sfc_ev.c', 'sfc_port.c', diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c index 4097cf39de..274a98e228 100644 --- a/drivers/net/sfc/sfc.c +++ b/drivers/net/sfc/sfc.c @@ -24,6 +24,7 @@ #include "sfc_tx.h" #include "sfc_kvargs.h" #include "sfc_tweak.h" +#include "sfc_sw_stats.h" int @@ -636,10 +637,17 @@ sfc_configure(struct sfc_adapter *sa) if (rc != 0) goto fail_tx_configure; + rc = sfc_sw_xstats_configure(sa); + if (rc != 0) + goto fail_sw_xstats_configure; + sa->state = SFC_ADAPTER_CONFIGURED; sfc_log_init(sa, "done"); return 0; +fail_sw_xstats_configure: + sfc_tx_close(sa); + fail_tx_configure: sfc_rx_close(sa); @@ -666,6 +674,7 @@ sfc_close(struct sfc_adapter *sa) SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED); sa->state = SFC_ADAPTER_CLOSING; + sfc_sw_xstats_close(sa); sfc_tx_close(sa); sfc_rx_close(sa); sfc_port_close(sa); @@ -891,6 +900,10 @@ sfc_attach(struct sfc_adapter *sa) sfc_flow_init(sa); + rc = sfc_sw_xstats_init(sa); + if (rc != 0) + goto fail_sw_xstats_init; + /* * Create vSwitch to be able to use VFs when PF is not started yet * as DPDK port. VFs should be able to talk to each other even @@ -906,6 +919,9 @@ sfc_attach(struct sfc_adapter *sa) return 0; fail_sriov_vswitch_create: + sfc_sw_xstats_close(sa); + +fail_sw_xstats_init: sfc_flow_fini(sa); sfc_mae_detach(sa); diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h index 58b8c2c2ad..331e06bac6 100644 --- a/drivers/net/sfc/sfc.h +++ b/drivers/net/sfc/sfc.h @@ -217,6 +217,14 @@ struct sfc_counter_rxq { struct rte_mempool *mp; }; +struct sfc_sw_xstats { + uint64_t *reset_vals; + + rte_spinlock_t queues_bitmap_lock; + void *queues_bitmap_mem; + struct rte_bitmap *queues_bitmap; +}; + /* Adapter private data */ struct sfc_adapter { /* @@ -249,6 +257,7 @@ struct sfc_adapter { struct sfc_sriov sriov; struct sfc_intr intr; struct sfc_port port; + struct sfc_sw_xstats sw_xstats; struct sfc_filter filter; struct sfc_mae mae; diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h index 61c1a3fbac..7fd8f34b0f 100644 --- a/drivers/net/sfc/sfc_dp.h +++ b/drivers/net/sfc/sfc_dp.h @@ -42,6 +42,16 @@ enum sfc_dp_type { /** Datapath queue run-time information */ struct sfc_dp_queue { + /* + * Typically the structure is located at the end of Rx/Tx queue + * data structure and not used on datapath. So, it is not a + * problem to have extra fields even if not used. 
However, + * put stats at top of the structure to be closer to fields + * used on datapath or reap to have more chances to be cache-hot. + */ + uint32_t rx_dbells; + uint32_t tx_dbells; + uint16_t port_id; uint16_t queue_id; struct rte_pci_addr pci_addr; diff --git a/drivers/net/sfc/sfc_ef10.h b/drivers/net/sfc/sfc_ef10.h index ad4c1fdbef..e9bb72e28b 100644 --- a/drivers/net/sfc/sfc_ef10.h +++ b/drivers/net/sfc/sfc_ef10.h @@ -99,7 +99,7 @@ sfc_ef10_ev_present(const efx_qword_t ev) static inline void sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added, - unsigned int ptr_mask) + unsigned int ptr_mask, uint32_t *dbell_counter) { efx_dword_t dword; @@ -118,6 +118,7 @@ sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added, * operations that follow it (i.e. doorbell write). */ rte_write32(dword.ed_u32[0], doorbell); + (*dbell_counter)++; } static inline void diff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c index 10c74aa118..d4cb96881c 100644 --- a/drivers/net/sfc/sfc_ef100_rx.c +++ b/drivers/net/sfc/sfc_ef100_rx.c @@ -119,6 +119,7 @@ sfc_ef100_rx_qpush(struct sfc_ef100_rxq *rxq, unsigned int added) * operations that follow it (i.e. doorbell write). */ rte_write32(dword.ed_u32[0], rxq->doorbell); + rxq->dp.dpq.rx_dbells++; sfc_ef100_rx_debug(rxq, "RxQ pushed doorbell at pidx %u (added=%u)", EFX_DWORD_FIELD(dword, ERF_GZ_RX_RING_PIDX), diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c index f9ad6f7b73..522e9a0d34 100644 --- a/drivers/net/sfc/sfc_ef100_tx.c +++ b/drivers/net/sfc/sfc_ef100_tx.c @@ -489,6 +489,7 @@ sfc_ef100_tx_qpush(struct sfc_ef100_txq *txq, unsigned int added) * operations that follow it (i.e. doorbell write). */ rte_write32(dword.ed_u32[0], txq->doorbell); + txq->dp.dpq.tx_dbells++; sfc_ef100_tx_debug(txq, "TxQ pushed doorbell at pidx %u (added=%u)", EFX_DWORD_FIELD(dword, ERF_GZ_TX_RING_PIDX), diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c index 3c246eb149..991329e86f 100644 --- a/drivers/net/sfc/sfc_ef10_essb_rx.c +++ b/drivers/net/sfc/sfc_ef10_essb_rx.c @@ -220,7 +220,8 @@ sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq) SFC_ASSERT(rxq->added != added); rxq->added = added; - sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask); + sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask, + &rxq->dp.dpq.rx_dbells); } static bool diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c index 2b4393d232..49a7d4fb42 100644 --- a/drivers/net/sfc/sfc_ef10_rx.c +++ b/drivers/net/sfc/sfc_ef10_rx.c @@ -171,7 +171,8 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq) SFC_ASSERT(rxq->added != added); rxq->added = added; - sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask); + sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask, + &rxq->dp.dpq.rx_dbells); } static void diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c index a8d34ead33..ed43adb4ca 100644 --- a/drivers/net/sfc/sfc_ef10_tx.c +++ b/drivers/net/sfc/sfc_ef10_tx.c @@ -248,6 +248,7 @@ sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added, rte_io_wmb(); *(volatile efsys_uint128_t *)txq->doorbell = oword.eo_u128[0]; + txq->dp.dpq.tx_dbells++; } static unsigned int diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c index dd7e5c253a..2db0d000c3 100644 --- a/drivers/net/sfc/sfc_ethdev.c +++ b/drivers/net/sfc/sfc_ethdev.c @@ -28,6 +28,10 @@ #include "sfc_flow.h" #include "sfc_dp.h" #include "sfc_dp_rx.h" +#include "sfc_sw_stats.h" + +#define 
SFC_XSTAT_ID_INVALID_VAL UINT64_MAX +#define SFC_XSTAT_ID_INVALID_NAME '\0' uint32_t sfc_logtype_driver; @@ -714,29 +718,49 @@ sfc_stats_reset(struct rte_eth_dev *dev) if (rc != 0) sfc_err(sa, "failed to reset statistics (rc = %d)", rc); + sfc_sw_xstats_reset(sa); + sfc_adapter_unlock(sa); SFC_ASSERT(rc >= 0); return -rc; } +static unsigned int +sfc_xstats_get_nb_supported(struct sfc_adapter *sa) +{ + struct sfc_port *port = &sa->port; + unsigned int nb_supported; + + sfc_adapter_lock(sa); + nb_supported = port->mac_stats_nb_supported + + sfc_sw_xstats_get_nb_supported(sa); + sfc_adapter_unlock(sa); + + return nb_supported; +} + static int sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int xstats_count) { struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); - struct sfc_port *port = &sa->port; unsigned int nb_written = 0; - unsigned int nb_supp; + unsigned int nb_supported = 0; + int rc; - if (unlikely(xstats == NULL)) { - sfc_adapter_lock(sa); - nb_supp = port->mac_stats_nb_supported; - sfc_adapter_unlock(sa); - return nb_supp; - } + if (unlikely(xstats == NULL)) + return sfc_xstats_get_nb_supported(sa); + + rc = sfc_port_get_mac_stats(sa, xstats, xstats_count, &nb_written); + if (rc < 0) + return rc; - return sfc_port_get_mac_stats(sa, xstats, xstats_count, &nb_written); + nb_supported = rc; + sfc_sw_xstats_get_vals(sa, xstats, xstats_count, &nb_written, + &nb_supported); + + return nb_supported; } static int @@ -748,24 +772,31 @@ sfc_xstats_get_names(struct rte_eth_dev *dev, struct sfc_port *port = &sa->port; unsigned int i; unsigned int nstats = 0; + unsigned int nb_written = 0; + int ret; - if (unlikely(xstats_names == NULL)) { - sfc_adapter_lock(sa); - nstats = port->mac_stats_nb_supported; - sfc_adapter_unlock(sa); - return nstats; - } + if (unlikely(xstats_names == NULL)) + return sfc_xstats_get_nb_supported(sa); for (i = 0; i < EFX_MAC_NSTATS; ++i) { if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { - if (nstats < xstats_count) + if (nstats < xstats_count) { strlcpy(xstats_names[nstats].name, efx_mac_stat_name(sa->nic, i), sizeof(xstats_names[0].name)); + nb_written++; + } nstats++; } } + ret = sfc_sw_xstats_get_names(sa, xstats_names, xstats_count, + &nb_written, &nstats); + if (ret != 0) { + SFC_ASSERT(ret < 0); + return ret; + } + return nstats; } @@ -774,11 +805,35 @@ sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, uint64_t *values, unsigned int n) { struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_port *port = &sa->port; + unsigned int nb_supported; + unsigned int i; + int rc; if (unlikely(ids == NULL || values == NULL)) return -EINVAL; - return sfc_port_get_mac_stats_by_id(sa, ids, values, n); + /* + * Values array could be filled in nonsequential order. Fill values with + * constant indicating invalid ID first. + */ + for (i = 0; i < n; i++) + values[i] = SFC_XSTAT_ID_INVALID_VAL; + + rc = sfc_port_get_mac_stats_by_id(sa, ids, values, n); + if (rc != 0) + return rc; + + nb_supported = port->mac_stats_nb_supported; + sfc_sw_xstats_get_vals_by_id(sa, ids, values, n, &nb_supported); + + /* Return number of written stats before invalid ID is encountered. 
*/ + for (i = 0; i < n; i++) { + if (values[i] == SFC_XSTAT_ID_INVALID_VAL) + return i; + } + + return n; } static int @@ -790,18 +845,23 @@ sfc_xstats_get_names_by_id(struct rte_eth_dev *dev, struct sfc_port *port = &sa->port; unsigned int nb_supported; unsigned int i; + int ret; if (unlikely(xstats_names == NULL && ids != NULL) || unlikely(xstats_names != NULL && ids == NULL)) return -EINVAL; - sfc_adapter_lock(sa); + if (unlikely(xstats_names == NULL && ids == NULL)) + return sfc_xstats_get_nb_supported(sa); - if (unlikely(xstats_names == NULL && ids == NULL)) { - nb_supported = port->mac_stats_nb_supported; - sfc_adapter_unlock(sa); - return nb_supported; - } + /* + * Names array could be filled in nonsequential order. Fill names with + * string indicating invalid ID first. + */ + for (i = 0; i < size; i++) + xstats_names[i].name[0] = SFC_XSTAT_ID_INVALID_NAME; + + sfc_adapter_lock(sa); SFC_ASSERT(port->mac_stats_nb_supported <= RTE_DIM(port->mac_stats_by_id)); @@ -812,14 +872,26 @@ sfc_xstats_get_names_by_id(struct rte_eth_dev *dev, efx_mac_stat_name(sa->nic, port->mac_stats_by_id[ids[i]]), sizeof(xstats_names[0].name)); - } else { - sfc_adapter_unlock(sa); - return i; } } + nb_supported = port->mac_stats_nb_supported; + sfc_adapter_unlock(sa); + ret = sfc_sw_xstats_get_names_by_id(sa, ids, xstats_names, size, + &nb_supported); + if (ret != 0) { + SFC_ASSERT(ret < 0); + return ret; + } + + /* Return number of written names before invalid ID is encountered. */ + for (i = 0; i < size; i++) { + if (xstats_names[i].name[0] == SFC_XSTAT_ID_INVALID_NAME) + return i; + } + return size; } diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c index f6689a17c0..adb2b2cb81 100644 --- a/drivers/net/sfc/sfc_port.c +++ b/drivers/net/sfc/sfc_port.c @@ -7,6 +7,8 @@ * for Solarflare) and Solarflare Communications, Inc. */ +#include + #include "efx.h" #include "sfc.h" @@ -701,15 +703,11 @@ sfc_port_get_mac_stats_by_id(struct sfc_adapter *sa, const uint64_t *ids, RTE_DIM(port->mac_stats_by_id)); for (i = 0; i < n; i++) { - if (ids[i] < port->mac_stats_nb_supported) { + if (ids[i] < port->mac_stats_nb_supported) values[i] = mac_stats[port->mac_stats_by_id[ids[i]]]; - } else { - ret = i; - goto unlock; - } } - ret = n; + ret = 0; unlock: sfc_adapter_unlock(sa); diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c index f6a8ac68e8..280e8a61f9 100644 --- a/drivers/net/sfc/sfc_rx.c +++ b/drivers/net/sfc/sfc_rx.c @@ -138,6 +138,7 @@ sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq) SFC_ASSERT(added != rxq->added); rxq->added = added; efx_rx_qpush(rxq->common, added, &rxq->pushed); + rxq->dp.dpq.rx_dbells++; } static uint64_t diff --git a/drivers/net/sfc/sfc_sw_stats.c b/drivers/net/sfc/sfc_sw_stats.c new file mode 100644 index 0000000000..8489b603f5 --- /dev/null +++ b/drivers/net/sfc/sfc_sw_stats.c @@ -0,0 +1,572 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2021 Xilinx, Inc. 
+ */ +#include +#include + +#include "sfc.h" +#include "sfc_rx.h" +#include "sfc_tx.h" +#include "sfc_sw_stats.h" + +enum sfc_sw_stats_type { + SFC_SW_STATS_RX, + SFC_SW_STATS_TX, +}; + +typedef uint64_t sfc_get_sw_xstat_val_t(struct sfc_adapter *sa, uint16_t qid); + +struct sfc_sw_xstat_descr { + const char *name; + enum sfc_sw_stats_type type; + sfc_get_sw_xstat_val_t *get_val; +}; + +static sfc_get_sw_xstat_val_t sfc_get_sw_xstat_val_rx_dbells; +static uint64_t +sfc_get_sw_xstat_val_rx_dbells(struct sfc_adapter *sa, uint16_t qid) +{ + struct sfc_adapter_shared *sas = sfc_sa2shared(sa); + struct sfc_rxq_info *rxq_info; + + rxq_info = sfc_rxq_info_by_ethdev_qid(sas, qid); + if (rxq_info->state & SFC_RXQ_INITIALIZED) + return rxq_info->dp->dpq.rx_dbells; + return 0; +} + +static sfc_get_sw_xstat_val_t sfc_get_sw_xstat_val_tx_dbells; +static uint64_t +sfc_get_sw_xstat_val_tx_dbells(struct sfc_adapter *sa, uint16_t qid) +{ + struct sfc_adapter_shared *sas = sfc_sa2shared(sa); + struct sfc_txq_info *txq_info; + + txq_info = sfc_txq_info_by_ethdev_qid(sas, qid); + if (txq_info->state & SFC_TXQ_INITIALIZED) + return txq_info->dp->dpq.tx_dbells; + return 0; +} + +struct sfc_sw_xstat_descr sfc_sw_xstats[] = { + { + .name = "dbells", + .type = SFC_SW_STATS_RX, + .get_val = sfc_get_sw_xstat_val_rx_dbells, + }, + { + .name = "dbells", + .type = SFC_SW_STATS_TX, + .get_val = sfc_get_sw_xstat_val_tx_dbells, + } +}; + +static int +sfc_sw_stat_get_name(struct sfc_adapter *sa, + const struct sfc_sw_xstat_descr *sw_xstat, char *name, + size_t name_size, unsigned int id_off) +{ + const char *prefix; + int ret; + + switch (sw_xstat->type) { + case SFC_SW_STATS_RX: + prefix = "rx"; + break; + case SFC_SW_STATS_TX: + prefix = "tx"; + break; + default: + sfc_err(sa, "%s: unknown software statistics type %d", + __func__, sw_xstat->type); + return -EINVAL; + } + + if (id_off == 0) { + ret = snprintf(name, name_size, "%s_%s", prefix, + sw_xstat->name); + if (ret < 0 || ret >= (int)name_size) { + sfc_err(sa, "%s: failed to fill xstat name %s_%s, err %d", + __func__, prefix, sw_xstat->name, ret); + return ret > 0 ? -EINVAL : ret; + } + } else { + uint16_t qid = id_off - 1; + ret = snprintf(name, name_size, "%s_q%u_%s", prefix, qid, + sw_xstat->name); + if (ret < 0 || ret >= (int)name_size) { + sfc_err(sa, "%s: failed to fill xstat name %s_q%u_%s, err %d", + __func__, prefix, qid, sw_xstat->name, ret); + return ret > 0 ? -EINVAL : ret; + } + } + + return 0; +} + +static unsigned int +sfc_sw_stat_get_queue_count(struct sfc_adapter *sa, + const struct sfc_sw_xstat_descr *sw_xstat) +{ + struct sfc_adapter_shared *sas = sfc_sa2shared(sa); + + switch (sw_xstat->type) { + case SFC_SW_STATS_RX: + return sas->ethdev_rxq_count; + case SFC_SW_STATS_TX: + return sas->ethdev_txq_count; + default: + sfc_err(sa, "%s: unknown software statistics type %d", + __func__, sw_xstat->type); + return 0; + } +} + +static unsigned int +sfc_sw_xstat_per_queue_get_count(unsigned int nb_queues) +{ + /* Take into account the accumulative xstat of all queues */ + return nb_queues > 0 ? 
1 + nb_queues : 0; +} + +static unsigned int +sfc_sw_xstat_get_nb_supported(struct sfc_adapter *sa, + const struct sfc_sw_xstat_descr *sw_xstat) +{ + unsigned int nb_queues; + + nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat); + return sfc_sw_xstat_per_queue_get_count(nb_queues); +} + +static int +sfc_sw_stat_get_names(struct sfc_adapter *sa, + const struct sfc_sw_xstat_descr *sw_xstat, + struct rte_eth_xstat_name *xstats_names, + unsigned int xstats_names_sz, + unsigned int *nb_written, + unsigned int *nb_supported) +{ + const size_t name_size = sizeof(xstats_names[0].name); + unsigned int id_base = *nb_supported; + unsigned int nb_queues; + unsigned int qid; + int rc; + + nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat); + if (nb_queues == 0) + return 0; + *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues); + + /* + * The order of each software xstat type is the accumulative xstat + * followed by per-queue xstats. + */ + if (*nb_written < xstats_names_sz) { + rc = sfc_sw_stat_get_name(sa, sw_xstat, + xstats_names[*nb_written].name, + name_size, *nb_written - id_base); + if (rc != 0) + return rc; + (*nb_written)++; + } + + for (qid = 0; qid < nb_queues; ++qid) { + if (*nb_written < xstats_names_sz) { + rc = sfc_sw_stat_get_name(sa, sw_xstat, + xstats_names[*nb_written].name, + name_size, *nb_written - id_base); + if (rc != 0) + return rc; + (*nb_written)++; + } + } + + return 0; +} + +static int +sfc_sw_xstat_get_names_by_id(struct sfc_adapter *sa, + const struct sfc_sw_xstat_descr *sw_xstat, + const uint64_t *ids, + struct rte_eth_xstat_name *xstats_names, + unsigned int size, + unsigned int *nb_supported) +{ + const size_t name_size = sizeof(xstats_names[0].name); + unsigned int id_base = *nb_supported; + unsigned int nb_queues; + unsigned int i; + int rc; + + nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat); + if (nb_queues == 0) + return 0; + *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues); + + /* + * The order of each software xstat type is the accumulative xstat + * followed by per-queue xstats. + */ + for (i = 0; i < size; i++) { + if (id_base <= ids[i] && ids[i] <= id_base + nb_queues) { + rc = sfc_sw_stat_get_name(sa, sw_xstat, + xstats_names[i].name, + name_size, ids[i] - id_base); + if (rc != 0) + return rc; + } + } + + return 0; +} + +static void +sfc_sw_xstat_get_values(struct sfc_adapter *sa, + const struct sfc_sw_xstat_descr *sw_xstat, + struct rte_eth_xstat *xstats, + unsigned int xstats_size, + unsigned int *nb_written, + unsigned int *nb_supported) +{ + unsigned int qid; + uint64_t value; + struct rte_eth_xstat *accum_xstat; + bool count_accum_value = false; + unsigned int nb_queues; + + nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat); + if (nb_queues == 0) + return; + *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues); + + /* + * The order of each software xstat type is the accumulative xstat + * followed by per-queue xstats. 
+ */ + if (*nb_written < xstats_size) { + count_accum_value = true; + accum_xstat = &xstats[*nb_written]; + xstats[*nb_written].id = *nb_written; + xstats[*nb_written].value = 0; + (*nb_written)++; + } + + for (qid = 0; qid < nb_queues; ++qid) { + value = sw_xstat->get_val(sa, qid); + + if (*nb_written < xstats_size) { + xstats[*nb_written].id = *nb_written; + xstats[*nb_written].value = value; + (*nb_written)++; + } + + if (count_accum_value) + accum_xstat->value += value; + } +} + +static void +sfc_sw_xstat_get_values_by_id(struct sfc_adapter *sa, + const struct sfc_sw_xstat_descr *sw_xstat, + const uint64_t *ids, + uint64_t *values, + unsigned int ids_size, + unsigned int *nb_supported) +{ + rte_spinlock_t *bmp_lock = &sa->sw_xstats.queues_bitmap_lock; + struct rte_bitmap *bmp = sa->sw_xstats.queues_bitmap; + unsigned int id_base = *nb_supported; + bool count_accum_value = false; + unsigned int accum_value_idx; + uint64_t accum_value = 0; + unsigned int i, qid; + unsigned int nb_queues; + + + rte_spinlock_lock(bmp_lock); + rte_bitmap_reset(bmp); + + nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat); + if (nb_queues == 0) + goto unlock; + *nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues); + + /* + * The order of each software xstat type is the accumulative xstat + * followed by per-queue xstats. + */ + for (i = 0; i < ids_size; i++) { + if (id_base <= ids[i] && ids[i] <= (id_base + nb_queues)) { + if (ids[i] == id_base) { /* Accumulative value */ + count_accum_value = true; + accum_value_idx = i; + continue; + } + qid = ids[i] - id_base - 1; + values[i] = sw_xstat->get_val(sa, qid); + accum_value += values[i]; + + rte_bitmap_set(bmp, qid); + } + } + + if (count_accum_value) { + for (qid = 0; qid < nb_queues; ++qid) { + if (rte_bitmap_get(bmp, qid) != 0) + continue; + values[accum_value_idx] += sw_xstat->get_val(sa, qid); + } + values[accum_value_idx] += accum_value; + } + +unlock: + rte_spinlock_unlock(bmp_lock); +} + +unsigned int +sfc_sw_xstats_get_nb_supported(struct sfc_adapter *sa) +{ + unsigned int nb_supported = 0; + unsigned int i; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) { + nb_supported += sfc_sw_xstat_get_nb_supported(sa, + &sfc_sw_xstats[i]); + } + + return nb_supported; +} + +void +sfc_sw_xstats_get_vals(struct sfc_adapter *sa, + struct rte_eth_xstat *xstats, + unsigned int xstats_count, + unsigned int *nb_written, + unsigned int *nb_supported) +{ + uint64_t *reset_vals = sa->sw_xstats.reset_vals; + unsigned int sw_xstats_offset; + unsigned int i; + + sfc_adapter_lock(sa); + + sw_xstats_offset = *nb_supported; + + for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) { + sfc_sw_xstat_get_values(sa, &sfc_sw_xstats[i], xstats, + xstats_count, nb_written, nb_supported); + } + + for (i = sw_xstats_offset; i < *nb_written; i++) + xstats[i].value -= reset_vals[i - sw_xstats_offset]; + + sfc_adapter_unlock(sa); +} + +int +sfc_sw_xstats_get_names(struct sfc_adapter *sa, + struct rte_eth_xstat_name *xstats_names, + unsigned int xstats_count, + unsigned int *nb_written, + unsigned int *nb_supported) +{ + unsigned int i; + int ret; + + sfc_adapter_lock(sa); + + for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) { + ret = sfc_sw_stat_get_names(sa, &sfc_sw_xstats[i], + xstats_names, xstats_count, + nb_written, nb_supported); + if (ret != 0) { + sfc_adapter_unlock(sa); + return ret; + } + } + + sfc_adapter_unlock(sa); + + return 0; +} + +void +sfc_sw_xstats_get_vals_by_id(struct sfc_adapter *sa, + const uint64_t *ids, + uint64_t *values, 
+ unsigned int n, + unsigned int *nb_supported) +{ + uint64_t *reset_vals = sa->sw_xstats.reset_vals; + unsigned int sw_xstats_offset; + unsigned int i; + + sfc_adapter_lock(sa); + + sw_xstats_offset = *nb_supported; + + for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) { + sfc_sw_xstat_get_values_by_id(sa, &sfc_sw_xstats[i], ids, + values, n, nb_supported); + } + + for (i = 0; i < n; i++) { + if (sw_xstats_offset <= ids[i] && ids[i] < *nb_supported) + values[i] -= reset_vals[ids[i] - sw_xstats_offset]; + } + + sfc_adapter_unlock(sa); +} + +int +sfc_sw_xstats_get_names_by_id(struct sfc_adapter *sa, + const uint64_t *ids, + struct rte_eth_xstat_name *xstats_names, + unsigned int size, + unsigned int *nb_supported) +{ + unsigned int i; + int ret; + + sfc_adapter_lock(sa); + + for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) { + ret = sfc_sw_xstat_get_names_by_id(sa, &sfc_sw_xstats[i], ids, + xstats_names, size, + nb_supported); + if (ret != 0) { + sfc_adapter_unlock(sa); + SFC_ASSERT(ret < 0); + return ret; + } + } + + sfc_adapter_unlock(sa); + + return 0; +} + +static void +sfc_sw_xstat_reset(struct sfc_adapter *sa, struct sfc_sw_xstat_descr *sw_xstat, + uint64_t *reset_vals) +{ + unsigned int nb_queues; + unsigned int qid; + uint64_t *accum_xstat_reset; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat); + if (nb_queues == 0) + return; + + /* + * The order of each software xstat type is the accumulative xstat + * followed by per-queue xstats. + */ + accum_xstat_reset = reset_vals; + *accum_xstat_reset = 0; + reset_vals++; + + for (qid = 0; qid < nb_queues; ++qid) { + reset_vals[qid] = sw_xstat->get_val(sa, qid); + *accum_xstat_reset += reset_vals[qid]; + } +} + +void +sfc_sw_xstats_reset(struct sfc_adapter *sa) +{ + uint64_t *reset_vals = sa->sw_xstats.reset_vals; + struct sfc_sw_xstat_descr *sw_xstat; + unsigned int i; + + SFC_ASSERT(sfc_adapter_is_locked(sa)); + + for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) { + sw_xstat = &sfc_sw_xstats[i]; + sfc_sw_xstat_reset(sa, sw_xstat, reset_vals); + reset_vals += sfc_sw_xstat_get_nb_supported(sa, sw_xstat); + } +} + +int +sfc_sw_xstats_configure(struct sfc_adapter *sa) +{ + uint64_t **reset_vals = &sa->sw_xstats.reset_vals; + size_t nb_supported = 0; + unsigned int i; + + for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) + nb_supported += sfc_sw_xstat_get_nb_supported(sa, + &sfc_sw_xstats[i]); + + *reset_vals = rte_realloc(*reset_vals, + nb_supported * sizeof(**reset_vals), 0); + if (*reset_vals == NULL) + return ENOMEM; + + memset(*reset_vals, 0, nb_supported * sizeof(**reset_vals)); + + return 0; +} + +static void +sfc_sw_xstats_free_queues_bitmap(struct sfc_adapter *sa) +{ + rte_bitmap_free(sa->sw_xstats.queues_bitmap); + rte_free(sa->sw_xstats.queues_bitmap_mem); +} + +static int +sfc_sw_xstats_alloc_queues_bitmap(struct sfc_adapter *sa) +{ + struct rte_bitmap **queues_bitmap = &sa->sw_xstats.queues_bitmap; + void **queues_bitmap_mem = &sa->sw_xstats.queues_bitmap_mem; + uint32_t bmp_size; + int rc; + + bmp_size = rte_bitmap_get_memory_footprint(RTE_MAX_QUEUES_PER_PORT); + *queues_bitmap_mem = NULL; + *queues_bitmap = NULL; + + *queues_bitmap_mem = rte_calloc_socket("bitmap_mem", bmp_size, 1, 0, + sa->socket_id); + if (*queues_bitmap_mem == NULL) + return ENOMEM; + + *queues_bitmap = rte_bitmap_init(RTE_MAX_QUEUES_PER_PORT, + *queues_bitmap_mem, bmp_size); + if (*queues_bitmap == NULL) { + rc = EINVAL; + goto fail; + } + + rte_spinlock_init(&sa->sw_xstats.queues_bitmap_lock); + return 0; + +fail: + 
sfc_sw_xstats_free_queues_bitmap(sa); + return rc; +} + +int +sfc_sw_xstats_init(struct sfc_adapter *sa) +{ + sa->sw_xstats.reset_vals = NULL; + + return sfc_sw_xstats_alloc_queues_bitmap(sa); +} + +void +sfc_sw_xstats_close(struct sfc_adapter *sa) +{ + rte_free(sa->sw_xstats.reset_vals); + sa->sw_xstats.reset_vals = NULL; + + sfc_sw_xstats_free_queues_bitmap(sa); +} diff --git a/drivers/net/sfc/sfc_sw_stats.h b/drivers/net/sfc/sfc_sw_stats.h new file mode 100644 index 0000000000..1abded8018 --- /dev/null +++ b/drivers/net/sfc/sfc_sw_stats.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2021 Xilinx, Inc. + */ +#ifndef _SFC_SW_STATS_H +#define _SFC_SW_STATS_H + +#include + +#include "sfc.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void sfc_sw_xstats_get_vals(struct sfc_adapter *sa, + struct rte_eth_xstat *xstats, + unsigned int xstats_count, unsigned int *nb_written, + unsigned int *nb_supported); + +int sfc_sw_xstats_get_names(struct sfc_adapter *sa, + struct rte_eth_xstat_name *xstats_names, + unsigned int xstats_count, unsigned int *nb_written, + unsigned int *nb_supported); + +void sfc_sw_xstats_get_vals_by_id(struct sfc_adapter *sa, const uint64_t *ids, + uint64_t *values, unsigned int n, + unsigned int *nb_supported); + +int sfc_sw_xstats_get_names_by_id(struct sfc_adapter *sa, const uint64_t *ids, + struct rte_eth_xstat_name *xstats_names, + unsigned int size, + unsigned int *nb_supported); + +unsigned int sfc_sw_xstats_get_nb_supported(struct sfc_adapter *sa); + +int sfc_sw_xstats_configure(struct sfc_adapter *sa); + +void sfc_sw_xstats_reset(struct sfc_adapter *sa); + +int sfc_sw_xstats_init(struct sfc_adapter *sa); + +void sfc_sw_xstats_close(struct sfc_adapter *sa); + +#ifdef __cplusplus +} +#endif + +#endif /* _SFC_SW_STATS_H */ diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c index ce2a9a6a4f..49b239f4d2 100644 --- a/drivers/net/sfc/sfc_tx.c +++ b/drivers/net/sfc/sfc_tx.c @@ -980,8 +980,10 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) txq->completed, &txq->added); SFC_ASSERT(rc == 0); - if (likely(pushed != txq->added)) + if (likely(pushed != txq->added)) { efx_tx_qpush(txq->common, txq->added, pushed); + txq->dp.dpq.tx_dbells++; + } } #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE