From patchwork Thu Oct 21 07:03:55 2021
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Andrew Rybchenko
X-Patchwork-Id: 102534
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Andrew Rybchenko
To: dev@dpdk.org
Cc: David Marchand, Viacheslav Galaktionov, Andy Moreton
Date: Thu, 21 Oct 2021 10:03:55 +0300
Message-Id: <20211021070355.3547582-1-andrew.rybchenko@oktetlabs.ru>
X-Mailer: git-send-email 2.30.2
Subject: [dpdk-dev] [PATCH] net/sfc: allow control threads for counter queue
 polling

From: Viacheslav Galaktionov

MAE counters can be polled from a control thread if no service core is
allocated for this purpose.

Signed-off-by: Viacheslav Galaktionov
Signed-off-by: Andrew Rybchenko
Reviewed-by: Andy Moreton
---
The problem of requiring service cores for HW offload was raised by David
during review in the 21.08 release cycle.
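For context, a minimal illustrative sketch (not part of the patch) of the
fallback pattern the diff below implements: poll from a service core when one
exists, otherwise from a control thread. The names poll_run, poll_thread,
poll_once() and poll_loop() are hypothetical; only rte_service_lcore_count(),
rte_ctrl_thread_create() and pthread_join() are real DPDK/pthread APIs.

/*
 * Illustrative sketch only: prefer a service lcore, fall back to a
 * control thread when no service lcore is available.
 */
#include <stdbool.h>
#include <pthread.h>

#include <rte_lcore.h>    /* rte_ctrl_thread_create() */
#include <rte_service.h>  /* rte_service_lcore_count() */

static volatile bool poll_run;
static pthread_t poll_thread;

static void
poll_once(void)
{
	/* Poll the counter queue once (driver-specific work, omitted). */
}

static void *
poll_loop(void *arg)
{
	(void)arg;
	/* Plain read is enough: one extra iteration after stop is harmless. */
	while (poll_run)
		poll_once();
	return NULL;
}

/* Prefer a service core; spawn a control thread when none exists. */
static int
poll_start(void)
{
	if (rte_service_lcore_count() > 0)
		return 0; /* register an rte_service instead (omitted) */

	poll_run = true;
	return rte_ctrl_thread_create(&poll_thread, "counter_poll",
				      NULL, poll_loop, NULL);
}

static void
poll_stop(void)
{
	/* Make the stop request visible before joining the poller. */
	__atomic_store_n(&poll_run, false, __ATOMIC_RELEASE);
	pthread_join(poll_thread, NULL);
}

In the patch itself this choice is made in sfc_mae_counter_start() based on
sfc_mae_counter_get_service_lcore().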
 doc/guides/rel_notes/release_21_11.rst |   1 +
 drivers/net/sfc/sfc_mae.h              |  26 +++++-
 drivers/net/sfc/sfc_mae_counter.c      | 120 ++++++++++++++++++++-----
 3 files changed, 123 insertions(+), 24 deletions(-)

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 041383ee2a..9517e0fb0a 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -158,6 +158,7 @@ New Features
 
   * Added port representors support on SN1000 SmartNICs
   * Added flow API transfer proxy support
+  * Added support for flow counters without service cores
 
 * **Updated Marvell cnxk crypto PMD.**
 
diff --git a/drivers/net/sfc/sfc_mae.h b/drivers/net/sfc/sfc_mae.h
index 23dcf1e482..45a2fdc3bb 100644
--- a/drivers/net/sfc/sfc_mae.h
+++ b/drivers/net/sfc/sfc_mae.h
@@ -127,6 +127,13 @@ struct sfc_mae_counters {
 	unsigned int			n_mae_counters;
 };
 
+/** Options for MAE counter polling mode */
+enum sfc_mae_counter_polling_mode {
+	SFC_MAE_COUNTER_POLLING_OFF = 0,
+	SFC_MAE_COUNTER_POLLING_SERVICE,
+	SFC_MAE_COUNTER_POLLING_THREAD,
+};
+
 struct sfc_mae_counter_registry {
 	/* Common counter information */
 	/** Counters collection */
@@ -143,10 +150,21 @@ struct sfc_mae_counter_registry {
 	bool				use_credits;
 
 	/* Information used by configuration routines */
-	/** Counter service core ID */
-	uint32_t			service_core_id;
-	/** Counter service ID */
-	uint32_t			service_id;
+	enum sfc_mae_counter_polling_mode polling_mode;
+	union {
+		struct {
+			/** Counter service core ID */
+			uint32_t		core_id;
+			/** Counter service ID */
+			uint32_t		id;
+		} service;
+		struct {
+			/** Counter thread ID */
+			pthread_t		id;
+			/** The thread should keep running */
+			volatile bool		run;
+		} thread;
+	} polling;
 };
 
 /**
diff --git a/drivers/net/sfc/sfc_mae_counter.c b/drivers/net/sfc/sfc_mae_counter.c
index 418caffe59..5f2aea1bf4 100644
--- a/drivers/net/sfc/sfc_mae_counter.c
+++ b/drivers/net/sfc/sfc_mae_counter.c
@@ -45,9 +45,6 @@ sfc_mae_counter_rxq_required(struct sfc_adapter *sa)
 	if (encp->enc_mae_supported == B_FALSE)
 		return false;
 
-	if (sfc_mae_counter_get_service_lcore(sa) == RTE_MAX_LCORE)
-		return false;
-
 	return true;
 }
 
@@ -402,6 +399,23 @@ sfc_mae_counter_routine(void *arg)
 	return 0;
 }
 
+static void *
+sfc_mae_counter_thread(void *data)
+{
+	struct sfc_adapter *sa = data;
+	struct sfc_mae_counter_registry *counter_registry =
+		&sa->mae.counter_registry;
+
+	/*
+	 * Check run condition without atomic since it is not a problem
+	 * if we run a bit more before we notice stop request
+	 */
+	while (counter_registry->polling.thread.run)
+		sfc_mae_counter_routine(data);
+
+	return NULL;
+}
+
 static void
 sfc_mae_counter_service_unregister(struct sfc_adapter *sa)
 {
@@ -410,15 +424,15 @@ sfc_mae_counter_service_unregister(struct sfc_adapter *sa)
 	const unsigned int wait_ms = 10000;
 	unsigned int i;
 
-	rte_service_runstate_set(registry->service_id, 0);
-	rte_service_component_runstate_set(registry->service_id, 0);
+	rte_service_runstate_set(registry->polling.service.id, 0);
+	rte_service_component_runstate_set(registry->polling.service.id, 0);
 
 	/*
 	 * Wait for the counter routine to finish the last iteration.
 	 * Give up on timeout.
 	 */
 	for (i = 0; i < wait_ms; i++) {
-		if (rte_service_may_be_active(registry->service_id) == 0)
+		if (rte_service_may_be_active(registry->polling.service.id) == 0)
 			break;
 
 		rte_delay_ms(1);
@@ -426,10 +440,10 @@ sfc_mae_counter_service_unregister(struct sfc_adapter *sa)
 	if (i == wait_ms)
 		sfc_warn(sa, "failed to wait for counter service to stop");
 
-	rte_service_map_lcore_set(registry->service_id,
-				  registry->service_core_id, 0);
+	rte_service_map_lcore_set(registry->polling.service.id,
+				  registry->polling.service.core_id, 0);
 
-	rte_service_component_unregister(registry->service_id);
+	rte_service_component_unregister(registry->polling.service.id);
 }
 
 static struct sfc_rxq_info *
@@ -438,6 +452,18 @@ sfc_counter_rxq_info_get(struct sfc_adapter *sa)
 	return &sfc_sa2shared(sa)->rxq_info[sa->counter_rxq.sw_index];
 }
 
+static void
+sfc_mae_counter_registry_prepare(struct sfc_mae_counter_registry *registry,
+				 struct sfc_adapter *sa,
+				 uint32_t counter_stream_flags)
+{
+	registry->rx_pkt_burst = sa->eth_dev->rx_pkt_burst;
+	registry->rx_dp = sfc_counter_rxq_info_get(sa)->dp;
+	registry->pushed_n_buffers = 0;
+	registry->use_credits = counter_stream_flags &
+		EFX_MAE_COUNTERS_STREAM_OUT_USES_CREDITS;
+}
+
 static int
 sfc_mae_counter_service_register(struct sfc_adapter *sa,
 				 uint32_t counter_stream_flags)
@@ -458,11 +484,8 @@ sfc_mae_counter_service_register(struct sfc_adapter *sa,
 	service.socket_id = sa->socket_id;
 	service.callback = sfc_mae_counter_routine;
 	service.callback_userdata = sa;
-	counter_registry->rx_pkt_burst = sa->eth_dev->rx_pkt_burst;
-	counter_registry->rx_dp = sfc_counter_rxq_info_get(sa)->dp;
-	counter_registry->pushed_n_buffers = 0;
-	counter_registry->use_credits = counter_stream_flags &
-		EFX_MAE_COUNTERS_STREAM_OUT_USES_CREDITS;
+	sfc_mae_counter_registry_prepare(counter_registry, sa,
+					 counter_stream_flags);
 
 	cid = sfc_get_service_lcore(sa->socket_id);
 	if (cid == RTE_MAX_LCORE && sa->socket_id != SOCKET_ID_ANY) {
@@ -520,8 +543,9 @@ sfc_mae_counter_service_register(struct sfc_adapter *sa,
 		goto fail_runstate_set;
 	}
 
-	counter_registry->service_core_id = cid;
-	counter_registry->service_id = sid;
+	counter_registry->polling_mode = SFC_MAE_COUNTER_POLLING_SERVICE;
+	counter_registry->polling.service.core_id = cid;
+	counter_registry->polling.service.id = sid;
 
 	sfc_log_init(sa, "done");
 
@@ -544,6 +568,47 @@ sfc_mae_counter_service_register(struct sfc_adapter *sa,
 	return rc;
 }
 
+static void
+sfc_mae_counter_thread_stop(struct sfc_adapter *sa)
+{
+	struct sfc_mae_counter_registry *counter_registry =
+		&sa->mae.counter_registry;
+	int rc;
+
+	/* Ensure that flag is set before attempting to join thread */
+	__atomic_store_n(&counter_registry->polling.thread.run, false,
+			 __ATOMIC_RELEASE);
+
+	rc = pthread_join(counter_registry->polling.thread.id, NULL);
+	if (rc != 0)
+		sfc_err(sa, "failed to join the MAE counter polling thread");
+
+	counter_registry->polling_mode = SFC_MAE_COUNTER_POLLING_OFF;
+}
+
+static int
+sfc_mae_counter_thread_spawn(struct sfc_adapter *sa,
+			     uint32_t counter_stream_flags)
+{
+	struct sfc_mae_counter_registry *counter_registry =
+		&sa->mae.counter_registry;
+	int rc;
+
+	sfc_log_init(sa, "entry");
+
+	sfc_mae_counter_registry_prepare(counter_registry, sa,
+					 counter_stream_flags);
+
+	counter_registry->polling_mode = SFC_MAE_COUNTER_POLLING_THREAD;
+	counter_registry->polling.thread.run = true;
+
+	rc = rte_ctrl_thread_create(&sa->mae.counter_registry.polling.thread.id,
+				    "mae_counter_thread", NULL,
+				    sfc_mae_counter_thread, sa);
+
+	return rc;
+}
+
 int
 sfc_mae_counters_init(struct sfc_mae_counters *counters,
 		      uint32_t nb_counters_max)
@@ -754,7 +819,15 @@ sfc_mae_counter_stop(struct sfc_adapter *sa)
 		return;
 	}
 
-	sfc_mae_counter_service_unregister(sa);
+	SFC_ASSERT(mae->counter_registry.polling_mode !=
+		   SFC_MAE_COUNTER_POLLING_OFF);
+
+	if (mae->counter_registry.polling_mode ==
+	    SFC_MAE_COUNTER_POLLING_SERVICE)
+		sfc_mae_counter_service_unregister(sa);
+	else
+		sfc_mae_counter_thread_stop(sa);
+
 	efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);
 
 	mae->counter_rxq_running = false;
@@ -787,15 +860,22 @@ sfc_mae_counter_start(struct sfc_adapter *sa)
 
 	sfc_log_init(sa, "stream start flags: 0x%x", flags);
 
-	rc = sfc_mae_counter_service_register(sa, flags);
-	if (rc != 0)
-		goto fail_service_register;
+	if (sfc_mae_counter_get_service_lcore(sa) != RTE_MAX_LCORE) {
+		rc = sfc_mae_counter_service_register(sa, flags);
+		if (rc != 0)
+			goto fail_service_register;
+	} else {
+		rc = sfc_mae_counter_thread_spawn(sa, flags);
+		if (rc != 0)
+			goto fail_thread_spawn;
+	}
 
 	mae->counter_rxq_running = true;
 
 	return 0;
 
 fail_service_register:
+fail_thread_spawn:
 	efx_mae_counters_stream_stop(sa->nic, sa->counter_rxq.sw_index, NULL);
 
 fail_counter_stream: