@@ -162,6 +162,8 @@ struct bnxt_ulp_data {
uint64_t feature_bits;
uint64_t default_class_bits;
uint64_t default_act_bits;
+ struct ulp_fc_tfc_stats_cache_entry *stats_cache;
+ struct bnxt_ulp_sc_info *sc_info;
};
enum bnxt_ulp_tfo_type {
@@ -670,6 +670,7 @@ bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
struct bnxt_ulp_context *ulp_ctx;
struct rte_flow_action_rss *rss_conf;
struct rte_flow_query_count *count;
+ enum bnxt_ulp_device_id dev_id;
uint32_t flow_id;
ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
@@ -681,6 +682,15 @@ bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
return -EINVAL;
}
+ rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
+ if (rc) {
+ BNXT_DRV_DBG(ERR, "Can't identify the device\n");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to query flow.");
+ return -EINVAL;
+ }
+
flow_id = (uint32_t)(uintptr_t)flow;
switch (action->type) {
@@ -696,7 +706,11 @@ bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
count = data;
- rc = ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, count);
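+ /* Thor2 flows are counted via the software stats cache; other devices use the flow counter manager. */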
+ if (dev_id == BNXT_ULP_DEVICE_ID_THOR2)
+ rc = ulp_sc_mgr_query_count_get(ulp_ctx, flow_id, count);
+ else
+ rc = ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, count);
+
if (unlikely(rc)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -26,6 +26,7 @@
#include "ulp_template_struct.h"
#include "ulp_mark_mgr.h"
#include "ulp_fc_mgr.h"
+#include "ulp_sc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_matcher.h"
@@ -887,6 +888,9 @@ ulp_tfc_deinit(struct bnxt *bp,
BNXT_DRV_DBG(ERR, "Failed to close HA (%d)\n", rc);
}
+ /* Delete the Stats Counter Manager */
+ ulp_sc_mgr_deinit(bp->ulp_ctx);
+
/* cleanup the flow database */
ulp_flow_db_deinit(bp->ulp_ctx);
@@ -1043,6 +1047,12 @@ ulp_tfc_init(struct bnxt *bp,
goto jump_to_error;
}
+ rc = ulp_sc_mgr_init(bp->ulp_ctx);
+ if (rc) {
+ BNXT_DRV_DBG(ERR, "Failed to initialize ulp stats cache mgr\n");
+ goto jump_to_error;
+ }
+
rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
if (rc) {
BNXT_DRV_DBG(ERR, "Unable to get device id from ulp.\n");
@@ -25,6 +25,7 @@
#include "ulp_template_struct.h"
#include "ulp_mark_mgr.h"
#include "ulp_fc_mgr.h"
+#include "ulp_sc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_matcher.h"
@@ -739,6 +740,31 @@ bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
return ulp_ctx->cfg_data->fc_info;
}
+/* Function to set the stats cache info into the context */
+static inline int32_t
+bnxt_ulp_cntxt_ptr2_sc_info_set(struct bnxt_ulp_context *ulp_ctx,
+ struct bnxt_ulp_sc_info *ulp_sc_info)
+{
+ if (unlikely(!ulp_ctx || !ulp_ctx->cfg_data)) {
+ BNXT_DRV_DBG(ERR, "Invalid ulp context data\n");
+ return -EINVAL;
+ }
+
+ ulp_ctx->cfg_data->sc_info = ulp_sc_info;
+
+ return 0;
+}
+
+/* Function to retrieve the stats cache info from the context. */
+static inline struct bnxt_ulp_sc_info *
+bnxt_ulp_cntxt_ptr2_sc_info_get(struct bnxt_ulp_context *ulp_ctx)
+{
+ if (unlikely(!ulp_ctx || !ulp_ctx->cfg_data))
+ return NULL;
+
+ return ulp_ctx->cfg_data->sc_info;
+}
+
/* Function to get the ulp flags from the ulp context. */
static inline int32_t
bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
@@ -31,6 +31,8 @@ sources += files(
'bnxt_ulp_tfc.c',
'ulp_fc_mgr_tfc.c',
'ulp_fc_mgr_tf.c',
- 'ulp_alloc_tbl.c')
+ 'ulp_alloc_tbl.c',
+ 'ulp_sc_mgr.c',
+ 'ulp_sc_mgr_tfc.c')
subdir('generic_templates')
@@ -30,6 +30,19 @@
#define ULP_TFC_CNTR_ALIGN 32
#define ULP_TFC_ACT_WORD_SZ 32
+struct ulp_fc_tfc_stats_cache_entry {
+ uint32_t flags;
+ uint64_t timestamp;
+ uint8_t tsid;
+ uint32_t record_size;
+ uint32_t offset;
+ uint8_t dir;
+ uint64_t packet_count;
+ uint64_t byte_count;
+ uint16_t tcp_flags;
+ uint32_t tcp_timestamp;
+};
+
static int32_t
ulp_fc_tfc_update_accum_stats(__rte_unused struct bnxt_ulp_context *ctxt,
__rte_unused struct bnxt_ulp_fc_info *fc_info,
@@ -12,6 +12,7 @@
#include "ulp_mapper.h"
#include "ulp_flow_db.h"
#include "ulp_fc_mgr.h"
+#include "ulp_sc_mgr.h"
#include "ulp_tun.h"
#ifdef TF_FLOW_SCALE_QUERY
#include "tf_resources.h"
@@ -633,6 +634,9 @@ ulp_flow_db_resource_add(struct bnxt_ulp_context *ulp_ctxt,
if (!ulp_fc_mgr_thread_isstarted(ulp_ctxt))
ulp_fc_mgr_thread_start(ulp_ctxt);
+
+ if (!ulp_sc_mgr_thread_isstarted(ulp_ctxt))
+ ulp_sc_mgr_thread_start(ulp_ctxt);
}
/* all good, return success */
@@ -2950,6 +2950,72 @@ ulp_mapper_vnic_tbl_process(struct bnxt_ulp_mapper_parms *parms,
return rc;
}
+static int32_t
+ulp_mapper_stats_cache_tbl_process(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl)
+{
+ struct ulp_flow_db_res_params fid_parms;
+ uint64_t counter_handle;
+ struct ulp_blob data;
+ uint16_t data_len = 0;
+ uint8_t *tmp_data;
+ int32_t rc = 0;
+
+ /* Initialize the blob data */
+ if (unlikely(ulp_blob_init(&data, tbl->result_bit_size,
+ BNXT_ULP_BYTE_ORDER_BE))) {
+ BNXT_DRV_DBG(ERR, "Failed to initialize stats cache table blob\n");
+ return -EINVAL;
+ }
+
+ /* read the arguments from the result table */
+ rc = ulp_mapper_tbl_result_build(parms, tbl, &data,
+ "ULP Stats Cache Result");
+ if (unlikely(rc)) {
+ BNXT_DRV_DBG(ERR, "Failed to build the result blob\n");
+ return rc;
+ }
+
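+ /* The first 64 bits of the result blob carry the counter (action) handle in big-endian form. */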
+ tmp_data = ulp_blob_data_get(&data, &data_len);
+ counter_handle = *(uint64_t *)tmp_data;
+ counter_handle = tfp_be_to_cpu_64(counter_handle);
+
+ memset(&fid_parms, 0, sizeof(fid_parms));
+ fid_parms.direction = tbl->direction;
+ fid_parms.resource_func = tbl->resource_func;
+ fid_parms.resource_type = tbl->resource_type;
+ fid_parms.resource_sub_type = tbl->resource_sub_type;
+ fid_parms.resource_hndl = counter_handle;
+ fid_parms.critical_resource = tbl->critical_resource;
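+ /* Link the counter handle to the flow DB entry so it is released when the flow is destroyed. */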
+ rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms);
+ if (unlikely(rc)) {
+ BNXT_DRV_DBG(ERR, "Failed to link resource to flow rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ rc = ulp_sc_mgr_entry_alloc(parms, counter_handle, tbl);
+ if (unlikely(rc)) {
+ BNXT_DRV_DBG(ERR, "Failed to allocate stats cache entry rc = %d\n",
+ rc);
+ return rc;
+ }
+#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
+#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG_MAPPER
+ BNXT_DRV_DBG(DEBUG, "flow id =0x%x\n", parms->flow_id);
+#endif
+#endif
+ return rc;
+}
+
+static int32_t
+ulp_mapper_stats_cache_tbl_res_free(struct bnxt_ulp_context *ulp,
+ uint32_t fid)
+{
+ ulp_sc_mgr_entry_free(ulp, fid);
+ return 0;
+}
+
/* Free the vnic resource */
static int32_t
ulp_mapper_vnic_tbl_res_free(struct bnxt_ulp_context *ulp __rte_unused,
@@ -4148,6 +4214,9 @@ ulp_mapper_tbls_process(struct bnxt_ulp_mapper_parms *parms, void *error)
case BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE:
rc = ulp_mapper_allocator_tbl_process(parms, tbl);
break;
+ case BNXT_ULP_RESOURCE_FUNC_STATS_CACHE:
+ rc = ulp_mapper_stats_cache_tbl_process(parms, tbl);
+ break;
default:
BNXT_DRV_DBG(ERR, "Unexpected mapper resource %d\n",
tbl->resource_func);
@@ -4286,6 +4355,10 @@ ulp_mapper_resource_free(struct bnxt_ulp_context *ulp,
res->direction,
res->resource_hndl);
break;
+ case BNXT_ULP_RESOURCE_FUNC_STATS_CACHE:
+ rc = ulp_mapper_stats_cache_tbl_res_free(ulp,
+ fid);
+ break;
default:
break;
}
new file mode 100644
@@ -0,0 +1,529 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2021 Broadcom
+ * All rights reserved.
+ */
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include "bnxt.h"
+#include "bnxt_ulp.h"
+#include "bnxt_ulp_utils.h"
+#include "bnxt_ulp_tfc.h"
+#include "bnxt_tf_common.h"
+#include "ulp_sc_mgr.h"
+#include "ulp_flow_db.h"
+#include "ulp_template_db_enum.h"
+#include "ulp_template_struct.h"
+#include "tfc.h"
+#include "tfc_debug.h"
+#include "tfc_action_handle.h"
+
+#define ULP_TFC_CNTR_READ_BYTES 32
+#define ULP_TFC_CNTR_ALIGN 32
+#define ULP_TFC_ACT_WORD_SZ 32
+
+static const struct bnxt_ulp_sc_core_ops *
+bnxt_ulp_sc_ops_get(struct bnxt_ulp_context *ctxt)
+{
+ int32_t rc;
+ enum bnxt_ulp_device_id dev_id;
+ const struct bnxt_ulp_sc_core_ops *func_ops;
+
+ rc = bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id);
+ if (rc)
+ return NULL;
+
+ switch (dev_id) {
+ case BNXT_ULP_DEVICE_ID_THOR2:
+ func_ops = &ulp_sc_tfc_core_ops;
+ break;
+ case BNXT_ULP_DEVICE_ID_THOR:
+ case BNXT_ULP_DEVICE_ID_STINGRAY:
+ case BNXT_ULP_DEVICE_ID_WH_PLUS:
+ default:
+ func_ops = NULL;
+ break;
+ }
+ return func_ops;
+}
+
+int32_t ulp_sc_mgr_init(struct bnxt_ulp_context *ctxt)
+{
+ const struct bnxt_ulp_sc_core_ops *sc_ops;
+ struct bnxt_ulp_device_params *dparms;
+ struct bnxt_ulp_sc_info *ulp_sc_info;
+ uint32_t stats_cache_tbl_sz;
+ uint32_t dev_id;
+ int rc;
+
+ if (!ctxt) {
+ BNXT_DRV_DBG(DEBUG, "Invalid ULP CTXT\n");
+ return -EINVAL;
+ }
+
+ if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
+ BNXT_DRV_DBG(DEBUG, "Failed to get device id\n");
+ return -EINVAL;
+ }
+
+ dparms = bnxt_ulp_device_params_get(dev_id);
+ if (!dparms) {
+ BNXT_DRV_DBG(DEBUG, "Failed to get device parms\n");
+ return -EINVAL;
+ }
+
+ sc_ops = bnxt_ulp_sc_ops_get(ctxt);
+ if (sc_ops == NULL) {
+ BNXT_DRV_DBG(DEBUG, "Failed to get the counter ops\n");
+ return -EINVAL;
+ }
+
+ ulp_sc_info = rte_zmalloc("ulp_sc_info", sizeof(*ulp_sc_info), 0);
+ if (!ulp_sc_info) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ ulp_sc_info->sc_ops = sc_ops;
+ ulp_sc_info->flags = 0;
+
+ rc = pthread_mutex_init(&ulp_sc_info->sc_lock, NULL);
+ if (rc) {
+ BNXT_DRV_DBG(ERR, "Failed to initialize sc mutex\n");
+ goto error;
+ }
+
+ /* Add the SC info tbl to the ulp context. */
+ bnxt_ulp_cntxt_ptr2_sc_info_set(ctxt, ulp_sc_info);
+
+ ulp_sc_info->num_counters = dparms->ext_flow_db_num_entries;
+ if (!ulp_sc_info->num_counters) {
+ /* No need for software counters, call fw directly */
+ BNXT_DRV_DBG(DEBUG, "Sw flow counter support not enabled\n");
+ return 0;
+ }
+
+ /*
+ * Size is determined by the number of flows + 10% to cover IDs
+ * used for resources.
+ */
+ stats_cache_tbl_sz = sizeof(struct ulp_sc_tfc_stats_cache_entry) *
+ (ulp_sc_info->num_counters +
+ (ulp_sc_info->num_counters / 10));
+
+ ulp_sc_info->stats_cache_tbl = rte_zmalloc("ulp_stats_cache_tbl",
+ stats_cache_tbl_sz, 0);
+ if (!ulp_sc_info->stats_cache_tbl) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ ulp_sc_info->read_data = rte_zmalloc("ulp_stats_cache_read_data",
+ ULP_SC_BATCH_SIZE * ULP_SC_PAGE_SIZE,
+ ULP_SC_PAGE_SIZE);
+ if (!ulp_sc_info->read_data) {
+ rte_free(ulp_sc_info->stats_cache_tbl);
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ rc = ulp_sc_mgr_thread_start(ctxt);
+ if (rc)
+ BNXT_DRV_DBG(DEBUG, "Stats counter thread start failed\n");
+
+ error:
+ return rc;
+}
+
+/*
+ * Release all resources in the stats cache manager for this ulp context
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+int32_t
+ulp_sc_mgr_deinit(struct bnxt_ulp_context *ctxt)
+{
+ struct bnxt_ulp_sc_info *ulp_sc_info;
+
+ ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+
+ if (!ulp_sc_info)
+ return -EINVAL;
+
+ pthread_mutex_lock(&ulp_sc_info->sc_lock);
+
+ ulp_sc_mgr_thread_cancel(ctxt);
+
+ pthread_mutex_destroy(&ulp_sc_info->sc_lock);
+
+ if (ulp_sc_info->stats_cache_tbl)
+ rte_free(ulp_sc_info->stats_cache_tbl);
+
+ if (ulp_sc_info->read_data)
+ rte_free(ulp_sc_info->read_data);
+
+ rte_free(ulp_sc_info);
+
+ /* Safe to ignore on deinit */
+ (void)bnxt_ulp_cntxt_ptr2_sc_info_set(ctxt, NULL);
+
+ return 0;
+}
+
+#define ULP_SC_PERIOD_S 1
+#define ULP_SC_PERIOD_MS (ULP_SC_PERIOD_S * 1000)
+
+static void *ulp_stats_cache_main_loop(void *arg)
+{
+ struct ulp_sc_tfc_stats_cache_entry *count;
+ const struct bnxt_ulp_sc_core_ops *sc_ops;
+ struct ulp_sc_tfc_stats_cache_entry *sce;
+ struct ulp_sc_tfc_stats_cache_entry *sce_end;
+ struct tfc_mpc_batch_info_t batch_info;
+ struct bnxt_ulp_sc_info *ulp_sc_info;
+ struct bnxt_ulp_context *ctxt = NULL;
+ uint16_t words = (ULP_TFC_CNTR_READ_BYTES + ULP_TFC_ACT_WORD_SZ - 1) / ULP_TFC_ACT_WORD_SZ;
+ uint32_t batch_size;
+ struct tfc *tfcp = NULL;
+ uint32_t batch;
+ uint32_t delay = ULP_SC_PERIOD_MS;
+ uint64_t start;
+ uint64_t stop;
+ uint64_t hz;
+ int oldstate;
+ int oldtype;
+ uint8_t *data;
+ int rc;
+ static uint32_t loop;
+ uint64_t cycles = 0;
+ uint64_t cpms = 0;
+
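+ /* Poll loop: once per period, batch the counter reads over MPC and copy the results into the stats cache entries. */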
+ while (!ctxt) {
+ ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
+
+ if (ctxt)
+ break;
+
+ BNXT_DRV_DBG(INFO, "could not get the ulp context lock\n");
+ rte_delay_us_block(1000);
+ }
+
+
+ ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+ if (!ulp_sc_info) {
+ bnxt_ulp_cntxt_entry_release();
+ goto terminate;
+ }
+
+ sc_ops = ulp_sc_info->sc_ops;
+
+ pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
+ pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype);
+
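+ /* Timer cycles per millisecond, used to shorten the next delay by the time spent collecting stats. */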
+ hz = rte_get_timer_hz();
+ cpms = hz / 1000;
+
+ while (true) {
+ bnxt_ulp_cntxt_entry_release();
+ ctxt = NULL;
+ rte_delay_ms(delay);
+
+ while (!ctxt) {
+ ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
+
+ if (ctxt)
+ break;
+
+ BNXT_DRV_DBG(INFO, "could not get the ulp context lock\n");
+ rte_delay_us_block(1);
+ }
+
+ start = rte_get_timer_cycles();
+ sce = ulp_sc_info->stats_cache_tbl;
+ sce_end = sce + (ulp_sc_info->num_counters + (ulp_sc_info->num_counters / 10));
+
+ while (ulp_sc_info->num_entries && (sce < sce_end)) {
+ data = ulp_sc_info->read_data;
+
+ rc = tfc_mpc_batch_start(&batch_info);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "MPC batch start failed rc:%d loop:%d\n", rc, loop);
+ break;
+ }
+
+ if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
+ break;
+
+ rc = pthread_mutex_lock(&ulp_sc_info->sc_lock);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to get SC lock, terminating main loop rc:%d loop:%d\n",
+ rc, loop);
+ goto terminate;
+ }
+
+ for (batch = 0; (batch < ULP_SC_BATCH_SIZE) && (sce < sce_end);) {
+ if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID)) {
+ sce++;
+ continue;
+ }
+
+ tfcp = bnxt_ulp_cntxt_tfcp_get(sce->ctxt);
+ if (tfcp == NULL) {
+ bnxt_ulp_cntxt_entry_release();
+ goto terminate;
+ }
+
+
+ /* Store the entry pointer to use for counter update */
+ batch_info.em_hdl[batch_info.count] = (uint64_t)sce;
+
+ rc = sc_ops->ulp_stats_cache_update(tfcp,
+ sce->dir,
+ data,
+ sce->handle,
+ &words,
+ &batch_info,
+ sce->reset);
+ if (rc) {
+ /* Abort this batch */
+ PMD_DRV_LOG(ERR,
+ "loop:%d stats cache update failed:%d\n",
+ loop, rc);
+ break;
+ }
+
+ if (sce->reset)
+ sce->reset = false;
+
+ /* Next */
+ batch++;
+ sce++;
+ data += ULP_SC_PAGE_SIZE;
+ }
+
+ batch_size = batch_info.count;
+ rc = tfc_mpc_batch_end(tfcp, &batch_info);
+
+ pthread_mutex_unlock(&ulp_sc_info->sc_lock);
+ bnxt_ulp_cntxt_release_fdb_lock(ctxt);
+
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "MPC batch end failed rc:%d loop:%d\n",
+ rc, loop);
+ batch_info.enabled = false;
+ break;
+ }
+
+ /* Process counts */
+ data = ulp_sc_info->read_data;
+
+ for (batch = 0; batch < batch_size; batch++) {
+ /* Check for error in completion */
+ if (batch_info.result[batch]) {
+ PMD_DRV_LOG(ERR,
+ "batch:%d result:%d\n",
+ batch, batch_info.result[batch]);
+ } else {
+ count =
+ (struct ulp_sc_tfc_stats_cache_entry *)((uintptr_t)batch_info.em_hdl[batch]);
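+ /* packet_count, byte_count and the two count_fields are contiguous, so one 32-byte copy updates them all. */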
+ memcpy(&count->packet_count, data, ULP_TFC_ACT_WORD_SZ);
+ }
+
+ data += ULP_SC_PAGE_SIZE;
+ }
+ }
+
+ loop++;
+ stop = rte_get_timer_cycles();
+ cycles = stop - start;
+ if (cycles > (hz * ULP_SC_PERIOD_S)) {
+ PMD_DRV_LOG(ERR, "%s: Stats collection time exceeded %dms Cycles:%" PRIu64 "\n",
+ __func__, ULP_SC_PERIOD_MS, cycles);
+ delay = ULP_SC_PERIOD_MS;
+ } else {
+ delay = ULP_SC_PERIOD_MS - (cycles / cpms);
+
+ if (delay > ULP_SC_PERIOD_MS) {
+ PMD_DRV_LOG(ERR, "%s: Stats collection delay:%dms exceeds %dms\n",
+ __func__, delay, ULP_SC_PERIOD_MS);
+ delay = ULP_SC_PERIOD_MS;
+ }
+ }
+ }
+
+ terminate:
+ return NULL;
+}
+
+/*
+ * Check if the stats cache thread that walks through the flows is started
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+bool ulp_sc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
+{
+ struct bnxt_ulp_sc_info *ulp_sc_info;
+
+ ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+
+ if (ulp_sc_info)
+ return !!(ulp_sc_info->flags & ULP_FLAG_SC_THREAD);
+
+ return false;
+}
+
+/*
+ * Setup the stats cache timer thread that will fetch/accumulate raw counter
+ * data from the chip's internal flow counters
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+int32_t
+ulp_sc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
+{
+ struct bnxt_ulp_sc_info *ulp_sc_info;
+ int rc;
+
+ ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+
+ if (ulp_sc_info && !(ulp_sc_info->flags & ULP_FLAG_SC_THREAD)) {
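+ /* Pass cfg_data so the thread can re-acquire the ulp context on each pass. */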
+ rc = pthread_create(&ulp_sc_info->tid,
+ NULL,
+ &ulp_stats_cache_main_loop,
+ (void *)ctxt->cfg_data);
+ if (rc)
+ return rc;
+
+ ulp_sc_info->flags |= ULP_FLAG_SC_THREAD;
+ }
+
+ return 0;
+}
+
+/*
+ * Cancel the stats cache manager thread
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+void ulp_sc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
+{
+ struct bnxt_ulp_sc_info *ulp_sc_info;
+
+ ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+ if (!ulp_sc_info)
+ return;
+
+ ulp_sc_info->flags &= ~ULP_FLAG_SC_THREAD;
+ pthread_cancel(ulp_sc_info->tid);
+}
+
+/*
+ * Fill the rte_flow_query_count 'data' argument passed
+ * in the rte_flow_query() with the values obtained and
+ * accumulated locally.
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ * flow_id [in] The flow identifier
+ *
+ * count [out] The rte_flow_query_count 'data' that is set
+ *
+ */
+int ulp_sc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
+ uint32_t flow_id,
+ struct rte_flow_query_count *count)
+{
+ struct ulp_sc_tfc_stats_cache_entry *sce;
+ struct bnxt_ulp_sc_info *ulp_sc_info;
+ int rc = 0;
+
+ ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+ if (!ulp_sc_info)
+ return -ENODEV;
+
+ sce = ulp_sc_info->stats_cache_tbl;
+ sce += flow_id;
+
+ /* If entry is not valid return an error */
+ if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID))
+ return -EBUSY;
+
+ count->hits = sce->packet_count;
+ count->hits_set = 1;
+ count->bytes = sce->byte_count;
+ count->bytes_set = 1;
+
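+ /* Defer the hardware counter clear to the next stats cache poll. */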
+ if (count->reset)
+ sce->reset = true;
+
+ return rc;
+}
+
+
+int ulp_sc_mgr_entry_alloc(struct bnxt_ulp_mapper_parms *parms,
+ uint64_t counter_handle,
+ struct bnxt_ulp_mapper_tbl_info *tbl)
+{
+ struct ulp_sc_tfc_stats_cache_entry *sce;
+ struct bnxt_ulp_sc_info *ulp_sc_info;
+
+ ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(parms->ulp_ctx);
+ if (!ulp_sc_info)
+ return -ENODEV;
+
+ pthread_mutex_lock(&ulp_sc_info->sc_lock);
+
+ sce = ulp_sc_info->stats_cache_tbl;
+ sce += parms->flow_id;
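+ /* The stats cache is indexed directly by flow id, so no separate lookup is needed. */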
+
+ /* If entry is not free return an error */
+ if (sce->flags & ULP_SC_ENTRY_FLAG_VALID) {
+ pthread_mutex_unlock(&ulp_sc_info->sc_lock);
+ return -EBUSY;
+ }
+
+ memset(sce, 0, sizeof(*sce));
+ sce->ctxt = parms->ulp_ctx;
+ sce->flags |= ULP_SC_ENTRY_FLAG_VALID;
+ sce->handle = counter_handle;
+ sce->dir = tbl->direction;
+ ulp_sc_info->num_entries++;
+ pthread_mutex_unlock(&ulp_sc_info->sc_lock);
+
+ return 0;
+}
+
+void ulp_sc_mgr_entry_free(struct bnxt_ulp_context *ulp,
+ uint32_t fid)
+{
+ struct ulp_sc_tfc_stats_cache_entry *sce;
+ struct bnxt_ulp_sc_info *ulp_sc_info;
+
+ ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ulp);
+ if (!ulp_sc_info)
+ return;
+
+ pthread_mutex_lock(&ulp_sc_info->sc_lock);
+
+ sce = ulp_sc_info->stats_cache_tbl;
+ sce += fid;
+
+ if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID)) {
+ pthread_mutex_unlock(&ulp_sc_info->sc_lock);
+ return;
+ }
+
+ sce->flags = 0;
+ ulp_sc_info->num_entries--;
+
+ pthread_mutex_unlock(&ulp_sc_info->sc_lock);
+}
new file mode 100644
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2023 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _ULP_SC_MGR_H_
+#define _ULP_SC_MGR_H_
+
+#include "pthread.h"
+#include "bnxt_ulp.h"
+#include "ulp_flow_db.h"
+
+#define ULP_FLAG_SC_THREAD BIT(0)
+
+#define ULP_SC_ENTRY_FLAG_VALID BIT(0)
+
+#define ULP_SC_BATCH_SIZE 64
+#define ULP_SC_PAGE_SIZE 4096
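+/* The read buffer provides one 4KB page per counter read in a batch of up to 64 reads. */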
+
+struct ulp_sc_tfc_stats_cache_entry {
+ struct bnxt_ulp_context *ctxt;
+ uint32_t flags;
+ uint64_t timestamp;
+ uint64_t handle;
+ uint8_t dir;
+ uint64_t packet_count;
+ uint64_t byte_count;
+ uint64_t count_fields1;
+ uint64_t count_fields2;
+ bool reset;
+};
+
+struct bnxt_ulp_sc_info {
+ struct ulp_sc_tfc_stats_cache_entry *stats_cache_tbl;
+ uint8_t *read_data;
+ uint32_t flags;
+ uint32_t num_entries;
+ pthread_mutex_t sc_lock;
+ uint32_t num_counters;
+ pthread_t tid;
+ const struct bnxt_ulp_sc_core_ops *sc_ops;
+};
+
+struct bnxt_ulp_sc_core_ops {
+ int32_t
+ (*ulp_stats_cache_update)(struct tfc *tfcp,
+ int dir,
+ uint8_t *data,
+ uint64_t handle,
+ uint16_t *words,
+ struct tfc_mpc_batch_info_t *batch_info,
+ bool reset);
+};
+
+/*
+ * Allocate all resources in the stats cache manager for this ulp context
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ */
+int32_t
+ulp_sc_mgr_init(struct bnxt_ulp_context *ctxt);
+
+/*
+ * Release all resources in the stats cache manager for this ulp context
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ */
+int32_t
+ulp_sc_mgr_deinit(struct bnxt_ulp_context *ctxt);
+
+/*
+ * Setup the stats cache timer thread that will fetch/accumulate raw counter
+ * data from the chip's internal stats caches
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ */
+int32_t
+ulp_sc_mgr_thread_start(struct bnxt_ulp_context *ctxt);
+
+/*
+ * Alarm handler that will issue the TF-Core API to fetch
+ * data from the chip's internal stats caches
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ */
+void
+ulp_sc_mgr_alarm_cb(void *arg);
+
+/*
+ * Cancel the stats cache manager thread
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+void ulp_sc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt);
+
+/*
+ * Check if the thread that walks through the flows is started
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ *
+ */
+bool ulp_sc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt);
+
+/*
+ * Get the current counts for the given flow id
+ *
+ * ctxt [in] The ulp context for the stats cache manager
+ * flow_id [in] The flow identifier
+ * count [out] structure in which the updated counts are passed
+ * back to the caller.
+ *
+ */
+int ulp_sc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
+ uint32_t flow_id,
+ struct rte_flow_query_count *count);
+
+/*
+ * Allocate a cache entry for flow
+ *
+ * parms [in] Various fields used to identify the flow
+ * counter_handle [in] This is the action table entry identifier.
+ * tbl [in] The mapper table info for this entry (provides the direction)
+ *
+ */
+int ulp_sc_mgr_entry_alloc(struct bnxt_ulp_mapper_parms *parms,
+ uint64_t counter_handle,
+ struct bnxt_ulp_mapper_tbl_info *tbl);
+
+/*
+ * Free cache entry
+ *
+ * ulp [in] The ulp context for the stats cache manager
+ * fid [in] The flow identifier
+ *
+ */
+void ulp_sc_mgr_entry_free(struct bnxt_ulp_context *ulp,
+ uint32_t fid);
+
+extern const struct bnxt_ulp_sc_core_ops ulp_sc_tfc_core_ops;
+
+#endif /* _ULP_SC_MGR_H_ */
new file mode 100644
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2021 Broadcom
+ * All rights reserved.
+ */
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_alarm.h>
+#include "bnxt.h"
+#include "bnxt_ulp.h"
+#include "bnxt_ulp_utils.h"
+#include "bnxt_ulp_tfc.h"
+#include "bnxt_tf_common.h"
+#include "ulp_sc_mgr.h"
+#include "ulp_flow_db.h"
+#include "ulp_template_db_enum.h"
+#include "ulp_template_struct.h"
+#include "tfc.h"
+#include "tfc_debug.h"
+#include "tfc_action_handle.h"
+
+static int32_t
+ulp_sc_tfc_stats_cache_update(struct tfc *tfcp,
+ int dir,
+ uint8_t *data,
+ uint64_t handle,
+ uint16_t *words,
+ struct tfc_mpc_batch_info_t *batch_info,
+ bool reset)
+{
+ struct tfc_cmm_info cmm_info;
+ struct tfc_cmm_clr cmm_clr;
+ int rc;
+
+ cmm_info.dir = dir;
+ cmm_info.rsubtype = CFA_RSUBTYPE_CMM_ACT;
+ cmm_info.act_handle = handle;
+ cmm_clr.clr = reset;
+
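+ /* On reset, clear only the first 16 bytes (packet and byte counters). */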
+ if (reset) {
+ cmm_clr.offset_in_byte = 0;
+ cmm_clr.sz_in_byte = 16;
+ }
+
+ rc = tfc_act_get(tfcp,
+ batch_info,
+ &cmm_info,
+ &cmm_clr,
+ data,
+ words);
+
+ return rc;
+}
+
+
+const struct bnxt_ulp_sc_core_ops ulp_sc_tfc_core_ops = {
+ .ulp_stats_cache_update = ulp_sc_tfc_stats_cache_update
+};