[v1,1/4] net/bnxt/tf_ulp: fix F1F2 VXLAN counter accumulation for Thor2
From: Shuanglin Wang <shuanglin.wang@broadcom.com>
Add code for Thor2 to support counter accumulation for F1F2
VXLAN parent-child flows.

Also, add a check for the device state in the TF tunnel free API:
if it is zero, TF skips the resource free, as it was already done
by bnxt_free_all_hwrm_resources.
Fixes: 0513f0af034d ("net/bnxt/tf_ulp: add stats cache for Thor2")
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Signed-off-by: Shuanglin Wang <shuanglin.wang@broadcom.com>
Signed-off-by: Sangtani Parag Satishbhai <parag-satishbhai.sangtani@broadcom.com>
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
---
 drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c |  12 ++-
 drivers/net/bnxt/tf_ulp/ulp_flow_db.c      |   4 +-
 drivers/net/bnxt/tf_ulp/ulp_flow_db.h      |  13 +++
 drivers/net/bnxt/tf_ulp/ulp_sc_mgr.c       | 110 +++++++++++++++++++--
 drivers/net/bnxt/tf_ulp/ulp_sc_mgr.h       |  18 +++-
 5 files changed, 142 insertions(+), 15 deletions(-)
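
Note: with this patch, querying the COUNT action on an F1 (parent) VXLAN
flow on Thor2 returns hit/byte totals accumulated from its F2 (child)
flows. A minimal sketch of that query from the application side, using
the generic rte_flow API; the helper name is illustrative, and port_id /
flow are assumed to come from an earlier rte_flow_create() of the parent
rule:

#include <inttypes.h>
#include <stdio.h>
#include <rte_flow.h>

/* Hypothetical helper: read the accumulated counters of a parent flow */
static int
query_parent_flow_count(uint16_t port_id, struct rte_flow *flow)
{
        const struct rte_flow_action count_action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_query_count count = { .reset = 0 };
        struct rte_flow_error error;
        int rc;

        rc = rte_flow_query(port_id, flow, &count_action, &count, &error);
        if (rc)
                return rc;

        if (count.hits_set && count.bytes_set)
                printf("parent flow: %" PRIu64 " pkts, %" PRIu64 " bytes\n",
                       count.hits, count.bytes);
        return 0;
}
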
@@ -587,10 +587,16 @@ bnxt_pmd_global_tunnel_set(struct bnxt_ulp_context *ulp_ctx,
return -EINVAL;
}
- if (udp_port)
+ if (udp_port) {
rc = bnxt_udp_tunnel_port_add_op(eth_dev, &udp_tunnel);
- else
- rc = bnxt_udp_tunnel_port_del_op(eth_dev, &udp_tunnel);
+ } else {
+ /* TODO: Make the counters shareable so the resource
+ * free can be synced up between core dpdk path and
+ * the tf path.
+ */
+ if (eth_dev->data->dev_started != 0)
+ rc = bnxt_udp_tunnel_port_del_op(eth_dev, &udp_tunnel);
+ }
} else {
bp = bnxt_pmd_get_bp(port_id);
if (!bp) {
@@ -1176,7 +1176,7 @@ ulp_default_flow_db_cfa_action_get(struct bnxt_ulp_context *ulp_ctx,
}
/* internal validation function for parent flow tbl */
-static struct ulp_fdb_parent_info *
+struct ulp_fdb_parent_info *
ulp_flow_db_pc_db_entry_get(struct bnxt_ulp_context *ulp_ctxt,
uint32_t pc_idx)
{
@@ -1634,6 +1634,8 @@ ulp_flow_db_parent_flow_create(struct bnxt_ulp_mapper_parms *parms)
}
}
+ /* Set parent flow entry idx in stats cache entry */
+ ulp_sc_mgr_set_pc_idx(parms->ulp_ctx, parms->flow_id, pc_idx);
return 0;
}
@@ -417,4 +417,17 @@ void ulp_flow_db_shared_session_set(struct ulp_flow_db_res_params *res,
*/
enum bnxt_ulp_session_type
ulp_flow_db_shared_session_get(struct ulp_flow_db_res_params *res);
+
+/*
+ * Get the parent flow table info
+ *
+ * ulp_ctxt [in] Ptr to ulp_context
+ * pc_idx [in] The index to parent child db
+ *
+ * returns Pointer to the parent flow tbl entry
+ */
+struct ulp_fdb_parent_info *
+ulp_flow_db_pc_db_entry_get(struct bnxt_ulp_context *ulp_ctxt,
+ uint32_t pc_idx);
+
#endif /* _ULP_FLOW_DB_H_ */
@@ -441,8 +441,17 @@ int ulp_sc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
{
struct ulp_sc_tfc_stats_cache_entry *sce;
struct bnxt_ulp_sc_info *ulp_sc_info;
+ struct ulp_fdb_parent_info *pc_entry;
+ struct bnxt_ulp_flow_db *flow_db;
+ uint32_t max_array;
+ uint32_t child_fid;
+ uint32_t a_idx;
+ uint32_t f2_cnt;
+ uint64_t *t;
+ uint64_t bs;
int rc = 0;
+ /* Get stats cache info */
ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
if (!ulp_sc_info)
return -ENODEV;
@@ -450,18 +459,66 @@ int ulp_sc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
sce = ulp_sc_info->stats_cache_tbl;
sce += flow_id;
- /* If entry is not valid return an error */
- if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID))
- return -EBUSY;
-
- count->hits = sce->packet_count;
- count->hits_set = 1;
- count->bytes = sce->byte_count;
- count->bytes_set = 1;
+ /* To handle the parent flow */
+ if (sce->flags & ULP_SC_ENTRY_FLAG_PARENT) {
+ flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ctxt);
+ if (!flow_db) {
+ BNXT_DRV_DBG(ERR, "parent child db validation failed\n");
+ return -EINVAL;
+ }
- if (count->reset)
- sce->reset = true;
+ /* Validate the arguments and parent child entry */
+ pc_entry = ulp_flow_db_pc_db_entry_get(ctxt, sce->pc_idx);
+ if (!pc_entry) {
+ BNXT_DRV_DBG(ERR, "failed to get the parent child entry\n");
+ return -EINVAL;
+ }
+ t = pc_entry->child_fid_bitset;
+ f2_cnt = pc_entry->f2_cnt;
+ max_array = flow_db->parent_child_db.child_bitset_size * 8 / ULP_INDEX_BITMAP_SIZE;
+
+ /* Iterate all possible child flows */
+ for (a_idx = 0; (a_idx < max_array) && f2_cnt; a_idx++) {
+ /* If it is zero, then check the next bitset */
+ bs = t[a_idx];
+ if (!bs)
+ continue;
+
+ /* check one bitset */
+ do {
+ /* get the next child fid */
+ child_fid = (a_idx * ULP_INDEX_BITMAP_SIZE) + rte_clz64(bs);
+ sce = ulp_sc_info->stats_cache_tbl;
+ sce += child_fid;
+
+ /* clear the bit for this child flow */
+ ULP_INDEX_BITMAP_RESET(bs, child_fid);
+ f2_cnt--;
+
+ /* skip flows that have no counter action */
+ if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID))
+ continue;
+ count->hits += sce->packet_count;
+ count->hits_set = 1;
+ count->bytes += sce->byte_count;
+ count->bytes_set = 1;
+ } while (bs && f2_cnt);
+ }
+ } else {
+ /* To handle regular or child flows */
+ /* If entry is not valid return an error */
+ if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID))
+ return -EBUSY;
+
+ count->hits = sce->packet_count;
+ count->hits_set = 1;
+ count->bytes = sce->byte_count;
+ count->bytes_set = 1;
+
+ if (count->reset)
+ sce->reset = true;
+ }
return rc;
}
@@ -491,6 +548,8 @@ int ulp_sc_mgr_entry_alloc(struct bnxt_ulp_mapper_parms *parms,
memset(sce, 0, sizeof(*sce));
sce->ctxt = parms->ulp_ctx;
sce->flags |= ULP_SC_ENTRY_FLAG_VALID;
+ if (parms->parent_flow)
+ sce->flags |= ULP_SC_ENTRY_FLAG_PARENT;
sce->handle = counter_handle;
sce->dir = tbl->direction;
ulp_sc_info->num_entries++;
@@ -524,3 +583,34 @@ void ulp_sc_mgr_entry_free(struct bnxt_ulp_context *ulp,
pthread_mutex_unlock(&ulp_sc_info->sc_lock);
}
+
+/*
+ * Set pc_idx for the flow if stat cache info is valid
+ *
+ * ctxt [in] The ulp context for the flow counter manager
+ *
+ * flow_id [in] The HW flow ID
+ *
+ * pc_idx [in] The parent flow entry idx
+ *
+ */
+void ulp_sc_mgr_set_pc_idx(struct bnxt_ulp_context *ctxt,
+ uint32_t flow_id,
+ uint32_t pc_idx)
+{
+ struct ulp_sc_tfc_stats_cache_entry *sce;
+ struct bnxt_ulp_sc_info *ulp_sc_info;
+
+ /* Get stats cache info */
+ ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
+ if (!ulp_sc_info)
+ return;
+
+ pthread_mutex_lock(&ulp_sc_info->sc_lock);
+
+ sce = ulp_sc_info->stats_cache_tbl;
+ sce += flow_id;
+ sce->pc_idx = pc_idx & ULP_SC_PC_IDX_MASK;
+
+ pthread_mutex_unlock(&ulp_sc_info->sc_lock);
+}
@@ -13,13 +13,16 @@
#define ULP_FLAG_SC_THREAD BIT(0)
#define ULP_SC_ENTRY_FLAG_VALID BIT(0)
+#define ULP_SC_ENTRY_FLAG_PARENT BIT(1)
+#define ULP_SC_PC_IDX_MASK 0xFFFFF
#define ULP_SC_BATCH_SIZE 64
#define ULP_SC_PAGE_SIZE 4096
struct ulp_sc_tfc_stats_cache_entry {
struct bnxt_ulp_context *ctxt;
- uint32_t flags;
+ uint32_t flags : 8;
+ uint32_t pc_idx : 24;
uint64_t timestamp;
uint64_t handle;
uint8_t dir;
@@ -137,6 +140,19 @@ int ulp_sc_mgr_entry_alloc(struct bnxt_ulp_mapper_parms *parms,
void ulp_sc_mgr_entry_free(struct bnxt_ulp_context *ulp,
uint32_t fid);
+
+/*
+ * Set pc_idx for the flow if stat cache info is valid
+ *
+ * ctxt [in] The ulp context for the flow counter manager
+ * flow_id [in] The HW flow ID
+ * pc_idx [in] The parent flow entry idx
+ *
+ */
+void ulp_sc_mgr_set_pc_idx(struct bnxt_ulp_context *ctxt,
+ uint32_t flow_id,
+ uint32_t pc_idx);
+
extern const struct bnxt_ulp_sc_core_ops ulp_sc_tfc_core_ops;
#endif /* _ULP_SC_MGR_H_ */
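
For reference, the parent branch of ulp_sc_mgr_query_count_get() above
walks the parent-child DB's child fid bitset with rte_clz64(). Below is a
standalone sketch of that walk, assuming the same 64-bit, MSB-first bit
numbering as the driver's ULP_INDEX_BITMAP_* macros; the helper and
constant names here are illustrative, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>
#include <rte_bitops.h>

#define BITS_PER_WORD 64	/* assumed to match ULP_INDEX_BITMAP_SIZE */

/* Clear bit 'idx', numbering bits from the MSB of each 64-bit word */
static inline void
bitmap_msb_reset(uint64_t *word, uint32_t idx)
{
        *word &= ~(UINT64_C(1) << (BITS_PER_WORD - 1 - (idx % BITS_PER_WORD)));
}

/* Visit every set bit (child fid) until 'f2_cnt' children are seen */
static void
walk_child_fids(const uint64_t *bitset, uint32_t nwords, uint32_t f2_cnt)
{
        uint32_t a_idx, child_fid;
        uint64_t bs;

        for (a_idx = 0; a_idx < nwords && f2_cnt; a_idx++) {
                bs = bitset[a_idx];
                while (bs && f2_cnt) {
                        /* leading-zero count of an MSB-first word gives the
                         * next set bit
                         */
                        child_fid = a_idx * BITS_PER_WORD + rte_clz64(bs);
                        bitmap_msb_reset(&bs, child_fid);
                        f2_cnt--;
                        /* the real code accumulates sce[child_fid] here */
                        printf("child fid %u\n", child_fid);
                }
        }
}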