[v8,06/47] net/bnxt: tf_core: TF support flow scale query

Message ID 20241107135254.1611676-7-sriharsha.basavapatna@broadcom.com (mailing list archive)
State Accepted, archived
Delegated to: Ajit Khaparde
Series TruFlow update for Thor2

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Sriharsha Basavapatna Nov. 7, 2024, 1:52 p.m. UTC
From: Shuanglin Wang <shuanglin.wang@broadcom.com>

TF supports the flow scale query feature for the OVS application.
Resource usage is tracked when a TF session is opened and when
flows are added or deleted. The tracked resources include WC TCAM,
EM, Action, Counter, Meter, ACT_ENCAP/ACT_MODIFY, and SP_SMAC.
Users can query the resource usage using niccli.
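
For reference, below is a minimal sketch of how a driver-side caller
could exercise the query API added by this patch. It assumes a valid
struct tf *tfp session handle on a Thor (P5) device with the feature
compiled in; example_flow_scale_query is a hypothetical helper name
and error handling is reduced to returning the code.

/* Sketch: push locally tracked usage to firmware, then read it back. */
static int example_flow_scale_query(struct tf *tfp)
{
	struct tf_query_resc_usage_parms parms = { 0 };
	int rc;

	/* Sync the RX usage buffer for all resource types to firmware */
	rc = tf_update_resc_usage(tfp, TF_DIR_RX, TF_FLOW_RESC_TYPE_ALL);
	if (rc)
		return rc;

	/* Query the WC TCAM usage state back from firmware */
	parms.dir = TF_DIR_RX;
	parms.flow_resc_type = TF_FLOW_RESC_TYPE_WCTCAM;
	parms.size = sizeof(parms.data);
	rc = tf_query_resc_usage(tfp, &parms);
	if (rc)
		return rc;

	/* parms.data now holds parms.size bytes of usage state */
	return 0;
}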

Several improvements to the flow scale query feature:
1. Some default rules require both RX and TX resources;
   the usage state needs to be updated in both directions.
2. Update the resource usage state for regular flows only.
3. Added a buffer dirty state to avoid unnecessary
   state syncs with firmware (see the sketch after this list).
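
To illustrate item 3, here is a rough sketch (not taken verbatim from
the patch) of how the per-direction dirty flag can gate the firmware
sync. example_resc_usage_sync is a hypothetical helper; resc_usage_control,
tf_resc_usage[] and tf_msg_set_resc_usage() are introduced by this patch
in tf_resources.c and tf_msg.c.

/* Illustrative only: skip the HWRM message when nothing changed. */
static int example_resc_usage_sync(struct tf *tfp, enum tf_dir dir)
{
	int rc;

	if (!resc_usage_control.buffer_dirty[dir])
		return 0;

	rc = tf_msg_set_resc_usage(tfp, dir,
				   HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ALL,
				   sizeof(tf_resc_usage[dir]),
				   (uint8_t *)&tf_resc_usage[dir]);
	if (!rc)
		resc_usage_control.buffer_dirty[dir] = 0;

	return rc;
}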

This feature is disabled by default. Use the build flag
-DTF_FLOW_SCALE_QUERY to enable it.

Signed-off-by: Shuanglin Wang <shuanglin.wang@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
---
 drivers/net/bnxt/hsi_struct_def_dpdk.h        | 348 +++++++++--
 drivers/net/bnxt/tf_core/cfa_tcam_mgr.c       |  57 +-
 .../net/bnxt/tf_core/cfa_tcam_mgr_device.h    |   9 +
 .../net/bnxt/tf_core/cfa_tcam_mgr_session.c   |  13 +
 .../net/bnxt/tf_core/cfa_tcam_mgr_session.h   |   3 +
 drivers/net/bnxt/tf_core/meson.build          |   1 +
 drivers/net/bnxt/tf_core/tf_core.c            | 150 +++++
 drivers/net/bnxt/tf_core/tf_core.h            |  52 ++
 drivers/net/bnxt/tf_core/tf_device.h          |  65 +++
 drivers/net/bnxt/tf_core/tf_device_p4.c       |  10 +
 drivers/net/bnxt/tf_core/tf_device_p58.c      | 105 ++++
 .../net/bnxt/tf_core/tf_em_hash_internal.c    |  28 +-
 drivers/net/bnxt/tf_core/tf_em_internal.c     |   7 +
 drivers/net/bnxt/tf_core/tf_msg.c             | 175 ++++++
 drivers/net/bnxt/tf_core/tf_msg.h             |  57 ++
 drivers/net/bnxt/tf_core/tf_resources.c       | 542 ++++++++++++++++++
 drivers/net/bnxt/tf_core/tf_resources.h       | 129 +++++
 drivers/net/bnxt/tf_core/tf_rm.c              |  39 +-
 drivers/net/bnxt/tf_core/tf_session.c         |  10 +
 drivers/net/bnxt/tf_ulp/ulp_mapper.c          |  16 +
 20 files changed, 1746 insertions(+), 70 deletions(-)
 create mode 100644 drivers/net/bnxt/tf_core/tf_resources.c
  

Patch

diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index 9beacd94aa..d8c9e5a960 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -1,5 +1,5 @@ 
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2014-2023 Broadcom Inc.
+ * Copyright (c) 2014-2024 Broadcom Inc.
  * All rights reserved.
  *
  * DO NOT MODIFY!!! This file is automatically generated.
@@ -836,6 +836,10 @@  struct cmd_nums {
 	#define HWRM_TF_IF_TBL_SET                        UINT32_C(0x2fe)
 	/* Experimental */
 	#define HWRM_TF_IF_TBL_GET                        UINT32_C(0x2ff)
+	/* Experimental */
+	#define HWRM_TF_RESC_USAGE_SET                    UINT32_C(0x300)
+	/* Experimental */
+	#define HWRM_TF_RESC_USAGE_QUERY                  UINT32_C(0x301)
 	/* TruFlow command to check firmware table scope capabilities. */
 	#define HWRM_TFC_TBL_SCOPE_QCAPS                  UINT32_C(0x380)
 	/* TruFlow command to allocate a table scope ID and create the pools. */
@@ -14960,32 +14964,18 @@  struct hwrm_func_qcaps_output {
 	uint16_t	xid_partition_cap;
 	/*
 	 * When this bit is '1', it indicates that FW is capable of
-	 * supporting partition based XID management for KTLS TX
+	 * supporting partition based XID management for Tx crypto
 	 * key contexts.
 	 */
-	#define HWRM_FUNC_QCAPS_OUTPUT_XID_PARTITION_CAP_KTLS_TKC \
+	#define HWRM_FUNC_QCAPS_OUTPUT_XID_PARTITION_CAP_TX_CK \
 		UINT32_C(0x1)
 	/*
 	 * When this bit is '1', it indicates that FW is capable of
-	 * supporting partition based XID management for KTLS RX
+	 * supporting partition based XID management for Rx crypto
 	 * key contexts.
 	 */
-	#define HWRM_FUNC_QCAPS_OUTPUT_XID_PARTITION_CAP_KTLS_RKC \
+	#define HWRM_FUNC_QCAPS_OUTPUT_XID_PARTITION_CAP_RX_CK \
 		UINT32_C(0x2)
-	/*
-	 * When this bit is '1', it indicates that FW is capable of
-	 * supporting partition based XID management for QUIC TX
-	 * key contexts.
-	 */
-	#define HWRM_FUNC_QCAPS_OUTPUT_XID_PARTITION_CAP_QUIC_TKC \
-		UINT32_C(0x4)
-	/*
-	 * When this bit is '1', it indicates that FW is capable of
-	 * supporting partition based XID management for QUIC RX
-	 * key contexts.
-	 */
-	#define HWRM_FUNC_QCAPS_OUTPUT_XID_PARTITION_CAP_QUIC_RKC \
-		UINT32_C(0x8)
 	/*
 	 * This value uniquely identifies the hardware NIC used by the
 	 * function. The value returned will be the same for all functions.
@@ -15804,8 +15794,21 @@  struct hwrm_func_qcfg_output {
 	 * initialize_fw.
 	 */
 	uint32_t	roce_max_gid_per_vf;
-	/* Bitmap of context types that have XID partition enabled. */
+	/*
+	 * Bitmap of context types that have XID partition enabled.
+	 * Only valid for PF.
+	 */
 	uint16_t	xid_partition_cfg;
+	/*
+	 * When this bit is '1', it indicates that driver enables XID
+	 * partition on Tx crypto key contexts.
+	 */
+	#define HWRM_FUNC_QCFG_OUTPUT_XID_PARTITION_CFG_TX_CK     UINT32_C(0x1)
+	/*
+	 * When this bit is '1', it indicates that driver enables XID
+	 * partition on Rx crypto key contexts.
+	 */
+	#define HWRM_FUNC_QCFG_OUTPUT_XID_PARTITION_CFG_RX_CK     UINT32_C(0x2)
 	uint8_t	unused_7;
 	/*
 	 * This field is used in Output records to indicate that the output
@@ -16886,34 +16889,20 @@  struct hwrm_func_cfg_input {
 	/* Number of GIDs per VF. Only valid for PF. */
 	uint32_t	roce_max_gid_per_vf;
 	/*
-	 * Bitmap of context kinds that have XID partition enabled.
+	 * Bitmap of context types that have XID partition enabled.
 	 * Only valid for PF.
 	 */
 	uint16_t	xid_partition_cfg;
 	/*
 	 * When this bit is '1', it indicates that driver enables XID
-	 * partition on KTLS TX key contexts.
-	 */
-	#define HWRM_FUNC_CFG_INPUT_XID_PARTITION_CFG_KTLS_TKC \
-		UINT32_C(0x1)
-	/*
-	 * When this bit is '1', it indicates that driver enables XID
-	 * partition on KTLS RX key contexts.
+	 * partition on Tx crypto key contexts.
 	 */
-	#define HWRM_FUNC_CFG_INPUT_XID_PARTITION_CFG_KTLS_RKC \
-		UINT32_C(0x2)
+	#define HWRM_FUNC_CFG_INPUT_XID_PARTITION_CFG_TX_CK     UINT32_C(0x1)
 	/*
 	 * When this bit is '1', it indicates that driver enables XID
-	 * partition on QUIC TX key contexts.
+	 * partition on Rx crypto key contexts.
 	 */
-	#define HWRM_FUNC_CFG_INPUT_XID_PARTITION_CFG_QUIC_TKC \
-		UINT32_C(0x4)
-	/*
-	 * When this bit is '1', it indicates that driver enables XID
-	 * partition on QUIC RX key contexts.
-	 */
-	#define HWRM_FUNC_CFG_INPUT_XID_PARTITION_CFG_QUIC_RKC \
-		UINT32_C(0x8)
+	#define HWRM_FUNC_CFG_INPUT_XID_PARTITION_CFG_RX_CK     UINT32_C(0x2)
 	uint16_t	unused_2;
 } __rte_packed;
 
@@ -22737,11 +22726,11 @@  struct hwrm_func_backing_store_cfg_v2_input {
 	/* TIM. */
 	#define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TIM \
 		UINT32_C(0xf)
-	/* Tx key context. */
-	#define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TKC \
+	/* Tx crypto key. */
+	#define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TX_CK \
 		UINT32_C(0x13)
-	/* Rx key context. */
-	#define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RKC \
+	/* Rx crypto key. */
+	#define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RX_CK \
 		UINT32_C(0x14)
 	/* Mid-path TQM ring. */
 	#define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_MP_TQM_RING \
@@ -22781,7 +22770,7 @@  struct hwrm_func_backing_store_cfg_v2_input {
 	 *    RE_CFA_LKUP (0), RE_CFA_ACT (1), TE_CFA_LKUP(2), TE_CFA_ACT (3)
 	 * 3. If the backing store type is XID partition, use the following
 	 *    instance value to map to context types:
-	 *    KTLS_TKC (0), KTLS_RKC (1), QUIC_TKC (2), QUIC_RKC (3)
+	 *    TX_CK (0), RX_CK (1)
 	 */
 	uint16_t	instance;
 	/* Control flags. */
@@ -22990,11 +22979,11 @@  struct hwrm_func_backing_store_qcfg_v2_input {
 	/* TIM. */
 	#define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_TIM \
 		UINT32_C(0xf)
-	/* Tx key context. */
-	#define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_TKC \
+	/* Tx crypto key. */
+	#define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_TX_CK \
 		UINT32_C(0x13)
-	/* Rx key context. */
-	#define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_RKC \
+	/* Rx crypto key. */
+	#define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_RX_CK \
 		UINT32_C(0x14)
 	/* Mid-path TQM ring. */
 	#define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_MP_TQM_RING \
@@ -23034,7 +23023,7 @@  struct hwrm_func_backing_store_qcfg_v2_input {
 	 *    RE_CFA_LKUP (0), RE_CFA_ACT (1), TE_CFA_LKUP(2), TE_CFA_ACT (3)
 	 * 3. If the backing store type is XID partition, use the following
 	 *    instance value to map to context types:
-	 *    KTLS_TKC (0), KTLS_RKC (1), QUIC_TKC (2), QUIC_RKC (3)
+	 *    TX_CK (0), RX_CK (1)
 	 */
 	uint16_t	instance;
 	uint8_t	rsvd[4];
@@ -23111,7 +23100,7 @@  struct hwrm_func_backing_store_qcfg_v2_output {
 	 *    RE_CFA_LKUP (0), RE_CFA_ACT (1), TE_CFA_LKUP(2), TE_CFA_ACT (3)
 	 * 3. If the backing store type is XID partition, use the following
 	 *    instance value to map to context types:
-	 *    KTLS_TKC (0), KTLS_RKC (1), QUIC_TKC (2), QUIC_RKC (3)
+	 *    TX_CK (0), RX_CK (1)
 	 */
 	uint16_t	instance;
 	/* Control flags. */
@@ -23190,6 +23179,7 @@  struct hwrm_func_backing_store_qcfg_v2_output {
 	 * | VINC |            vnic_split_entries                      |
 	 * | MRAV |            mrav_split_entries                      |
 	 * | TS   |             ts_split_entries                       |
+	 * | CK   |             ck_split_entries                       |
 	 */
 	uint32_t	split_entry_0;
 	/* Split entry #1. */
@@ -23271,6 +23261,25 @@  struct ts_split_entries {
 	uint32_t	rsvd2[2];
 } __rte_packed;
 
+/* Common structure to cast crypto key split entries. This casting is required
+ * in the following HWRM command inputs/outputs if the backing store type is
+ * TX_CK or RX_CK.
+ * 1. hwrm_func_backing_store_cfg_v2_input
+ * 2. hwrm_func_backing_store_qcfg_v2_output
+ * 3. hwrm_func_backing_store_qcaps_v2_output
+ */
+/* ck_split_entries (size:128b/16B) */
+struct ck_split_entries {
+	/*
+	 * Number of QUIC backing store entries. That means the number of KTLS
+	 * backing store entries is the difference between this number and the
+	 * total number of crypto key entries.
+	 */
+	uint32_t	num_quic_entries;
+	uint32_t	rsvd;
+	uint32_t	rsvd2[2];
+} __rte_packed;
+
 /************************************
  * hwrm_func_backing_store_qcaps_v2 *
  ************************************/
@@ -23490,7 +23499,7 @@  struct hwrm_func_backing_store_qcaps_v2_output {
 	 *    RE_CFA_LKUP (0), RE_CFA_ACT (1), TE_CFA_LKUP(2), TE_CFA_ACT (3)
 	 * 3. If the backing store type is VF XID partition in-use table, use
 	 *    the following bits to map to context types:
-	 *    KTLS_TKC (0), KTLS_RKC (1), QUIC_TKC (2), QUIC_RKC (3)
+	 *    TX_CK (0), RX_CK (1)
 	 */
 	uint32_t	instance_bit_map;
 	/*
@@ -23587,6 +23596,7 @@  struct hwrm_func_backing_store_qcaps_v2_output {
 	 * | VINC |            vnic_split_entries                      |
 	 * | MRAV |            mrav_split_entries                      |
 	 * | TS   |             ts_split_entries                       |
+	 * | CK   |             ck_split_entries                       |
 	 */
 	uint32_t	split_entry_0;
 	/* Split entry #1. */
@@ -27288,8 +27298,14 @@  struct hwrm_port_phy_qcfg_output {
 	/* QSFP112 */
 	#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP112 \
 		(UINT32_C(0x1e) << 24)
+	/* SFP-DD CMIS */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFPDD \
+		(UINT32_C(0x1f) << 24)
+	/* SFP CMIS */
+	#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_CSFP \
+		(UINT32_C(0x20) << 24)
 	#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_LAST \
-		HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP112
+		HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_CSFP
 	/*
 	 * This value represents the current configuration of
 	 * Forward Error Correction (FEC) on the port.
@@ -51794,7 +51810,7 @@  struct hwrm_cfa_lag_group_member_unrgtr_output {
  *****************************/
 
 
-/* hwrm_cfa_tls_filter_alloc_input (size:704b/88B) */
+/* hwrm_cfa_tls_filter_alloc_input (size:768b/96B) */
 struct hwrm_cfa_tls_filter_alloc_input {
 	/* The HWRM command request type. */
 	uint16_t	req_type;
@@ -51892,6 +51908,12 @@  struct hwrm_cfa_tls_filter_alloc_input {
 	 */
 	#define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
 		UINT32_C(0x400)
+	/*
+	 * This bit must be '1' for the quic_dst_connect_id field to be
+	 * configured.
+	 */
+	#define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_QUIC_DST_CONNECT_ID \
+		UINT32_C(0x800)
 	/*
 	 * This value identifies a set of CFA data structures used for an L2
 	 * context.
@@ -51970,10 +51992,12 @@  struct hwrm_cfa_tls_filter_alloc_input {
 	 */
 	uint16_t	dst_port;
 	/*
-	 * The Key Context Identifier (KID) for use with KTLS.
+	 * The Key Context Identifier (KID) for use with KTLS or QUIC.
 	 * KID is limited to 20-bits.
 	 */
 	uint32_t	kid;
+	/* The Destination Connection ID of QUIC. */
+	uint64_t	quic_dst_connect_id;
 } __rte_packed;
 
 /* hwrm_cfa_tls_filter_alloc_output (size:192b/24B) */
@@ -55766,6 +55790,222 @@  struct hwrm_tf_session_hotup_state_get_output {
 	uint8_t	valid;
 } __rte_packed;
 
+/**************************
+ * hwrm_tf_resc_usage_set *
+ **************************/
+
+
+/* hwrm_tf_resc_usage_set_input (size:1024b/128B) */
+struct hwrm_tf_resc_usage_set_input {
+	/* The HWRM command request type. */
+	uint16_t	req_type;
+	/*
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
+	 */
+	uint16_t	cmpl_ring;
+	/*
+	 * The sequence ID is used by the driver for tracking multiple
+	 * commands. This ID is treated as opaque data by the firmware and
+	 * the value is returned in the `hwrm_resp_hdr` upon completion.
+	 */
+	uint16_t	seq_id;
+	/*
+	 * The target ID of the command:
+	 * * 0x0-0xFFF8 - The function ID
+	 * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+	 * * 0xFFFD - Reserved for user-space HWRM interface
+	 * * 0xFFFF - HWRM
+	 */
+	uint16_t	target_id;
+	/*
+	 * A physical address pointer pointing to a host buffer that the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
+	 */
+	uint64_t	resp_addr;
+	/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+	uint32_t	fw_session_id;
+	/* Control flags. */
+	uint16_t	flags;
+	/* Indicates the flow direction. */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR     UINT32_C(0x1)
+	/* If this bit set to 0, then it indicates rx flow. */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_RX    UINT32_C(0x0)
+	/* If this bit is set to 1, then it indicates tx flow. */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_TX    UINT32_C(0x1)
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_LAST \
+		HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_TX
+	/* Indicate table data is being sent via DMA. */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DMA     UINT32_C(0x2)
+	/* Types of the resource to set their usage state. */
+	uint16_t	types;
+	/* WC TCAM Pool */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_WC_TCAM \
+		UINT32_C(0x1)
+	/* EM Internal Memory Pool */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_EM \
+		UINT32_C(0x2)
+	/* Meter Instance */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_METER \
+		UINT32_C(0x4)
+	/* Counter Record Table */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_COUNTER \
+		UINT32_C(0x8)
+	/* Action Record Table */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ACTION \
+		UINT32_C(0x10)
+	/* ACT MODIFY/ENCAP Record Table */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ACT_MOD_ENCAP \
+		UINT32_C(0x20)
+	/* Source Property SMAC Record Table */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_SP_SMAC \
+		UINT32_C(0x40)
+	/* All Resource Types */
+	#define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ALL \
+		UINT32_C(0x80)
+	/* Size of the data to set. */
+	uint16_t	size;
+	/* unused */
+	uint8_t	unused1[6];
+	/* Data to be set. */
+	uint8_t	data[96];
+} __rte_packed;
+
+/* hwrm_tf_resc_usage_set_output (size:128b/16B) */
+struct hwrm_tf_resc_usage_set_output {
+	/* The specific error status for the command. */
+	uint16_t	error_code;
+	/* The HWRM command request type. */
+	uint16_t	req_type;
+	/* The sequence ID from the original command. */
+	uint16_t	seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t	resp_len;
+	/* unused. */
+	uint8_t	unused0[7];
+	/*
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal
+	 * processor, the order of writes has to be such that this field
+	 * is written last.
+	 */
+	uint8_t	valid;
+} __rte_packed;
+
+/****************************
+ * hwrm_tf_resc_usage_query *
+ ****************************/
+
+
+/* hwrm_tf_resc_usage_query_input (size:256b/32B) */
+struct hwrm_tf_resc_usage_query_input {
+	/* The HWRM command request type. */
+	uint16_t	req_type;
+	/*
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
+	 */
+	uint16_t	cmpl_ring;
+	/*
+	 * The sequence ID is used by the driver for tracking multiple
+	 * commands. This ID is treated as opaque data by the firmware and
+	 * the value is returned in the `hwrm_resp_hdr` upon completion.
+	 */
+	uint16_t	seq_id;
+	/*
+	 * The target ID of the command:
+	 * * 0x0-0xFFF8 - The function ID
+	 * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+	 * * 0xFFFD - Reserved for user-space HWRM interface
+	 * * 0xFFFF - HWRM
+	 */
+	uint16_t	target_id;
+	/*
+	 * A physical address pointer pointing to a host buffer that the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
+	 */
+	uint64_t	resp_addr;
+	/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+	uint32_t	fw_session_id;
+	/* Control flags. */
+	uint16_t	flags;
+	/* Indicates the flow direction. */
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR     UINT32_C(0x1)
+	/* If this bit set to 0, then it indicates rx flow. */
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_RX    UINT32_C(0x0)
+	/* If this bit is set to 1, then it indicates tx flow. */
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX    UINT32_C(0x1)
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_LAST \
+		HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX
+	/* unused. */
+	uint8_t	unused0[2];
+	/* Types of the resource to retrieve their usage state. */
+	uint16_t	types;
+	/* WC TCAM Pool */
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_WC_TCAM \
+		UINT32_C(0x1)
+	/* EM Internal Memory Pool */
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_EM \
+		UINT32_C(0x2)
+	/* Meter Instance */
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_METER \
+		UINT32_C(0x4)
+	/* Counter Record Table */
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_COUNTER \
+		UINT32_C(0x8)
+	/* Action Record Table */
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ACTION \
+		UINT32_C(0x10)
+	/* ACT MODIFY/ENCAP Record Table */
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ACT_MOD_ENCAP \
+		UINT32_C(0x20)
+	/* Source Property SMAC Record Table */
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_SP_SMAC \
+		UINT32_C(0x40)
+	/* All Resource Types */
+	#define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ALL \
+		UINT32_C(0x80)
+	/* unused */
+	uint8_t	unused1[6];
+} __rte_packed;
+
+/* hwrm_tf_resc_usage_query_output (size:960b/120B) */
+struct hwrm_tf_resc_usage_query_output {
+	/* The specific error status for the command. */
+	uint16_t	error_code;
+	/* The HWRM command request type. */
+	uint16_t	req_type;
+	/* The sequence ID from the original command. */
+	uint16_t	seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t	resp_len;
+	/* Response code. */
+	uint32_t	resp_code;
+	/* Response size. */
+	uint16_t	size;
+	/* unused */
+	uint16_t	unused0;
+	/* Response data. */
+	uint8_t	data[96];
+	/* unused */
+	uint8_t	unused1[7];
+	/*
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal
+	 * processor, the order of writes has to be such that this field
+	 * is written last.
+	 */
+	uint8_t	valid;
+} __rte_packed;
+
 /****************************
  * hwrm_tfc_tbl_scope_qcaps *
  ****************************/
diff --git a/drivers/net/bnxt/tf_core/cfa_tcam_mgr.c b/drivers/net/bnxt/tf_core/cfa_tcam_mgr.c
index 9df2d2b937..349f52caba 100644
--- a/drivers/net/bnxt/tf_core/cfa_tcam_mgr.c
+++ b/drivers/net/bnxt/tf_core/cfa_tcam_mgr.c
@@ -20,15 +20,6 @@ 
 
 #define TF_TCAM_SLICE_INVALID (-1)
 
-/*
- * The following macros are for setting the entry status in a row entry.
- * row is (struct cfa_tcam_mgr_table_rows_0 *)
- */
-#define ROW_ENTRY_INUSE(row, entry)  ((row)->entry_inuse &   (1U << (entry)))
-#define ROW_ENTRY_SET(row, entry)    ((row)->entry_inuse |=  (1U << (entry)))
-#define ROW_ENTRY_CLEAR(row, entry)  ((row)->entry_inuse &= ~(1U << (entry)))
-#define ROW_INUSE(row)               ((row)->entry_inuse != 0)
-
 static struct cfa_tcam_mgr_entry_data *entry_data[TF_TCAM_MAX_SESSIONS];
 
 static int global_data_initialized[TF_TCAM_MAX_SESSIONS];
@@ -685,6 +676,27 @@  cfa_tcam_mgr_rows_combine(int sess_idx, struct cfa_tcam_mgr_context *context,
 				if (entry_moved)
 					break;
 			}
+
+#ifdef TF_FLOW_SCALE_QUERY
+			/* CFA: update usage state when entries are moved */
+			if (entry_moved) {
+				if (tf_tcam_usage_update(context->tfp->session->session_id.id,
+							 parms->dir,
+							 parms->type,
+							 to_row,
+							 TF_RESC_ALLOC)) {
+					CFA_TCAM_MGR_TRACE(DEBUG, "TF tcam usage update failed\n");
+				}
+				if (tf_tcam_usage_update(context->tfp->session->session_id.id,
+							 parms->dir,
+							 parms->type,
+							 from_row,
+							 TF_RESC_FREE)) {
+					CFA_TCAM_MGR_TRACE(DEBUG, "TF tcam usage update failed\n");
+				}
+			}
+#endif /* TF_FLOW_SCALE_QUERY */
+
 			if (ROW_INUSE(from_row))
 				entry_moved = false;
 			else
@@ -1207,6 +1219,11 @@  cfa_tcam_mgr_bind(struct cfa_tcam_mgr_context *context,
 		return rc;
 	}
 
+#ifdef TF_FLOW_SCALE_QUERY
+	/* Initialize the WC TCAM usage state */
+	tf_tcam_usage_init(tfp);
+#endif /* TF_FLOW_SCALE_QUERY */
+
 	return 0;
 }
 
@@ -1352,6 +1369,17 @@  cfa_tcam_mgr_alloc(struct cfa_tcam_mgr_context *context,
 
 	parms->id = new_entry_id;
 
+#ifdef TF_FLOW_SCALE_QUERY
+	/* CFA update usage state */
+	if (tf_tcam_usage_update(session_id,
+				 parms->dir,
+				 parms->type,
+				 row,
+				 TF_RESC_ALLOC)) {
+		CFA_TCAM_MGR_TRACE(DEBUG, "TF tcam usage update failed\n");
+	}
+#endif /* TF_FLOW_SCALE_QUERY */
+
 	return 0;
 }
 
@@ -1443,6 +1471,17 @@  cfa_tcam_mgr_free(struct cfa_tcam_mgr_context *context,
 					    table_data->max_slices);
 		ROW_ENTRY_CLEAR(row, entry->slice);
 
+#ifdef TF_FLOW_SCALE_QUERY
+		/* CFA update usage state */
+		if (tf_tcam_usage_update(session_id,
+					 parms->dir,
+					 parms->type,
+					 row,
+					 TF_RESC_FREE)) {
+			CFA_TCAM_MGR_TRACE(DEBUG, "TF tcam usage update failed\n");
+		}
+#endif /* TF_FLOW_SCALE_QUERY */
+
 		new_row_to_free = entry->row;
 		cfa_tcam_mgr_rows_combine(sess_idx, context, parms, table_data,
 					  new_row_to_free);
diff --git a/drivers/net/bnxt/tf_core/cfa_tcam_mgr_device.h b/drivers/net/bnxt/tf_core/cfa_tcam_mgr_device.h
index 6ab9b5e118..c24e5c8389 100644
--- a/drivers/net/bnxt/tf_core/cfa_tcam_mgr_device.h
+++ b/drivers/net/bnxt/tf_core/cfa_tcam_mgr_device.h
@@ -42,6 +42,15 @@  TF_TCAM_TABLE_ROWS_DEF(2);
 TF_TCAM_TABLE_ROWS_DEF(4);
 TF_TCAM_TABLE_ROWS_DEF(8);
 
+/*
+ * The following macros are for setting the entry status in a row entry.
+ * row is (struct cfa_tcam_mgr_table_rows_0 *)
+ */
+#define ROW_ENTRY_INUSE(row, entry)  ((row)->entry_inuse &   (1U << (entry)))
+#define ROW_ENTRY_SET(row, entry)    ((row)->entry_inuse |=  (1U << (entry)))
+#define ROW_ENTRY_CLEAR(row, entry)  ((row)->entry_inuse &= ~(1U << (entry)))
+#define ROW_INUSE(row)               ((row)->entry_inuse != 0)
+
 #define TF_TCAM_MAX_ENTRIES (L2_CTXT_TCAM_RX_MAX_ENTRIES +	\
 			     L2_CTXT_TCAM_TX_MAX_ENTRIES +	\
 			     PROF_TCAM_RX_MAX_ENTRIES +		\
diff --git a/drivers/net/bnxt/tf_core/cfa_tcam_mgr_session.c b/drivers/net/bnxt/tf_core/cfa_tcam_mgr_session.c
index 3d085bc69e..40bbcc54c8 100644
--- a/drivers/net/bnxt/tf_core/cfa_tcam_mgr_session.c
+++ b/drivers/net/bnxt/tf_core/cfa_tcam_mgr_session.c
@@ -74,6 +74,19 @@  cfa_tcam_mgr_session_find(unsigned int session_id)
 	return -CFA_TCAM_MGR_ERR_CODE(INVAL);
 }
 
+int
+cfa_tcam_mgr_session_empty(void)
+{
+	unsigned int sess_idx;
+
+	for (sess_idx = 0; sess_idx < ARRAY_SIZE(session_data); sess_idx++) {
+		if (session_data[sess_idx].session_id)
+			return 0;
+	}
+
+	return 1;
+}
+
 int
 cfa_tcam_mgr_session_add(unsigned int session_id)
 {
diff --git a/drivers/net/bnxt/tf_core/cfa_tcam_mgr_session.h b/drivers/net/bnxt/tf_core/cfa_tcam_mgr_session.h
index 69311b7e1d..7e75776686 100644
--- a/drivers/net/bnxt/tf_core/cfa_tcam_mgr_session.h
+++ b/drivers/net/bnxt/tf_core/cfa_tcam_mgr_session.h
@@ -19,6 +19,9 @@  cfa_tcam_mgr_get_session_from_context(struct cfa_tcam_mgr_context *context,
 int
 cfa_tcam_mgr_session_find(unsigned int session_id);
 
+int
+cfa_tcam_mgr_session_empty(void);
+
 int
 cfa_tcam_mgr_session_add(unsigned int session_id);
 
diff --git a/drivers/net/bnxt/tf_core/meson.build b/drivers/net/bnxt/tf_core/meson.build
index 13a71738a0..7d38ab8793 100644
--- a/drivers/net/bnxt/tf_core/meson.build
+++ b/drivers/net/bnxt/tf_core/meson.build
@@ -30,6 +30,7 @@  sources += files(
         'tf_msg.c',
         'tfp.c',
         'tf_rm.c',
+        'tf_resources.c',
         'tf_session.c',
         'tf_sram_mgr.c',
         'tf_tbl.c',
diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index 3a812bee3a..1c728aadd8 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -1101,6 +1101,21 @@  tf_alloc_tbl_entry(struct tf *tfp,
 
 	parms->idx = idx;
 
+#ifdef TF_FLOW_SCALE_QUERY
+	/* Update resource usage buffer */
+	if (!rc && dev->ops->tf_dev_update_tbl_usage_buffer) {
+		rc = dev->ops->tf_dev_update_tbl_usage_buffer(tfs->session_id.id,
+							      parms->dir,
+							      parms->type,
+							      TF_RESC_ALLOC);
+		if (rc) {
+			TFP_DRV_LOG(DEBUG,
+				    "%s: Table usage update failed!\n",
+				    tf_dir_2_str(parms->dir));
+		}
+	}
+#endif /* TF_FLOW_SCALE_QUERY */
+
 	return 0;
 }
 
@@ -1181,6 +1196,22 @@  tf_free_tbl_entry(struct tf *tfp,
 			return rc;
 		}
 	}
+
+#ifdef TF_FLOW_SCALE_QUERY
+	/* Update resource usage buffer */
+	if (!rc && dev->ops->tf_dev_update_tbl_usage_buffer) {
+		rc = dev->ops->tf_dev_update_tbl_usage_buffer(tfs->session_id.id,
+							      parms->dir,
+							      parms->type,
+							      TF_RESC_FREE);
+		if (rc) {
+			TFP_DRV_LOG(DEBUG,
+				    "%s: Table usage update failed!\n",
+				    tf_dir_2_str(parms->dir));
+		}
+	}
+#endif /* TF_FLOW_SCALE_QUERY */
+
 	return 0;
 }
 
@@ -2027,3 +2058,122 @@  int tf_get_session_hotup_state(struct tf *tfp,
 
 	return rc;
 }
+
+#ifdef TF_FLOW_SCALE_QUERY
+/* Update TF resource usage state with firmware */
+int tf_update_resc_usage(struct tf *tfp,
+			 enum tf_dir dir,
+			 enum tf_flow_resc_type flow_resc_type)
+{
+	int rc;
+	struct tf_session *tfs;
+	struct tf_dev_info *dev;
+	TF_CHECK_PARMS1(tfp);
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Retrieve the device information */
+	rc = tf_session_get_device(tfs, &dev);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup device, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Support Thor(P5) on the first session */
+	if (dev->type != TF_DEVICE_TYPE_P5 || tfs->session_id.internal.fw_session_id)
+		return rc;
+
+	if (dev->ops->tf_dev_update_resc_usage == NULL) {
+		rc = -EOPNOTSUPP;
+		TFP_DRV_LOG(ERR,
+			    "%s: Operation not supported, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	rc = dev->ops->tf_dev_update_resc_usage(tfp, dir, flow_resc_type);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Flow resource usage update failed, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	TFP_DRV_LOG(DEBUG,
+		    "%s: Flow resource usage updated: usage type %d\n",
+		    tf_dir_2_str(dir), flow_resc_type);
+
+	return 0;
+}
+
+/* Get TF resource usage state from firmware */
+int tf_query_resc_usage(struct tf *tfp,
+			struct tf_query_resc_usage_parms *parms)
+{
+	int rc;
+	struct tf_session *tfs;
+	struct tf_dev_info *dev;
+
+	TF_CHECK_PARMS2(tfp, parms);
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Retrieve the device information */
+	rc = tf_session_get_device(tfs, &dev);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup device, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Support Thor(P5) on the first session */
+	if (dev->type != TF_DEVICE_TYPE_P5 || tfs->session_id.internal.fw_session_id)
+		return rc;
+
+	if (dev->ops->tf_dev_query_resc_usage == NULL) {
+		rc = -EOPNOTSUPP;
+		TFP_DRV_LOG(ERR,
+			    "%s: Operation not supported, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	rc = dev->ops->tf_dev_query_resc_usage(tfp, parms);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Flow resource usage query failed, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	TFP_DRV_LOG(DEBUG,
+		    "%s: Flow resource usage queried successfully: usage type %d\n",
+		    tf_dir_2_str(parms->dir), parms->flow_resc_type);
+	return 0;
+}
+#endif /* TF_FLOW_SCALE_QUERY */
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index fd1ee2f454..90637c6508 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -2584,4 +2584,56 @@  struct tf_get_sram_policy_parms {
  */
 int tf_get_sram_policy(struct tf *tfp,
 		       struct tf_get_sram_policy_parms *parms);
+
+#ifdef TF_FLOW_SCALE_QUERY
+enum tf_flow_resc_type {
+	TF_FLOW_RESC_TYPE_WCTCAM,
+	TF_FLOW_RESC_TYPE_EM,
+	TF_FLOW_RESC_TYPE_METER,
+	TF_FLOW_RESC_TYPE_COUNTER,
+	TF_FLOW_RESC_TYPE_ACTION,
+	TF_FLOW_RESC_TYPE_ACT_MOD_ENCAP,
+	TF_FLOW_RESC_TYPE_SP_SMAC,
+	TF_FLOW_RESC_TYPE_ALL,
+};
+
+/**
+ * Update TF resource usage state with firmware
+ *
+ * Returns success or failure code.
+ */
+int tf_update_resc_usage(struct tf *tfp,
+			 enum tf_dir dir,
+			 enum tf_flow_resc_type flow_resc_type);
+
+/**
+ * tf_query_resc_usage parameter definition
+ */
+struct	tf_query_resc_usage_parms {
+	/**
+	 * [in] receive or transmit direction
+	 */
+	enum tf_dir dir;
+	/**
+	 * [in] RESC type
+	 */
+	enum tf_flow_resc_type flow_resc_type;
+	/**
+	 * [in] received buffer size
+	 */
+	uint32_t size;
+	/**
+	 * [out] buffer for query data
+	 */
+	uint8_t data[96];
+};
+/**
+ * Get TF resource usage state from firmware
+ *
+ * Returns success or failure code.
+ */
+int tf_query_resc_usage(struct tf *tfp,
+			struct tf_query_resc_usage_parms *parms);
+
+#endif /* TF_FLOW_SCALE_QUERY */
 #endif /* _TF_CORE_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_device.h b/drivers/net/bnxt/tf_core/tf_device.h
index 06c17a7212..0b0ca8b42f 100644
--- a/drivers/net/bnxt/tf_core/tf_device.h
+++ b/drivers/net/bnxt/tf_core/tf_device.h
@@ -1127,6 +1127,71 @@  struct tf_dev_ops {
 	 */
 	int (*tf_dev_get_sram_policy)(enum tf_dir dir,
 				      enum tf_sram_bank_id *bank_id);
+
+#ifdef TF_FLOW_SCALE_QUERY
+	/**
+	 * Update resource usage state with firmware
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] dir
+	 *   Receive or transmit direction
+	 *
+	 * [in] flow_resc_type
+	 *   Resource type to update its usage state
+	 *
+	 *    returns:
+	 *    0       - Success
+	 *    -EINVAL - Error
+	 */
+	int (*tf_dev_update_resc_usage)(struct tf *tfp,
+					enum tf_dir dir,
+					enum tf_flow_resc_type flow_resc_type);
+
+	/**
+	 * Query resource usage state from firmware
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] dir
+	 *   Receive or transmit direction
+	 *
+	 * [in] flow_resc_type
+	 *   Resource type to query its usage state
+	 *
+	 *    returns:
+	 *    0       - Success
+	 *    -EINVAL - Error
+	 */
+	int (*tf_dev_query_resc_usage)(struct tf *tfp,
+				       struct tf_query_resc_usage_parms *parms);
+
+	/**
+	 * Update buffer of table usage
+	 *
+	 * [in] session_id
+	 *   The TruFlow session id
+	 *
+	 * [in] dir
+	 *   Receive or transmit direction
+	 *
+	 * [in] tbl_type
+	 *   SRAM table type to update its usage state
+	 *
+	 * [in] resc_opt
+	 *   Allocate or free resource
+	 *
+	 *    returns:
+	 *    0       - Success
+	 *    -EINVAL - Error
+	 */
+	int (*tf_dev_update_tbl_usage_buffer)(uint32_t session_id,
+					      enum tf_dir dir,
+					      enum tf_tbl_type tbl_type,
+					      uint32_t resc_opt);
+#endif /* TF_FLOW_SCALE_QUERY */
 };
 
 /**
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c
index 4df1918bc5..6f6526e552 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c
@@ -512,6 +512,11 @@  const struct tf_dev_ops tf_dev_ops_p4_init = {
 	.tf_dev_get_sram_resources = NULL,
 	.tf_dev_set_sram_policy = NULL,
 	.tf_dev_get_sram_policy = NULL,
+#ifdef TF_FLOW_SCALE_QUERY
+	.tf_dev_update_resc_usage = NULL,
+	.tf_dev_query_resc_usage = NULL,
+	.tf_dev_update_tbl_usage_buffer = NULL,
+#endif /* TF_FLOW_SCALE_QUERY */
 };
 
 /**
@@ -570,4 +575,9 @@  const struct tf_dev_ops tf_dev_ops_p4 = {
 	.tf_dev_get_sram_resources = NULL,
 	.tf_dev_set_sram_policy = NULL,
 	.tf_dev_get_sram_policy = NULL,
+#ifdef TF_FLOW_SCALE_QUERY
+	.tf_dev_update_resc_usage = NULL,
+	.tf_dev_query_resc_usage = NULL,
+	.tf_dev_update_tbl_usage_buffer = NULL,
+#endif /* TF_FLOW_SCALE_QUERY */
 };
diff --git a/drivers/net/bnxt/tf_core/tf_device_p58.c b/drivers/net/bnxt/tf_core/tf_device_p58.c
index 51c260b5d7..8f915744a7 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p58.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p58.c
@@ -15,8 +15,10 @@ 
 #include "tf_if_tbl.h"
 #include "tfp.h"
 #include "tf_msg_common.h"
+ #include "tf_msg.h"
 #include "tf_tbl_sram.h"
 #include "tf_util.h"
+#include "tf_resources.h"
 
 #define TF_DEV_P58_PARIF_MAX 16
 #define TF_DEV_P58_PF_MASK 0xfUL
@@ -781,6 +783,99 @@  static int tf_dev_p58_get_sram_policy(enum tf_dir dir,
 	return 0;
 }
 
+#ifdef TF_FLOW_SCALE_QUERY
+/**
+ * Update resource usage to firmware.
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] dir
+ *   Receive or transmit direction
+ *
+ * [in] flow_resc_type
+ *   Types of the resource to update their usage state.
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+static int tf_dev_p58_update_resc_usage(struct tf *tfp,
+					enum tf_dir dir,
+					enum tf_flow_resc_type flow_resc_type)
+{
+	int rc;
+
+	struct cfa_tf_resc_usage *usage_state = &tf_resc_usage[dir];
+
+	flow_resc_type |= HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ALL;
+	rc = tf_msg_set_resc_usage(tfp,
+				   dir,
+				   flow_resc_type,
+				   sizeof(cfa_tf_resc_usage_t),
+				   (uint8_t *)usage_state);
+
+	return rc;
+}
+
+/**
+ * Query resource usage from firmware.
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in/out] parms
+ *   Pointer to parms structure
+ *
+ * Returns
+ *   - (0) if successful.
+ *   - (-EINVAL) on failure.
+ */
+static int tf_dev_p58_query_resc_usage(struct tf *tfp,
+				       struct tf_query_resc_usage_parms *parms)
+{
+	int rc = 0;
+
+	parms->size = sizeof(struct cfa_tf_resc_usage);
+	rc = tf_msg_query_resc_usage(tfp,
+				     parms->dir,
+				     parms->flow_resc_type,
+				     &parms->size,
+				     (void *)parms->data);
+	return rc;
+}
+
+/**
+ * Update buffer of table usage state
+ *
+ * [in] session_id
+ *   Pointer to TF handle
+ *
+ * [in] dir
+ *   Receive or transmit direction
+ *
+ * [in] tbl_type
+ *   SRAM table type to update its usage state
+ *
+ * [in] resc_opt
+ *   Allocate or free resource
+ *
+ *    returns:
+ *    0       - Success
+ *    -EINVAL - Error
+ */
+static int
+tf_dev_p58_update_tbl_usage_buffer(uint32_t session_id,
+				   enum tf_dir dir,
+				   enum tf_tbl_type tbl_type,
+				   enum tf_resc_opt resc_opt)
+{
+	int rc;
+	rc = tf_tbl_usage_update(session_id, dir, tbl_type, resc_opt);
+	return rc;
+}
+#endif /* TF_FLOW_SCALE_QUERY */
+
 /**
  * Truflow P58 device specific functions
  */
@@ -835,6 +930,11 @@  const struct tf_dev_ops tf_dev_ops_p58_init = {
 	.tf_dev_get_sram_resources = tf_dev_p58_get_sram_resources,
 	.tf_dev_set_sram_policy = tf_dev_p58_set_sram_policy,
 	.tf_dev_get_sram_policy = tf_dev_p58_get_sram_policy,
+#ifdef TF_FLOW_SCALE_QUERY
+	.tf_dev_update_resc_usage = tf_dev_p58_update_resc_usage,
+	.tf_dev_query_resc_usage = tf_dev_p58_query_resc_usage,
+	.tf_dev_update_tbl_usage_buffer = tf_dev_p58_update_tbl_usage_buffer,
+#endif /* TF_FLOW_SCALE_QUERY */
 };
 
 /**
@@ -894,4 +994,9 @@  const struct tf_dev_ops tf_dev_ops_p58 = {
 	.tf_dev_get_sram_resources = tf_dev_p58_get_sram_resources,
 	.tf_dev_set_sram_policy = tf_dev_p58_set_sram_policy,
 	.tf_dev_get_sram_policy = tf_dev_p58_get_sram_policy,
+#ifdef TF_FLOW_SCALE_QUERY
+	.tf_dev_update_resc_usage = tf_dev_p58_update_resc_usage,
+	.tf_dev_query_resc_usage = tf_dev_p58_query_resc_usage,
+	.tf_dev_update_tbl_usage_buffer = tf_dev_p58_update_tbl_usage_buffer,
+#endif /* TF_FLOW_SCALE_QUERY */
 };
diff --git a/drivers/net/bnxt/tf_core/tf_em_hash_internal.c b/drivers/net/bnxt/tf_core/tf_em_hash_internal.c
index 8fa78be226..eacff28c5f 100644
--- a/drivers/net/bnxt/tf_core/tf_em_hash_internal.c
+++ b/drivers/net/bnxt/tf_core/tf_em_hash_internal.c
@@ -108,6 +108,15 @@  tf_em_hash_insert_int_entry(struct tf *tfp,
 				     rptr_entry,
 				     0);
 	dpool_set_entry_data(pool, index, parms->flow_handle);
+
+#ifdef TF_FLOW_SCALE_QUERY
+	/* Update usage state buffer for EM */
+	tf_em_usage_update(tfs->session_id.id,
+			   parms->dir,
+			   num_of_entries,
+			   TF_RESC_ALLOC);
+#endif /* TF_FLOW_SCALE_QUERY */
+
 	return 0;
 }
 
@@ -124,6 +133,10 @@  tf_em_hash_delete_int_entry(struct tf *tfp,
 	int rc = 0;
 	struct tf_session *tfs;
 	struct dpool *pool;
+#ifdef TF_FLOW_SCALE_QUERY
+	uint32_t size;
+#endif /* TF_FLOW_SCALE_QUERY */
+
 	/* Retrieve the session information */
 	rc = tf_session_get_session(tfp, &tfs);
 	if (rc) {
@@ -137,11 +150,18 @@  tf_em_hash_delete_int_entry(struct tf *tfp,
 	rc = tf_msg_delete_em_entry(tfp, parms);
 
 	/* Return resource to pool */
-	if (rc == 0) {
-		pool = (struct dpool *)tfs->em_pool[parms->dir];
-		dpool_free(pool, parms->index);
-	}
+	pool = (struct dpool *)tfs->em_pool[parms->dir];
+
+#ifdef TF_FLOW_SCALE_QUERY
+	/* Update usage state buffer for EM */
+	size = DP_FLAGS_SIZE(pool->entry[parms->index - pool->start_index].flags);
+	tf_em_usage_update(tfs->session_id.id,
+			   parms->dir,
+			   size,
+			   TF_RESC_FREE);
+#endif /* TF_FLOW_SCALE_QUERY */
 
+	dpool_free(pool, parms->index);
 	return rc;
 }
 
diff --git a/drivers/net/bnxt/tf_core/tf_em_internal.c b/drivers/net/bnxt/tf_core/tf_em_internal.c
index 7f7a663789..0952f30146 100644
--- a/drivers/net/bnxt/tf_core/tf_em_internal.c
+++ b/drivers/net/bnxt/tf_core/tf_em_internal.c
@@ -293,6 +293,13 @@  tf_em_int_bind(struct tf *tfp,
 			/* Logging handled in tf_create_em_pool */
 			if (rc)
 				return rc;
+
+#ifdef TF_FLOW_SCALE_QUERY
+			/* Initialize the usage state buffer for EM */
+			tf_em_usage_init(tfs->session_id.id,
+					 i,
+					 iparms.info->entry.stride);
+#endif /* TF_FLOW_SCALE_QUERY */
 		}
 
 		if (rc) {
diff --git a/drivers/net/bnxt/tf_core/tf_msg.c b/drivers/net/bnxt/tf_core/tf_msg.c
index 1ef828a1e9..f2d2de859c 100644
--- a/drivers/net/bnxt/tf_core/tf_msg.c
+++ b/drivers/net/bnxt/tf_core/tf_msg.c
@@ -1995,3 +1995,178 @@  tf_msg_session_get_hotup_state(struct tf *tfp,
 
 	return rc;
 }
+
+#ifdef TF_FLOW_SCALE_QUERY
+/* Send set resource usage request to the firmware. */
+int
+tf_msg_set_resc_usage(struct tf *tfp,
+		      enum tf_dir dir,
+		      uint32_t resc_types,
+		      uint32_t size,
+		      uint8_t *data)
+{
+	int rc;
+	struct hwrm_tf_resc_usage_set_input req = { 0 };
+	struct hwrm_tf_resc_usage_set_output resp = { 0 };
+	struct tfp_send_msg_parms parms = { 0 };
+	struct tf_msg_dma_buf buf = { 0 };
+	uint8_t fw_session_id;
+	struct tf_dev_info *dev;
+	struct tf_session *tfs;
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session_internal(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Retrieve the device information */
+	rc = tf_session_get_device(tfs, &dev);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup device, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Unable to lookup FW id, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Populate the request */
+	req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
+	req.flags = tfp_cpu_to_le_16(dir);
+	req.types = tfp_cpu_to_le_32(resc_types);
+	req.size = tfp_cpu_to_le_16(size);
+#if (TF_RM_MSG_DEBUG == 1)
+	/* Dump data */
+	dump_tf_resc_usage(dir, data, size);
+#endif /* (TF_RM_MSG_DEBUG == 1) */
+	/* Check for data size conformity */
+	if (size > sizeof(req.data)) {
+		/* use dma buffer */
+		req.flags |= HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DMA;
+		rc = tf_msg_alloc_dma_buf(&buf, size);
+		if (rc)
+			goto exit;
+		tfp_memcpy(buf.va_addr, data, size);
+		tfp_memcpy(&req.data[0],
+			   &buf.pa_addr,
+			   sizeof(buf.pa_addr));
+	} else {
+		tfp_memcpy(&req.data, data, size);
+	}
+
+	parms.tf_type = HWRM_TF_RESC_USAGE_SET;
+	parms.req_data = (uint32_t *)&req;
+	parms.req_size = sizeof(req);
+	parms.resp_data = (uint32_t *)&resp;
+	parms.resp_size = sizeof(resp);
+	parms.mailbox = dev->ops->tf_dev_get_mailbox();
+
+	rc = tfp_send_msg_direct(tf_session_get_bp(tfp),
+				 &parms);
+
+	/* Free dma buffer */
+	if (size > sizeof(req.data))
+		tf_msg_free_dma_buf(&buf);
+exit:
+	return rc;
+}
+
+/* Send query resource usage request to the firmware. */
+int tf_msg_query_resc_usage(struct tf *tfp,
+			    enum tf_dir dir,
+			    uint32_t resc_types,
+			    uint32_t *size,
+			    uint8_t *data)
+{
+	int rc;
+	struct hwrm_tf_resc_usage_query_input req = { 0 };
+	struct hwrm_tf_resc_usage_query_output resp = { 0 };
+	struct tfp_send_msg_parms parms = { 0 };
+	uint8_t fw_session_id;
+	struct tf_dev_info *dev;
+	struct tf_session *tfs;
+	uint32_t flags = 0;
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session_internal(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Retrieve the device information */
+	rc = tf_session_get_device(tfs, &dev);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup device, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Unable to lookup FW id, rc:%s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
+	flags = (dir == TF_DIR_TX ?
+		 HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX :
+		 HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_RX);
+
+	/* Populate the request */
+	req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
+	req.flags = tfp_cpu_to_le_16(flags);
+	req.types = tfp_cpu_to_le_32(resc_types);
+
+	parms.tf_type = HWRM_TF_RESC_USAGE_QUERY;
+	parms.req_data = (uint32_t *)&req;
+	parms.req_size = sizeof(req);
+	parms.resp_data = (uint32_t *)&resp;
+	parms.resp_size = sizeof(resp);
+	parms.mailbox = dev->ops->tf_dev_get_mailbox();
+
+	rc = tfp_send_msg_direct(tf_session_get_bp(tfp),
+				 &parms);
+	if (rc)
+		return rc;
+
+	/* The response size should be less than or equal to (<=) the input buffer size. */
+	if (resp.size > *size)
+		return -EINVAL;
+
+	*size = resp.size;
+
+	/*
+	 * Copy the requested number of bytes
+	 */
+	tfp_memcpy(data,
+		   &resp.data,
+		   resp.size);
+
+#if (TF_RM_MSG_DEBUG == 1)
+	/* dump data */
+	dump_tf_resc_usage(dir, data, resp.size);
+#endif /* (TF_RM_MSG_DEBUG == 1) */
+
+	return 0;
+}
+#endif /* TF_FLOW_SCALE_QUERY */
diff --git a/drivers/net/bnxt/tf_core/tf_msg.h b/drivers/net/bnxt/tf_core/tf_msg.h
index 24d0ae5f43..f3364c1518 100644
--- a/drivers/net/bnxt/tf_core/tf_msg.h
+++ b/drivers/net/bnxt/tf_core/tf_msg.h
@@ -797,4 +797,61 @@  int
 tf_msg_session_get_hotup_state(struct tf *tfp,
 			       uint16_t *state,
 			       uint16_t *ref_cnt);
+
+#ifdef TF_FLOW_SCALE_QUERY
+/**
+ * Send set resource usage request to the firmware.
+ *
+ * [in] tfp
+ *   Pointer to session handle
+ *
+ * [in] dir
+ *   Receive or Transmit direction
+ *
+ * [in] resc_types
+ *   Type of resource to update its usage state
+ *
+ * [in] size
+ *   The size of data buffer
+ *
+ * [in] data
+ *   Pointer of the resource usage state
+ *
+ * Returns:
+ *  0 on Success else internal Truflow error
+ */
+int tf_msg_set_resc_usage(struct tf *tfp,
+			  enum tf_dir dir,
+			  uint32_t resc_types,
+			  uint32_t size,
+			  uint8_t *data);
+
+/**
+ * Send query resource usage request to the firmware.
+ *
+ * [in] tfp
+ *   Pointer to session handle
+ *
+ * [in] dir
+ *   Receive or Transmit direction
+ *
+ * [in] resc_types
+ *   Type of resource to update its usage state
+ *
+ * [in/out] size
+ *   Pointer to the size of data buffer
+ *
+ * [out] data
+ *   Pointer of the resource usage state
+ *
+ * Returns:
+ *  0 on Success else internal Truflow error
+ */
+int tf_msg_query_resc_usage(struct tf *tfp,
+			    enum tf_dir dir,
+			    uint32_t resc_types,
+			    uint32_t *size,
+			    uint8_t *data);
+#endif /* TF_FLOW_SCALE_QUERY */
+
 #endif  /* _TF_MSG_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_resources.c b/drivers/net/bnxt/tf_core/tf_resources.c
new file mode 100644
index 0000000000..b814dfe902
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_resources.c
@@ -0,0 +1,542 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2023 Broadcom
+ * All rights reserved.
+ */
+
+/* Truflow Table APIs and supporting code */
+
+#include <rte_common.h>
+
+#include "tf_tbl.h"
+#include "tf_common.h"
+#include "tf_rm.h"
+#include "tf_util.h"
+#include "tf_msg.h"
+#include "tfp.h"
+#include "tf_session.h"
+#include "tf_device.h"
+#include "cfa_tcam_mgr_device.h"
+#include "cfa_tcam_mgr_session.h"
+
+#ifdef TF_FLOW_SCALE_QUERY
+
+/* Logging defines */
+#define TF_FLOW_SCALE_QUERY_DEBUG 0
+
+/* Global data stored in firmware memory and the TruFlow driver */
+struct cfa_tf_resc_usage tf_resc_usage[TF_DIR_MAX];
+
+struct tf_resc_usage_buffer_control {
+	enum tf_device_type device_type;
+	bool fw_sync_paused;
+	uint32_t buffer_dirty[TF_DIR_MAX];
+};
+
+static struct tf_resc_usage_buffer_control resc_usage_control;
+
+/* Check if supporting resource usage */
+static bool tf_resc_usage_support(int session_id)
+{
+	bool support = true;
+	int sess_idx;
+
+	/* Not valid session id */
+	if (!session_id)
+		return false;
+
+	/* Support Generic template with one session */
+	sess_idx = cfa_tcam_mgr_session_find(session_id);
+	if (sess_idx < 0 && !cfa_tcam_mgr_session_empty())
+		support = false;
+
+	/* Support Thor */
+	if (resc_usage_control.device_type != TF_DEVICE_TYPE_P5)
+		support = false;
+
+#if (TF_FLOW_SCALE_QUERY_DEBUG == 1)
+	TFP_DRV_LOG(INFO, "Resc usage update sess_id: %x, idx: %d, type: %d, allow: %s\n",
+			 session_id,
+			 sess_idx,
+			 resc_usage_control.device_type,
+			 support ? "True" : "False");
+#endif /* TF_FLOW_SCALE_QUERY_DEBUG == 1 */
+	return support;
+}
+
+/* Reset the resource usage buffer */
+void tf_resc_usage_reset(enum tf_device_type type, int session_id)
+{
+	/* Check if supported on this device */
+	if (cfa_tcam_mgr_session_find(session_id) > 0)
+		return;
+
+	/* Support Thor only */
+	if (type != TF_DEVICE_TYPE_P5)
+		return;
+
+	resc_usage_control.fw_sync_paused = false;
+	resc_usage_control.device_type = type;
+	resc_usage_control.buffer_dirty[TF_DIR_RX] = 1;
+	resc_usage_control.buffer_dirty[TF_DIR_TX] = 1;
+	memset(tf_resc_usage, 0, sizeof(tf_resc_usage));
+}
+
+/* Check the number of used slices in a row */
+static int
+tf_tcam_mgr_row_entry_used(struct cfa_tcam_mgr_table_rows_0 *row,
+			    int max_slices)
+{
+	int used = 0, j;
+
+	for (j = 0; j < (max_slices / row->entry_size); j++) {
+		if (ROW_ENTRY_INUSE(row, j))
+			used++;
+	}
+	return used;
+}
+
+/* Initialize the resource usage buffer for WC-TCAM tables */
+void tf_tcam_usage_init(int session_id)
+{
+	enum tf_dir dir;
+	enum cfa_tcam_mgr_tbl_type type = CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_APPS;
+	struct cfa_tcam_mgr_table_data *table_data = NULL;
+	struct tf_resc_wc_tcam_usage *usage_data = NULL;
+	int sess_idx = cfa_tcam_mgr_session_find(session_id);
+
+	/* Check if supported on this device */
+	if (!tf_resc_usage_support(session_id))
+		return;
+
+	/* Iterate over all directions */
+	for (dir = 0; dir < TF_DIR_MAX; dir++) {
+		table_data = &cfa_tcam_mgr_tables[sess_idx][dir][type];
+		usage_data = &tf_resc_usage[dir].wc_tcam_usage;
+
+		/* cfa_tcam_mgr_table_dump(session_id, dir, type); */
+		memset(usage_data, 0, sizeof(*usage_data));
+		if (table_data->start_row != table_data->end_row)
+			usage_data->max_row_number = table_data->end_row -
+						     table_data->start_row + 1;
+		usage_data->unused_row_number = usage_data->max_row_number;
+
+#if (TF_FLOW_SCALE_QUERY_DEBUG == 1)
+		/* dump usage data */
+		CFA_TCAM_MGR_LOG(INFO, "WC-TCAM:  1-p  1-f  2-p  2-f  4-f  free-rows\n");
+		CFA_TCAM_MGR_LOG(INFO, "%s         %-4d %-4d %-4d %-4d %-4d %-4d\n",
+				 (dir == TF_DIR_RX) ? "RX" : "TX",
+				 usage_data->slice_row_1_p_used,
+				 usage_data->slice_row_1_f_used,
+				 usage_data->slice_row_2_p_used,
+				 usage_data->slice_row_2_f_used,
+				 usage_data->slice_row_4_used,
+				 usage_data->unused_row_number);
+#endif
+	}
+}
+
+/* Update WC-TCAM table resource usage */
+int tf_tcam_usage_update(int session_id,
+			 enum tf_dir dir,
+			 int tcam_tbl_type,
+			 void *data,
+			 enum tf_resc_opt resc_opt)
+{
+	struct tf_resc_wc_tcam_usage *usage_data;
+	int used_entries;
+	struct cfa_tcam_mgr_table_rows_0 *key_row = (struct cfa_tcam_mgr_table_rows_0 *)data;
+	int key_slices = key_row->entry_size;
+
+	/* Check if supported on this device */
+	if (!tf_resc_usage_support(session_id))
+		return -1;
+
+	/* Support WC-TCAM APPs only */
+	if (tcam_tbl_type != CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_APPS)
+		return 0;
+
+	resc_usage_control.buffer_dirty[dir] = 1;
+	usage_data = &tf_resc_usage[dir].wc_tcam_usage;
+	if (resc_opt == TF_RESC_ALLOC) {
+		switch (key_slices) {
+		case 4:
+			usage_data->unused_row_number -= 1;
+			usage_data->slice_row_4_used += 1;
+			break;
+		case 2:
+			used_entries = tf_tcam_mgr_row_entry_used(key_row, 4);
+			if (used_entries == 2) {
+				usage_data->slice_row_2_p_used -= 1;
+				usage_data->slice_row_2_f_used += 1;
+			} else {
+				usage_data->unused_row_number -= 1;
+				usage_data->slice_row_2_p_used += 1;
+			}
+			break;
+		case 1:
+			used_entries = tf_tcam_mgr_row_entry_used(key_row, 4);
+			if (used_entries == 4) {
+				usage_data->slice_row_1_p_used -= 1;
+				usage_data->slice_row_1_f_used += 1;
+			} else if (used_entries == 1) {
+				usage_data->slice_row_1_p_used += 1;
+				usage_data->unused_row_number -= 1;
+			}
+			break;
+		default:
+			CFA_TCAM_MGR_LOG(ERR, "CFA invalid size of key slices: %d\n", key_slices);
+			break;
+		}
+	} else { /* free one entry */
+		switch (key_slices) {
+		case 4:
+			usage_data->unused_row_number += 1;
+			usage_data->slice_row_4_used -= 1;
+			break;
+		case 2:
+			if (!ROW_INUSE(key_row)) {  /* empty */
+				usage_data->unused_row_number += 1;
+				usage_data->slice_row_2_p_used -= 1;
+			} else {
+				usage_data->slice_row_2_p_used += 1;
+				usage_data->slice_row_2_f_used -= 1;
+			}
+			break;
+		case 1:
+			used_entries = tf_tcam_mgr_row_entry_used(key_row, 4);
+			if (!ROW_INUSE(key_row)) {  /* empty */
+				usage_data->unused_row_number += 1;
+				usage_data->slice_row_1_p_used -= 1;
+			} else if (used_entries == 3) {
+				usage_data->slice_row_1_f_used -= 1;
+				usage_data->slice_row_1_p_used += 1;
+			}
+			break;
+		default:
+			CFA_TCAM_MGR_LOG(ERR, "CFA invalid size of key slices: %d\n", key_slices);
+			break;
+		}
+	}
+
+#if (TF_FLOW_SCALE_QUERY_DEBUG == 1)
+	/* dump usage data*/
+	CFA_TCAM_MGR_LOG(INFO, "WC-TCAM:  1-p  1-f  2-p  2-f  4-f  free-rows\n");
+	CFA_TCAM_MGR_LOG(INFO, "          %-4d %-4d %-4d %-4d %-4d %-4d\n",
+			 usage_data->slice_row_1_p_used,
+			 usage_data->slice_row_1_f_used,
+			 usage_data->slice_row_2_p_used,
+			 usage_data->slice_row_2_f_used,
+			 usage_data->slice_row_4_used,
+			 usage_data->unused_row_number);
+#endif
+	return 0;
+}
+
+/* Initialize the EM usage table */
+void tf_em_usage_init(uint32_t session_id, enum tf_dir dir, uint16_t max_entries)
+{
+	struct tf_resc_em_usage *em;
+
+	/* Check if supported on this device */
+	if (!tf_resc_usage_support(session_id))
+		return;
+
+	em = &tf_resc_usage[dir].em_int_usage;
+	em->max_entries = max_entries;
+	em->used_entries = 0;
+}
+
+/* Update the EM usage table */
+int tf_em_usage_update(uint32_t session_id,
+		       enum tf_dir dir,
+		       uint16_t size,
+		       enum tf_resc_opt resc_opt)
+{
+	struct tf_resc_em_usage *em;
+
+#if (TF_FLOW_SCALE_QUERY_DEBUG == 1)
+	CFA_TCAM_MGR_LOG(INFO, "%s: %s: EM record size: %d, %s\n",
+			 __func__,
+			 dir ? "TX" : "RX",
+			 size,
+			 resc_opt == TF_RESC_ALLOC ? "Alloc" : "Free");
+#endif /* TF_FLOW_SCALE_QUERY_DEBUG == 1 */
+
+	/* Check if supported on this device */
+	if (!tf_resc_usage_support(session_id))
+		return -1;
+
+	/* not valid size */
+	if (!size)
+		return 0;
+
+	resc_usage_control.buffer_dirty[dir] = 1;
+	em = &tf_resc_usage[dir].em_int_usage;
+	if (resc_opt == TF_RESC_ALLOC) {
+		em->used_entries += size;
+		assert(em->used_entries <= em->max_entries);
+	} else {
+		assert(em->used_entries >= size);
+		em->used_entries -= size;
+	}
+	return 0;
+}
+
+/* Initialize the usage buffer for all kinds of sram tables */
+void tf_tbl_usage_init(uint32_t session_id,
+		       enum tf_dir dir,
+		       uint32_t tbl_type,
+		       uint16_t max_entries)
+{
+	struct tf_rm_element_cfg *tbl_cfg = tf_tbl_p58[dir];
+
+#if (TF_FLOW_SCALE_QUERY_DEBUG == 1)
+	CFA_TCAM_MGR_LOG(INFO, "%s: %s: tbl_type: %d[%s], max entries: [%d]:[0x%x]\n",
+			 __func__,
+			 dir ? "TX" : "RX",
+			 tbl_type,
+			 tf_tbl_type_2_str(tbl_type),
+			 max_entries,
+			 max_entries);
+#endif /* TF_FLOW_SCALE_QUERY_DEBUG == 1 */
+
+	/* Check if supported on this device */
+	if (!tf_resc_usage_support(session_id))
+		return;
+
+	/* Convert to entries */
+	if (tbl_cfg[tbl_type].slices)
+		max_entries *= (16 / tbl_cfg[tbl_type].slices);
+
+	switch (tbl_type) {
+	/* Counter Action */
+	case TF_TBL_TYPE_ACT_STATS_64:
+	{
+		struct tf_resc_cnt_usage *cnt;
+		cnt = &tf_resc_usage[dir].cnt_usage;
+		cnt->max_entries = max_entries;
+		cnt->used_entries = 0;
+		break;
+	}
+	/* Action Record */
+	case TF_TBL_TYPE_COMPACT_ACT_RECORD:
+	case TF_TBL_TYPE_FULL_ACT_RECORD:
+	{
+		struct tf_resc_act_usage *act;
+		act = &tf_resc_usage[dir].act_usage;
+		act->max_entries += max_entries;
+		act->free_entries += max_entries;
+		act->num_compact_act_records = 0;
+		act->num_full_act_records = 0;
+		break;
+	}
+	/* ACT_ENCAP and ACT_MODIFY Records */
+	case TF_TBL_TYPE_ACT_ENCAP_8B:
+	case TF_TBL_TYPE_ACT_ENCAP_16B:
+	case TF_TBL_TYPE_ACT_ENCAP_32B:
+	case TF_TBL_TYPE_ACT_ENCAP_64B:
+	case TF_TBL_TYPE_ACT_ENCAP_128B:
+	case TF_TBL_TYPE_ACT_MODIFY_8B:
+	case TF_TBL_TYPE_ACT_MODIFY_16B:
+	case TF_TBL_TYPE_ACT_MODIFY_32B:
+	case TF_TBL_TYPE_ACT_MODIFY_64B:
+	{
+		struct tf_resc_act_mod_enc_usage *mod_encap;
+		mod_encap = &tf_resc_usage[dir].mod_encap_usage;
+		mod_encap->max_entries += max_entries;
+		mod_encap->free_entries += max_entries;
+		break;
+	}
+	/* SP_SMAC Record */
+	case TF_TBL_TYPE_ACT_SP_SMAC:
+	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
+	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
+	{
+		struct tf_resc_act_sp_smac_usage *sp_smac;
+		sp_smac = &tf_resc_usage[dir].sp_smac_usage;
+		sp_smac->max_entries += max_entries;
+		sp_smac->free_entries += max_entries;
+		break;
+	}
+	/** Meter Profiles */
+	case TF_TBL_TYPE_METER_PROF:
+		tf_resc_usage[dir].meter_usage.max_meter_profile = max_entries;
+		break;
+	/** Meter Instance */
+	case TF_TBL_TYPE_METER_INST:
+		tf_resc_usage[dir].meter_usage.max_meter_instance = max_entries;
+		break;
+	default:
+		break;
+	}
+}
+
+/* Update the usage buffer for sram tables: add or free one entry */
+int tf_tbl_usage_update(uint32_t session_id,
+			 enum tf_dir dir,
+			 uint32_t tbl_type,
+			 enum tf_resc_opt resc_opt)
+{
+	struct tf_rm_element_cfg *tbl_cfg = tf_tbl_p58[dir];
+	struct tf_resc_cnt_usage *cnt;
+	int inc = (resc_opt == TF_RESC_ALLOC) ? 1 : -1;
+	int slices = tbl_cfg[tbl_type].slices;
+	int entries = 0;
+
+	/* Check if supported on this device */
+	if (!tf_resc_usage_support(session_id))
+		return -1;
+
+	/* Convert to entries */
+	if (slices)
+		entries = inc * (16 / slices);
+
+#if (TF_FLOW_SCALE_QUERY_DEBUG == 1)
+	TFP_DRV_LOG(INFO, "%s: %s: tbl_type: %d[%s] %s, Entries: %d\n", __func__,
+			 dir ? "TX" : "RX",
+			 tbl_type,
+			 tf_tbl_type_2_str(tbl_type),
+			 resc_opt ? "Alloc" : "Free",
+			 entries);
+#endif /* TF_FLOW_SCALE_QUERY_DEBUG == 1 */
+
+	resc_usage_control.buffer_dirty[dir] = 1;
+	switch (tbl_type) {
+	/* Counter Action */
+	case TF_TBL_TYPE_ACT_STATS_64:
+		cnt = &tf_resc_usage[dir].cnt_usage;
+		cnt->used_entries += inc;
+		break;
+	/* ACTION Record */
+	case TF_TBL_TYPE_FULL_ACT_RECORD:
+	case TF_TBL_TYPE_COMPACT_ACT_RECORD:
+	{
+		struct tf_resc_act_usage *act;
+		act = &tf_resc_usage[dir].act_usage;
+		if (tbl_type == TF_TBL_TYPE_COMPACT_ACT_RECORD)
+			act->num_compact_act_records += inc;
+		else
+			act->num_full_act_records += inc;
+		act->free_entries -= entries;
+		break;
+	}
+	/* ACT_ENCAP and ACT_MODIFY Records */
+	case TF_TBL_TYPE_ACT_ENCAP_8B:
+	case TF_TBL_TYPE_ACT_ENCAP_16B:
+	case TF_TBL_TYPE_ACT_ENCAP_32B:
+	case TF_TBL_TYPE_ACT_ENCAP_64B:
+	case TF_TBL_TYPE_ACT_ENCAP_128B:
+	case TF_TBL_TYPE_ACT_MODIFY_8B:
+	case TF_TBL_TYPE_ACT_MODIFY_16B:
+	case TF_TBL_TYPE_ACT_MODIFY_32B:
+	case TF_TBL_TYPE_ACT_MODIFY_64B:
+	{
+		struct tf_resc_act_mod_enc_usage *mod_encap;
+		mod_encap = &tf_resc_usage[dir].mod_encap_usage;
+		switch (slices) {
+		case 1:
+			mod_encap->data.num_128b_records += inc;
+			break;
+		case 2:
+			mod_encap->data.num_64b_records += inc;
+			break;
+		case 4:
+			mod_encap->data.num_32b_records += inc;
+			break;
+		case 8:
+			mod_encap->data.num_16b_records += inc;
+			break;
+		case 16:
+			mod_encap->data.num_8b_records += inc;
+			break;
+		default:
+			break;
+		}
+		mod_encap->free_entries -= entries;
+		break;
+	}
+	/* SP SMAC table */
+	case TF_TBL_TYPE_ACT_SP_SMAC:
+	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
+	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
+	{
+		struct tf_resc_act_sp_smac_usage *sp_smac;
+		sp_smac = &tf_resc_usage[dir].sp_smac_usage;
+		if (tbl_type == TF_TBL_TYPE_ACT_SP_SMAC)
+			sp_smac->num_sp_smac_records += inc;
+		else if (tbl_type == TF_TBL_TYPE_ACT_SP_SMAC_IPV4)
+			sp_smac->num_sp_smac_ipv4_records += inc;
+		else if (tbl_type == TF_TBL_TYPE_ACT_SP_SMAC_IPV6)
+			sp_smac->num_sp_smac_ipv6_records += inc;
+		sp_smac->free_entries -= entries;
+		break;
+	}
+	/* Meter Profiles */
+	case TF_TBL_TYPE_METER_PROF:
+		tf_resc_usage[dir].meter_usage.used_meter_profile += inc;
+		break;
+	/* Meter Instance */
+	case TF_TBL_TYPE_METER_INST:
+		tf_resc_usage[dir].meter_usage.used_meter_instance += inc;
+		break;
+	default:
+		/* unsupported table types */
+		break;
+	}
+	return 0;
+}
+
+/* pause usage state update with firmware */
+void tf_resc_pause_usage_update(void)
+{
+	resc_usage_control.fw_sync_paused = true;
+}
+
+/* resume usage state update with firmware */
+void tf_resc_resume_usage_update(void)
+{
+	resc_usage_control.fw_sync_paused = false;
+}
+
+/* check if paused the resource usage update with firmware */
+static bool tf_resc_usage_update_paused(void)
+{
+	return resc_usage_control.fw_sync_paused;
+}
+
+/* resync all resource usage state with firmware for both directions */
+void tf_resc_usage_update_all(struct bnxt *bp)
+{
+	struct tf *tfp;
+	enum tf_dir dir;
+
+	/* Do nothing while usage state sync with firmware is paused */
+	if (tf_resc_usage_update_paused())
+		return;
+
+	tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_DEFAULT);
+	if (!tfp || !tfp->session) {
+		BNXT_DRV_DBG(ERR, "Failed to get truflow or session pointer\n");
+		return;
+	}
+
+	/* Check if supported on this device */
+	if (!tf_resc_usage_support(tfp->session->session_id.id))
+		return;
+
+	/* update usage state with firmware for each direction */
+	for (dir = 0; dir < TF_DIR_MAX; dir++) {
+		if (resc_usage_control.buffer_dirty[dir]) {
+			tf_update_resc_usage(tfp, dir, TF_FLOW_RESC_TYPE_ALL);
+			resc_usage_control.buffer_dirty[dir] = 0;
+		}
+	}
+}
+
+void dump_tf_resc_usage(__rte_unused enum tf_dir dir,
+			__rte_unused void *data,
+			__rte_unused uint32_t size)
+{
+	/* empty routine */
+}
+#endif /* TF_FLOW_SCALE_QUERY */
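
The pause/resume/update-all trio above is meant to avoid one firmware sync per table update when many resources change at once (for example while installing default rules on both directions). A minimal sketch of how a caller might batch updates, assuming a hypothetical my_install_default_rules() helper that is not part of this patch:

	static void example_batched_usage_sync(struct bnxt *bp)
	{
		/* Suppress per-update usage sync with firmware. */
		tf_resc_pause_usage_update();

		my_install_default_rules(bp);	/* hypothetical helper */

		tf_resc_resume_usage_update();

		/*
		 * Flush once: one usage-set message per direction whose
		 * buffer_dirty flag was marked while paused.
		 */
		tf_resc_usage_update_all(bp);
	}
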
diff --git a/drivers/net/bnxt/tf_core/tf_resources.h b/drivers/net/bnxt/tf_core/tf_resources.h
index 8c28d3dc68..715c9e0d94 100644
--- a/drivers/net/bnxt/tf_core/tf_resources.h
+++ b/drivers/net/bnxt/tf_core/tf_resources.h
@@ -5,6 +5,135 @@ 
 
 #ifndef _TF_RESOURCES_H_
 #define _TF_RESOURCES_H_
+#include <rte_common.h>
+#include <tf_rm.h>
+#include "bnxt.h"
 
 #define TF_NUM_TBL_SCOPE           16      /* < Number of TBL scopes */
+
+#ifdef TF_FLOW_SCALE_QUERY
+/* Feature of flow scale query */
+enum tf_resc_opt {
+	TF_RESC_FREE,
+	TF_RESC_ALLOC
+};
+
+/**
+ *  WC TCAM includes a set of rows; each row has 4 slices and
+ *  each slice is 160 bits wide.
+ */
+typedef struct tf_resc_wc_tcam_usage {
+	uint16_t max_row_number;      /* Max number of rows (excluding AFM), 160bit row */
+	uint16_t slice_row_1_p_used;  /* 1-slice rows partially used */
+	uint16_t slice_row_1_f_used;  /* 1-slice rows fully used */
+	uint16_t slice_row_2_p_used;  /* 2-slice rows partially used */
+	uint16_t slice_row_2_f_used;  /* 2-slice rows fully used */
+	uint16_t slice_row_4_used;    /* 4-slice rows fully used */
+	uint16_t unused_row_number;   /* number of unused rows */
+	uint8_t  reserved[2];
+} __rte_packed tf_resc_wc_tcam_usage_t;
+
+/* Resource Internal EM memory pool; vary size records */
+typedef struct tf_resc_em_usage {
+	uint16_t max_entries;   /* Max 16-Bytes entries */
+	uint16_t used_entries;  /* each record takes up to 7 entries by design */
+} __rte_packed tf_resc_em_usage_t;
+
+/* Resource Meter */
+typedef struct tf_resc_meter_usage {
+	uint16_t max_meter_instance;    /* 1023 for Thor, app can reserve some entries */
+	uint16_t max_meter_profile;     /* 256 for Thor, app can reserve some profiles  */
+	uint16_t used_meter_instance;   /* meter instance: fixed size record */
+	uint16_t used_meter_profile;    /* meter profile: fixed size record */
+} __rte_packed tf_resc_meter_usage_t;
+
+/* Resource Counter */
+typedef struct tf_resc_cnt_usage {
+	uint16_t max_entries;           /* each counter take 64-Bytes */
+	uint16_t used_entries;          /* each record uses one entry */
+} __rte_packed tf_resc_cnt_usage_t;
+
+/* Resource Action */
+typedef struct tf_resc_act_usage {
+	uint16_t max_entries;              /* Max 8-Bytes entries */
+	uint16_t num_compact_act_records;  /* 8-Bytes records */
+	uint16_t num_full_act_records;     /* 16-Bytes records */
+	uint16_t free_entries;             /* unused entries */
+} __rte_packed tf_resc_act_usage_t;
+
+/* Resource SP SMAC  */
+typedef struct tf_resc_act_sp_smac_usage {
+	uint16_t max_entries;              /* Max 8-Bytes entries */
+	uint16_t num_sp_smac_records;      /* 8-Bytes records */
+	uint16_t num_sp_smac_ipv4_records; /* 8-Bytes records */
+	uint16_t num_sp_smac_ipv6_records; /* 16-Bytes records */
+	uint16_t free_entries;             /* unused entries */
+} __rte_packed tf_resc_act_sp_smac_usage_t;
+
+/* Resource ACT MODIFY and ACT ENCAP */
+typedef struct tf_resc_act_mod_enc_usage {
+	uint16_t max_entries;	            /* Max 8-Bytes entries */
+	struct {
+		uint16_t num_8b_records;    /* 8-bytes records */
+		uint16_t num_16b_records;   /* 16-bytes records  */
+		uint16_t num_32b_records;   /* 32-bytes records  */
+		uint16_t num_64b_records;   /* 64-bytes records  */
+		uint16_t num_128b_records;  /* 128-bytes records  */
+	} data;
+	int16_t free_entries; /* unused entries */
+} __rte_packed tf_resc_act_mod_enc_usage_t;
+
+/* All types of resource usage on both direction */
+typedef struct cfa_tf_resc_usage {
+	tf_resc_em_usage_t           em_int_usage;
+	tf_resc_wc_tcam_usage_t      wc_tcam_usage;
+	tf_resc_cnt_usage_t          cnt_usage;
+	tf_resc_act_usage_t          act_usage;
+	tf_resc_meter_usage_t        meter_usage;
+	tf_resc_act_mod_enc_usage_t  mod_encap_usage;
+	tf_resc_act_sp_smac_usage_t  sp_smac_usage;
+} __rte_packed cfa_tf_resc_usage_t;
+
+/* global data stored in firmware memory and TruFlow driver */
+extern cfa_tf_resc_usage_t tf_resc_usage[TF_DIR_MAX];
+
+void tf_resc_usage_reset(enum tf_device_type type, int session_id);
+
+void tf_tcam_usage_init(int session_id);
+
+int tf_tcam_usage_update(int session_id,
+			 enum tf_dir dir,
+			 int tcam_tbl_type,
+			 void *key_row,
+			 enum tf_resc_opt resc_opt);
+
+void tf_em_usage_init(uint32_t session_id, enum tf_dir dir, uint16_t max_entries);
+
+int tf_em_usage_update(uint32_t session_id,
+		       enum tf_dir dir,
+		       uint16_t size,
+		       enum tf_resc_opt resc_opt);
+
+void tf_tbl_usage_init(uint32_t session_id,
+		       enum tf_dir dir,
+		       uint32_t tbl_type,
+		       uint16_t max_entries);
+
+int tf_tbl_usage_update(uint32_t session_id,
+			enum tf_dir dir,
+			uint32_t tbl_type,
+			enum tf_resc_opt resc_opt);
+
+void dump_tf_resc_usage(enum tf_dir dir, void *data, uint32_t size);
+
+extern struct tf_rm_element_cfg tf_tbl_p58[TF_DIR_MAX][TF_TBL_TYPE_MAX];
+
+void tf_resc_pause_usage_update(void);
+
+void tf_resc_resume_usage_update(void);
+
+void tf_resc_usage_update_all(struct bnxt *bp);
+
+#endif /* TF_FLOW_SCALE_QUERY */
+
 #endif /* _TF_RESOURCES_H_ */
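
tf_resc_usage[] is the driver-side copy of all usage counters that gets pushed to firmware. A minimal sketch of reading the WC-TCAM portion locally, using only fields defined in this header (the helper itself is illustrative and not part of the patch):

	static uint16_t example_wc_tcam_rows_in_use(enum tf_dir dir)
	{
		const tf_resc_wc_tcam_usage_t *wc = &tf_resc_usage[dir].wc_tcam_usage;

		/* Partially and fully used rows across all slice widths. */
		return wc->slice_row_1_p_used + wc->slice_row_1_f_used +
		       wc->slice_row_2_p_used + wc->slice_row_2_f_used +
		       wc->slice_row_4_used;
	}
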
diff --git a/drivers/net/bnxt/tf_core/tf_rm.c b/drivers/net/bnxt/tf_core/tf_rm.c
index 9b85f5397d..e38bfcf4f6 100644
--- a/drivers/net/bnxt/tf_core/tf_rm.c
+++ b/drivers/net/bnxt/tf_core/tf_rm.c
@@ -364,7 +364,8 @@  tf_rm_update_parent_reservations(struct tf *tfp,
 				 struct tf_rm_element_cfg *cfg,
 				 uint16_t *alloc_cnt,
 				 uint16_t num_elements,
-				 uint16_t *req_cnt)
+				 uint16_t *req_cnt,
+				 __rte_unused enum tf_dir dir)
 {
 	int parent, child;
 	const char *type_str = NULL;
@@ -388,6 +389,13 @@  tf_rm_update_parent_reservations(struct tf *tfp,
 				dev->ops->tf_dev_get_resource_str(tfp,
 							 cfg[parent].hcapi_type,
 							 &type_str);
+#ifdef TF_FLOW_SCALE_QUERY
+				/* Initialize the usage buffer for SRAM tables */
+				tf_tbl_usage_init(tfp->session->session_id.id,
+						  dir,
+						  parent,
+						  alloc_cnt[parent]);
+#endif /* TF_FLOW_SCALE_QUERY */
 			}
 
 			/* Search again through all the elements */
@@ -418,6 +426,13 @@  tf_rm_update_parent_reservations(struct tf *tfp,
 					combined_cnt += cnt;
 					/* Clear the requested child count */
 					req_cnt[child] = 0;
+#ifdef TF_FLOW_SCALE_QUERY
+					/* Initialize the usage buffer for SRAM tables */
+					tf_tbl_usage_init(tfp->session->session_id.id,
+							  dir,
+							  child,
+							  alloc_cnt[child]);
+#endif /* TF_FLOW_SCALE_QUERY */
 				}
 			}
 			/* Save the parent count to be requested */
@@ -501,7 +516,8 @@  tf_rm_create_db(struct tf *tfp,
 	tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
 					 parms->alloc_cnt,
 					 parms->num_elements,
-					 req_cnt);
+					 req_cnt,
+					 parms->dir);
 
 	/* Process capabilities against DB requirements. However, as a
 	 * DB can hold elements that are not HCAPI we can reduce the
@@ -672,6 +688,22 @@  tf_rm_create_db(struct tf *tfp,
 				}
 			}
 			j++;
+
+#ifdef TF_FLOW_SCALE_QUERY
+			/* Initialize the usage buffer for Meter tables */
+			if (cfg->hcapi_type == CFA_RESOURCE_TYPE_P58_METER ||
+			    cfg->hcapi_type == CFA_RESOURCE_TYPE_P58_METER_PROF) {
+				uint32_t tbl_type;
+				if (cfg->hcapi_type == CFA_RESOURCE_TYPE_P58_METER)
+					tbl_type = TF_TBL_TYPE_METER_INST;
+				else
+					tbl_type = TF_TBL_TYPE_METER_PROF;
+				tf_tbl_usage_init(tfp->session->session_id.id,
+						  parms->dir,
+						  tbl_type,
+						  req_cnt[i]);
+			}
+#endif /* TF_FLOW_SCALE_QUERY */
 		} else {
 			/* Bail out as we want what we requested for
 			 * all elements, not any less.
@@ -755,7 +787,8 @@  tf_rm_create_db_no_reservation(struct tf *tfp,
 	tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
 					 parms->alloc_cnt,
 					 parms->num_elements,
-					 req_cnt);
+					 req_cnt,
+					 parms->dir);
 
 	/* Process capabilities against DB requirements. However, as a
 	 * DB can hold elements that are not HCAPI we can reduce the
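
The tf_rm_create_db() hooks above hand raw allocation counts to tf_tbl_usage_init(), which scales them to 8-byte SRAM entries as (16 / slices) per record. A worked example of that conversion, assuming a 32B encap record is configured with 4 slices (the helper is illustrative, not part of the patch):

	/* Entries consumed by num_records elements of a given slice count. */
	static int example_entries_for_records(int slices, int num_records)
	{
		if (!slices)
			return 0;
		return num_records * (16 / slices);
	}

	/* e.g. example_entries_for_records(4, 100) == 400 8-byte entries */
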
diff --git a/drivers/net/bnxt/tf_core/tf_session.c b/drivers/net/bnxt/tf_core/tf_session.c
index 253d716572..7545974c93 100644
--- a/drivers/net/bnxt/tf_core/tf_session.c
+++ b/drivers/net/bnxt/tf_core/tf_session.c
@@ -197,6 +197,11 @@  tf_session_create(struct tf *tfp,
 		parms->open_cfg->shared_session_creator = true;
 	}
 
+#ifdef TF_FLOW_SCALE_QUERY
+	/* Reset the resource usage buffer before binding a device */
+	tf_resc_usage_reset(parms->open_cfg->device_type, tfp->session->session_id.id);
+#endif /* TF_FLOW_SCALE_QUERY */
+
 	rc = tf_dev_bind(tfp,
 			 parms->open_cfg->device_type,
 			 &parms->open_cfg->resources,
@@ -216,6 +221,11 @@  tf_session_create(struct tf *tfp,
 
 	session->dev_init = true;
 
+#ifdef TF_FLOW_SCALE_QUERY
+	/* Sync the initial resource usage with firmware */
+	tf_resc_usage_update_all(parms->open_cfg->bp);
+#endif /* TF_FLOW_SCALE_QUERY */
+
 	return 0;
 
  cleanup:
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 6d345e12c7..1e95905e21 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -4410,6 +4410,22 @@  ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx,
 			goto flow_error;
 	}
 
+#ifdef TF_FLOW_SCALE_QUERY
+	tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT);
+	if (!tfp) {
+		BNXT_DRV_DBG(ERR, "Failed to get truflow pointer\n");
+		return -EINVAL;
+	}
+
+	if (parms->act_bitmap->bits & BNXT_ULP_FLOW_DIR_BITMASK_EGR)
+		dir = TF_DIR_TX;
+	else
+		dir = TF_DIR_RX;
+
+	/* sync resource usage state with firmware */
+	tf_update_resc_usage(tfp, dir, TF_FLOW_RESC_TYPE_ALL);
+#endif /* TF_FLOW_SCALE_QUERY */
+
 	return rc;
 
 flow_error: