diff mbox series

[v4,12/13] net/bnxt: add SRAM manager shared session

Message ID 20210920074214.23747-13-venkatkumar.duvvuru@broadcom.com (mailing list archive)
State Accepted
Delegated to: Ajit Khaparde
Headers show
Series enhancements to host based flow table management | expand

Checks

Context Check Description
ci/Intel-compilation warning apply issues
ci/checkpatch success coding style OK

Commit Message

Venkat Duvvuru Sept. 20, 2021, 7:42 a.m. UTC
From: Farah Smith <farah.smith@broadcom.com>

Fix shared session support issues due to SRAM manager
additions. Shared session does not support slices within
RM blocks. Calculate resources required without slices
and determine base addresses using old methods for the
shared session.

Signed-off-by: Farah Smith <farah.smith@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Shahaji Bhosle <sbhosle@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/tf_core/tf_em_internal.c |   5 +-
 drivers/net/bnxt/tf_core/tf_rm.c          | 134 +++++++++++++++++++---
 drivers/net/bnxt/tf_core/tf_tbl_sram.c    |  73 +++++++++---
 3 files changed, 176 insertions(+), 36 deletions(-)
diff mbox series

Patch

diff --git a/drivers/net/bnxt/tf_core/tf_em_internal.c b/drivers/net/bnxt/tf_core/tf_em_internal.c
index 2d57595f17..67ba011eae 100644
--- a/drivers/net/bnxt/tf_core/tf_em_internal.c
+++ b/drivers/net/bnxt/tf_core/tf_em_internal.c
@@ -326,8 +326,11 @@  tf_em_int_unbind(struct tf *tfp)
 		return rc;
 
 	if (!tf_session_is_shared_session(tfs)) {
-		for (i = 0; i < TF_DIR_MAX; i++)
+		for (i = 0; i < TF_DIR_MAX; i++) {
+			if (tfs->em_pool[i] == NULL)
+				continue;
 			dpool_free_all(tfs->em_pool[i]);
+		}
 	}
 
 	rc = tf_session_get_db(tfp, TF_MODULE_TYPE_EM, &em_db_ptr);
diff --git a/drivers/net/bnxt/tf_core/tf_rm.c b/drivers/net/bnxt/tf_core/tf_rm.c
index 03c958a7d6..dd537aaece 100644
--- a/drivers/net/bnxt/tf_core/tf_rm.c
+++ b/drivers/net/bnxt/tf_core/tf_rm.c
@@ -18,6 +18,9 @@ 
 #include "tfp.h"
 #include "tf_msg.h"
 
+/* Logging defines */
+#define TF_RM_DEBUG  0
+
 /**
  * Generic RM Element data type that an RM DB is build upon.
  */
@@ -207,6 +210,45 @@  tf_rm_adjust_index(struct tf_rm_element *db,
 	return rc;
 }
 
+/**
+ * Logs an array of found residual entries to the console.
+ *
+ * [in] dir
+ *   Receive or transmit direction
+ *
+ * [in] module
+ *   Type of Device Module
+ *
+ * [in] count
+ *   Number of entries in the residual array
+ *
+ * [in] residuals
+ *   Pointer to an array of residual entries. Array is indexed the same as
+ *   the DB in which this function is used. Each entry holds residual
+ *   value for that entry.
+ */
+#if (TF_RM_DEBUG == 1)
+static void
+tf_rm_log_residuals(enum tf_dir dir,
+		    enum tf_module_type module,
+		    uint16_t count,
+		    uint16_t *residuals)
+{
+	int i;
+
+	/* Walk the residual array and log the types that weren't
+	 * cleaned up to the console.
+	 */
+	for (i = 0; i < count; i++) {
+		if (residuals[i] != 0)
+			TFP_DRV_LOG(INFO,
+				"%s, %s was not cleaned up, %d outstanding\n",
+				tf_dir_2_str(dir),
+				tf_module_subtype_2_str(module, i),
+				residuals[i]);
+	}
+}
+#endif /* TF_RM_DEBUG == 1 */
 /**
  * Performs a check of the passed in DB for any lingering elements. If
  * a resource type was found to not have been cleaned up by the caller
@@ -322,6 +364,12 @@  tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
 		*resv_size = found;
 	}
 
+#if (TF_RM_DEBUG == 1)
+	tf_rm_log_residuals(rm_db->dir,
+			    rm_db->module,
+			    rm_db->num_entries,
+			    residuals);
+#endif
 	tfp_free((void *)residuals);
 	*resv = local_resv;
 
@@ -367,7 +415,8 @@  tf_rm_update_parent_reservations(struct tf *tfp,
 				 struct tf_rm_element_cfg *cfg,
 				 uint16_t *alloc_cnt,
 				 uint16_t num_elements,
-				 uint16_t *req_cnt)
+				 uint16_t *req_cnt,
+				 bool shared_session)
 {
 	int parent, child;
 	const char *type_str;
@@ -378,18 +427,28 @@  tf_rm_update_parent_reservations(struct tf *tfp,
 
 		/* If I am a parent */
 		if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
-			/* start with my own count */
-			RTE_ASSERT(cfg[parent].slices);
-			combined_cnt =
-				alloc_cnt[parent] / cfg[parent].slices;
+			uint8_t p_slices = 1;
+
+			/* Shared session doesn't support slices */
+			if (!shared_session)
+				p_slices = cfg[parent].slices;
+
+			RTE_ASSERT(p_slices);
 
-			if (alloc_cnt[parent] % cfg[parent].slices)
+			combined_cnt = alloc_cnt[parent] / p_slices;
+
+			if (alloc_cnt[parent] % p_slices)
 				combined_cnt++;
 
 			if (alloc_cnt[parent]) {
 				dev->ops->tf_dev_get_resource_str(tfp,
 							 cfg[parent].hcapi_type,
 							 &type_str);
+#if (TF_RM_DEBUG == 1)
+				printf("%s:%s cnt(%d) slices(%d)\n",
+				       type_str, tf_tbl_type_2_str(parent),
+				       alloc_cnt[parent], p_slices);
+#endif /* (TF_RM_DEBUG == 1) */
 			}
 
 			/* Search again through all the elements */
@@ -399,20 +458,31 @@  tf_rm_update_parent_reservations(struct tf *tfp,
 				    TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
 				    cfg[child].parent_subtype == parent &&
 				    alloc_cnt[child]) {
+					uint8_t c_slices = 1;
 					uint16_t cnt = 0;
-					RTE_ASSERT(cfg[child].slices);
+
+					if (!shared_session)
+						c_slices = cfg[child].slices;
+
+					RTE_ASSERT(c_slices);
 
 					dev->ops->tf_dev_get_resource_str(tfp,
 							  cfg[child].hcapi_type,
 							   &type_str);
+#if (TF_RM_DEBUG == 1)
+					printf("%s:%s cnt(%d) slices(%d)\n",
+					       type_str,
+					       tf_tbl_type_2_str(child),
+					       alloc_cnt[child],
+					       c_slices);
+#endif /* (TF_RM_DEBUG == 1) */
 					/* Increment the parents combined count
 					 * with each child's count adjusted for
-					 * number of slices per RM allocated item.
+					 * number of slices per RM alloc item.
 					 */
-					cnt =
-					 alloc_cnt[child] / cfg[child].slices;
+					cnt = alloc_cnt[child] / c_slices;
 
-					if (alloc_cnt[child] % cfg[child].slices)
+					if (alloc_cnt[child] % c_slices)
 						cnt++;
 
 					combined_cnt += cnt;
@@ -422,6 +492,10 @@  tf_rm_update_parent_reservations(struct tf *tfp,
 			}
 			/* Save the parent count to be requested */
 			req_cnt[parent] = combined_cnt;
+#if (TF_RM_DEBUG == 1)
+			printf("%s calculated total:%d\n\n",
+			       type_str, req_cnt[parent]);
+#endif /* (TF_RM_DEBUG == 1) */
 		}
 	}
 	return 0;
@@ -444,6 +518,7 @@  tf_rm_create_db(struct tf *tfp,
 	struct tf_rm_new_db *rm_db;
 	struct tf_rm_element *db;
 	uint32_t pool_size;
+	bool shared_session = 0;
 
 	TF_CHECK_PARMS2(tfp, parms);
 
@@ -460,7 +535,6 @@  tf_rm_create_db(struct tf *tfp,
 	/* Need device max number of elements for the RM QCAPS */
 	rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
 
-
 	/* Allocate memory for RM QCAPS request */
 	cparms.nitems = max_types;
 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
@@ -496,12 +570,15 @@  tf_rm_create_db(struct tf *tfp,
 	tfp_memcpy(req_cnt, parms->alloc_cnt,
 		   parms->num_elements * sizeof(uint16_t));
 
+	shared_session = tf_session_is_shared_session(tfs);
+
 	/* Update the req_cnt based upon the element configuration
 	 */
 	tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
 					 parms->alloc_cnt,
 					 parms->num_elements,
-					 req_cnt);
+					 req_cnt,
+					 shared_session);
 
 	/* Process capabilities against DB requirements. However, as a
 	 * DB can hold elements that are not HCAPI we can reduce the
@@ -517,6 +594,12 @@  tf_rm_create_db(struct tf *tfp,
 				       &hcapi_items);
 
 	if (hcapi_items == 0) {
+#if (TF_RM_DEBUG == 1)
+		TFP_DRV_LOG(INFO,
+			"%s: module: %s Empty RM DB create request\n",
+			tf_dir_2_str(parms->dir),
+			tf_module_2_str(parms->module));
+#endif
 		parms->rm_db = NULL;
 		return -ENOMEM;
 	}
@@ -565,11 +648,11 @@  tf_rm_create_db(struct tf *tfp,
 							      hcapi_type,
 							      &type_str);
 				TFP_DRV_LOG(ERR,
-					    "Failure, %s:%d:%s req:%d avail:%d\n",
-					    tf_dir_2_str(parms->dir),
-					    hcapi_type, type_str,
-					    req_cnt[i],
-					    query[hcapi_type].max);
+					"Failure, %s:%d:%s req:%d avail:%d\n",
+					tf_dir_2_str(parms->dir),
+					hcapi_type, type_str,
+					req_cnt[i],
+					query[hcapi_type].max);
 				return -EINVAL;
 			}
 		}
@@ -689,6 +772,13 @@  tf_rm_create_db(struct tf *tfp,
 	rm_db->module = parms->module;
 	*parms->rm_db = (void *)rm_db;
 
+#if (TF_RM_DEBUG == 1)
+
+	printf("%s: module:%s\n",
+	       tf_dir_2_str(parms->dir),
+	       tf_module_2_str(parms->module));
+#endif /* (TF_RM_DEBUG == 1) */
+
 	tfp_free((void *)req);
 	tfp_free((void *)resv);
 	tfp_free((void *)req_cnt);
@@ -922,6 +1012,13 @@  tf_rm_create_db_no_reservation(struct tf *tfp,
 	rm_db->module = parms->module;
 	*parms->rm_db = (void *)rm_db;
 
+#if (TF_RM_DEBUG == 1)
+
+	printf("%s: module:%s\n",
+	       tf_dir_2_str(parms->dir),
+	       tf_module_2_str(parms->module));
+#endif /* (TF_RM_DEBUG == 1) */
+
 	tfp_free((void *)req);
 	tfp_free((void *)resv);
 	tfp_free((void *)req_cnt);
@@ -1185,7 +1282,6 @@  tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
 
 	cfg_type = rm_db->db[parms->subtype].cfg_type;
 
-
 	/* Bail out if not controlled by RM */
 	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
 	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
diff --git a/drivers/net/bnxt/tf_core/tf_tbl_sram.c b/drivers/net/bnxt/tf_core/tf_tbl_sram.c
index d7727f7a11..167078a8c6 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl_sram.c
+++ b/drivers/net/bnxt/tf_core/tf_tbl_sram.c
@@ -21,6 +21,10 @@ 
 
 #define DBG_SRAM 0
 
+#define TF_TBL_PTR_TO_RM(new_idx, idx, base, shift) {		\
+		*(new_idx) = (((idx) >> (shift)) - (base));	\
+}
+
 /**
  * tf_sram_tbl_get_info_parms parameter definition
  */
@@ -394,6 +398,7 @@  tf_tbl_sram_set(struct tf *tfp,
 {
 	int rc;
 	bool allocated = 0;
+	int rallocated = 0;
 	uint16_t hcapi_type;
 	struct tf_rm_get_hcapi_parms hparms = { 0 };
 	struct tf_session *tfs;
@@ -402,7 +407,9 @@  tf_tbl_sram_set(struct tf *tfp,
 	void *tbl_db_ptr = NULL;
 	struct tf_tbl_sram_get_info_parms iparms = { 0 };
 	struct tf_sram_mgr_is_allocated_parms aparms = { 0 };
+	struct tf_rm_is_allocated_parms raparms = { 0 };
 	void *sram_handle = NULL;
+	uint16_t base = 0, shift = 0;
 
 
 	TF_CHECK_PARMS3(tfp, parms, parms->data);
@@ -442,23 +449,57 @@  tf_tbl_sram_set(struct tf *tfp,
 		return rc;
 	}
 
-	aparms.sram_offset = parms->idx;
-	aparms.slice_size = iparms.slice_size;
-	aparms.bank_id = iparms.bank_id;
-	aparms.dir = parms->dir;
-	aparms.is_allocated = &allocated;
-	rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
-	if (rc || !allocated) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Entry not allocated:%s idx(%d):(%s)\n",
-			    tf_dir_2_str(parms->dir),
-			    tf_tbl_type_2_str(parms->type),
-			    parms->idx,
-			    strerror(-rc));
-		rc = -ENOMEM;
-		return rc;
+	if (tf_session_is_shared_session(tfs)) {
+		/* Only get table info if required for the device */
+		if (dev->ops->tf_dev_get_tbl_info) {
+			rc = dev->ops->tf_dev_get_tbl_info(tfp,
+							   tbl_db->tbl_db[parms->dir],
+							   parms->type,
+							   &base,
+							   &shift);
+			if (rc) {
+				TFP_DRV_LOG(ERR,
+					    "%s: Failed to get table info:%d\n",
+					    tf_dir_2_str(parms->dir),
+					    parms->type);
+				return rc;
+			}
+		}
+		TF_TBL_PTR_TO_RM(&raparms.index, parms->idx, base, shift);
+
+		raparms.rm_db = tbl_db->tbl_db[parms->dir];
+		raparms.subtype = parms->type;
+		raparms.allocated = &rallocated;
+		rc = tf_rm_is_allocated(&raparms);
+		if (rc)
+			return rc;
+
+		if (rallocated != TF_RM_ALLOCATED_ENTRY_IN_USE) {
+			TFP_DRV_LOG(ERR,
+			   "%s, Invalid or not allocated index, type:%s, idx:%d\n",
+			   tf_dir_2_str(parms->dir),
+			   tf_tbl_type_2_str(parms->type),
+			   parms->idx);
+			return -EINVAL;
+		}
+	} else {
+		aparms.sram_offset = parms->idx;
+		aparms.slice_size = iparms.slice_size;
+		aparms.bank_id = iparms.bank_id;
+		aparms.dir = parms->dir;
+		aparms.is_allocated = &allocated;
+		rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
+		if (rc || !allocated) {
+			TFP_DRV_LOG(ERR,
+				    "%s: Entry not allocated:%s idx(%d):(%s)\n",
+				    tf_dir_2_str(parms->dir),
+				    tf_tbl_type_2_str(parms->type),
+				    parms->idx,
+				    strerror(-rc));
+			rc = -ENOMEM;
+			return rc;
+		}
 	}
-
 	/* Set the entry */
 	hparms.rm_db = tbl_db->tbl_db[parms->dir];
 	hparms.subtype = parms->type;