[06/11] net/bnxt: update to new version of backing store

Message ID: 20210224155553.26893-7-kalesh-anakkur.purayil@broadcom.com (mailing list archive)
State: Superseded, archived
Delegated to: Ajit Khaparde
Series: bnxt fixes

Checks

ci/checkpatch: warning (coding style issues)

Commit Message

Kalesh A P Feb. 24, 2021, 3:55 p.m. UTC
  From: Ajit Khaparde <ajit.khaparde@broadcom.com>

Update the HWRM headers to version 1.10.2.15, which updates the
backing store API for additional TQM rings.
Add support for the 9th TQM ring using the latest firmware interface.
Also make sure that only the necessary bits are set in the enables
field of the backing store config request.

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
 drivers/net/bnxt/bnxt.h        | 16 ++++++++++++----
 drivers/net/bnxt/bnxt_ethdev.c | 11 +++++++++--
 drivers/net/bnxt/bnxt_hwrm.c   | 36 +++++++++++++++++++++++++++++++++---
 3 files changed, 54 insertions(+), 9 deletions(-)
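
The enables handling mentioned in the commit message can be seen in the
bnxt_alloc_ctx_mem() hunk below: index 0 is the slow-path (SP) ring,
indices 1 to 8 are the legacy fast-path rings with consecutive enable
bits, and the 9th fast-path ring uses the dedicated TQM_RING8 enable.
The standalone sketch below illustrates that mapping; the macro names
and bit positions are placeholders for illustration only, not the real
values from the generated HWRM header.

/* Illustrative only: bit positions below are assumed, not the real
 * HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_* values.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_TQM_SP_RINGS	1
#define MAX_TQM_FP_LEGACY_RINGS	8
#define MAX_TQM_LEGACY_RINGS	(MAX_TQM_SP_RINGS + MAX_TQM_FP_LEGACY_RINGS)

#define ENABLES_TQM_SP		(1U << 0)	/* placeholder bit */
#define ENABLES_TQM_RING8	(1U << 9)	/* placeholder bit */

/* Build the enables mask for 'tqm_rings' contexts (SP + fast-path rings). */
static uint32_t tqm_enables(unsigned int tqm_rings)
{
	uint32_t ena = 0;
	unsigned int i;

	for (i = 0; i < tqm_rings; i++) {
		if (i < MAX_TQM_LEGACY_RINGS)
			/* SP and FP rings 0-7 use consecutive enable bits. */
			ena |= ENABLES_TQM_SP << i;
		else
			/* The 9th FP ring has its own enable bit. */
			ena |= ENABLES_TQM_RING8;
	}
	return ena;
}

int main(void)
{
	printf("8 FP rings -> enables 0x%03x\n", tqm_enables(1 + 8));
	printf("9 FP rings -> enables 0x%03x\n", tqm_enables(1 + 9));
	return 0;
}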
  

Patch

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index b4370e5..bf3459e 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -390,9 +390,17 @@  struct bnxt_coal {
 #define BNXT_MAX_RSS_CTXTS_P5 \
 	(BNXT_RSS_TBL_SIZE_P5 / BNXT_RSS_ENTRIES_PER_CTX_P5)
 
-#define BNXT_MAX_TC    8
-#define BNXT_MAX_QUEUE 8
-#define BNXT_MAX_TC_Q  (BNXT_MAX_TC + 1)
+#define BNXT_MAX_QUEUE			8
+#define BNXT_MAX_TQM_SP_RINGS		1
+#define BNXT_MAX_TQM_FP_LEGACY_RINGS	8
+#define BNXT_MAX_TQM_FP_RINGS		9
+#define BNXT_MAX_TQM_LEGACY_RINGS	\
+	(BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_LEGACY_RINGS)
+#define BNXT_MAX_TQM_RINGS		\
+	(BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+#define BNXT_BACKING_STORE_CFG_LEGACY_LEN	256
+#define BNXT_BACKING_STORE_CFG_LEN	\
+	sizeof(struct hwrm_func_backing_store_cfg_input)
 #define BNXT_PAGE_SHFT 12
 #define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHFT)
 #define MAX_CTX_PAGES  (BNXT_PAGE_SIZE / 8)
@@ -461,7 +469,7 @@  struct bnxt_ctx_mem_info {
 	struct bnxt_ctx_pg_info cq_mem;
 	struct bnxt_ctx_pg_info vnic_mem;
 	struct bnxt_ctx_pg_info stat_mem;
-	struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TC_Q];
+	struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
 };
 
 struct bnxt_ctx_mem_buf_info {
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 22c880c..17bda86 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -4326,15 +4326,22 @@  int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	entries = clamp_t(uint32_t, entries, min,
 			  ctx->tqm_max_entries_per_ring);
 	for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
+		/* i=0 is for TQM_SP. i=1 to i=8 applies to RING0 to RING7.
+		 * i > 8 is other ext rings.
+		 */
 		ctx_pg = ctx->tqm_mem[i];
 		ctx_pg->entries = i ? entries : entries_sp;
 		if (ctx->tqm_entry_size) {
 			mem_size = ctx->tqm_entry_size * ctx_pg->entries;
-			rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
+			rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size,
+						    "tqm_mem", i);
 			if (rc)
 				return rc;
 		}
-		ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
+		if (i < BNXT_MAX_TQM_LEGACY_RINGS)
+			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
+		else
+			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8;
 	}
 
 	ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index e11502c..fc47950 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -27,7 +27,7 @@ 
 #define HWRM_SPEC_CODE_1_8_3		0x10803
 #define HWRM_VERSION_1_9_1		0x10901
 #define HWRM_VERSION_1_9_2		0x10903
-
+#define HWRM_VERSION_1_10_2_13		0x10a020d
 struct bnxt_plcmodes_cfg {
 	uint32_t	flags;
 	uint16_t	jumbo_thresh;
@@ -105,6 +105,11 @@  static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 
 	timeout = bp->hwrm_cmd_timeout;
 
+	/* Update the message length for backing store config for new FW. */
+	if (bp->fw_ver >= HWRM_VERSION_1_10_2_13 &&
+	    rte_cpu_to_le_16(req->req_type) == HWRM_FUNC_BACKING_STORE_CFG)
+		msg_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
+
 	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
 	    msg_len > bp->max_req_len) {
 		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
@@ -5120,8 +5125,21 @@  int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 	ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
 	ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
 
-	if (!ctx->tqm_fp_rings_count)
-		ctx->tqm_fp_rings_count = bp->max_q;
+	ctx->tqm_fp_rings_count = ctx->tqm_fp_rings_count ?
+				  RTE_MIN(ctx->tqm_fp_rings_count,
+					  BNXT_MAX_TQM_FP_LEGACY_RINGS) :
+				  bp->max_q;
+
+	/* Check if the ext ring count needs to be counted.
+	 * Ext ring count is available only with new FW so we should not
+	 * look at the field on older FW.
+	 */
+	if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
+	    bp->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
+		ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
+		ctx->tqm_fp_rings_count = RTE_MIN(BNXT_MAX_TQM_FP_RINGS,
+						  ctx->tqm_fp_rings_count);
+	}
 
 	tqm_rings = ctx->tqm_fp_rings_count + 1;
 
@@ -5232,6 +5250,18 @@  int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
 	}
 
+	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
+		/* DPDK does not need to configure MRAV and TIM type.
+		 * So we are skipping over MRAV and TIM. Skip to configure
+		 * HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8.
+		 */
+		ctx_pg = ctx->tqm_mem[BNXT_MAX_TQM_LEGACY_RINGS];
+		req.tqm_ring8_num_entries = rte_cpu_to_le_16(ctx_pg->entries);
+		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+				      &req.tqm_ring8_pg_size_tqm_ring_lvl,
+				      &req.tqm_ring8_page_dir);
+	}
+
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
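
As a rough illustration of the ring-count logic added to
bnxt_hwrm_func_backing_store_qcaps() above, the sketch below mirrors the
computation with plain stand-in parameters: the firmware-reported count is
clamped to the eight legacy fast-path rings, and the extended count field
is only consulted when the firmware is new enough to report it. The
"new firmware" condition is approximated here by a boolean; in the driver
the check is hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TQM_FP_LEGACY_RINGS	8
#define MAX_TQM_FP_RINGS	9

#define MIN(a, b)		((a) < (b) ? (a) : (b))

/* Stand-in for the qcaps handling: 'count' and 'count_ext' model the
 * tqm_fp_rings_count and tqm_fp_rings_count_ext response fields.
 */
static uint8_t fp_rings_count(uint8_t count, uint8_t count_ext,
			      uint8_t max_q, bool fw_has_ext_field)
{
	uint8_t rings;

	/* Older firmware reports zero; fall back to the queue count. */
	rings = count ? MIN(count, MAX_TQM_FP_LEGACY_RINGS) : max_q;

	/* Only look at the extended field on new firmware, and only when
	 * all legacy fast-path rings are already in use.
	 */
	if (rings == MAX_TQM_FP_LEGACY_RINGS && fw_has_ext_field)
		rings = MIN(MAX_TQM_FP_RINGS, rings + count_ext);

	return rings;
}

int main(void)
{
	printf("old FW: %u FP rings\n", fp_rings_count(8, 0, 8, false)); /* 8 */
	printf("new FW: %u FP rings\n", fp_rings_count(8, 1, 8, true));  /* 9 */
	return 0;
}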