[v6,36/47] net/bnxt: tf_ulp: add support for rss flow query to ULP

Message ID 20241021082607.232829-37-sriharsha.basavapatna@broadcom.com (mailing list archive)
State Changes Requested, archived
Delegated to: Ajit Khaparde
Series TruFlow update for Thor2

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Sriharsha Basavapatna Oct. 21, 2024, 8:25 a.m. UTC
From: Randy Schacher <stuart.schacher@broadcom.com>

Add support for the RSS flow query command for TruFlow in the ULP layer.
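
With this change an application can retrieve the RSS configuration of an
offloaded flow through the standard rte_flow_query() path; on bnxt the PMD
fills the result from the VNIC backing the flow. A minimal usage sketch
follows; the function name and calling context are illustrative only and
are not part of this patch:

#include <stdio.h>
#include <inttypes.h>
#include <rte_flow.h>

/* Illustrative only: query the RSS configuration of a flow that was
 * created with an RSS action on 'port_id'.
 */
static int
query_flow_rss(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_action_rss rss_conf = { 0 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
	};
	struct rte_flow_error error = { 0 };
	int ret;

	/* The PMD fills 'rss_conf' with the flow's RSS settings. */
	ret = rte_flow_query(port_id, flow, &action, &rss_conf, &error);
	if (ret != 0)
		return ret;

	printf("RSS: func %d level %u types 0x%" PRIx64 " queues %u\n",
	       rss_conf.func, rss_conf.level, rss_conf.types,
	       rss_conf.queue_num);
	return 0;
}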

Signed-off-by: Randy Schacher <stuart.schacher@broadcom.com>
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt_vnic.c            | 39 +++++++++++++-
 drivers/net/bnxt/bnxt_vnic.h            |  6 +++
 drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c | 71 +++++++++++++++++++++++++
 3 files changed, 114 insertions(+), 2 deletions(-)
  

Patch

diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 29124a6b9d..7b028f2ee5 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -30,6 +30,7 @@ 
 			((BNXT_VNIC_BITMAP_SIZE - 1) - \
 			((i) % BNXT_VNIC_BITMAP_SIZE))) & 1)
 
+static uint16_t rss_query_queues[BNXT_VNIC_MAX_QUEUE_SIZE];
 /*
  * VNIC Functions
  */
@@ -777,16 +778,21 @@  bnxt_vnic_rss_create(struct bnxt *bp,
 	}
 
 	/* hwrm_type conversion */
+	vnic->hash_f = rss_info->rss_func;
+	vnic->rss_types = rss_info->rss_types;
 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_info->rss_types);
 	vnic->hash_mode = bnxt_rte_to_hwrm_hash_level(bp, rss_info->rss_types,
 						      rss_info->rss_level);
 
 	/* configure the key */
-	if (!rss_info->key_len)
+	if (!rss_info->key_len) {
 		/* If hash key has not been specified, use random hash key.*/
 		bnxt_prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
-	else
+		vnic->key_len = HW_HASH_KEY_SIZE;
+	} else {
 		memcpy(vnic->rss_hash_key, rss_info->key, rss_info->key_len);
+		vnic->key_len = rss_info->key_len;
+	}
 
 	/* Prepare the indirection table */
 	bnxt_vnic_populate_rss_table(bp, vnic);
@@ -820,6 +826,35 @@  bnxt_vnic_rss_create(struct bnxt *bp,
 	return NULL;
 }
 
+void
+bnxt_vnic_rss_query_info_fill(struct bnxt *bp,
+			      struct rte_flow_action_rss *rss_conf,
+			      uint16_t vnic_id)
+{
+	struct bnxt_vnic_info *vnic_info;
+	int idx;
+
+	vnic_info = bnxt_vnic_queue_db_get_vnic(bp, vnic_id);
+	if (vnic_info == NULL) {
+		PMD_DRV_LOG_LINE(ERR, "lookup failed for id %d", vnic_id);
+		return;
+	}
+
+	rss_conf->key_len = vnic_info->key_len;
+	rss_conf->key = vnic_info->rss_hash_key;
+	rss_conf->func = vnic_info->hash_f;
+	rss_conf->level = vnic_info->hash_mode;
+	rss_conf->types = vnic_info->rss_types;
+
+	memset(rss_query_queues, 0, sizeof(rss_query_queues));
+	for (idx = 0; idx < BNXT_VNIC_MAX_QUEUE_SIZE; idx++)
+		if (BNXT_VNIC_BITMAP_GET(vnic_info->queue_bitmap, idx)) {
+			rss_query_queues[rss_conf->queue_num] = idx;
+			rss_conf->queue_num += 1;
+		}
+	rss_conf->queue = (const uint16_t *)&rss_query_queues;
+}
+
 int32_t
 bnxt_vnic_rss_queue_status_update(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index fe3fc4e540..c4a7c5257c 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -75,10 +75,13 @@  struct bnxt_vnic_info {
 	bool		rss_dflt_cr;
 	uint16_t	ref_cnt;
 	uint64_t	queue_bitmap[BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS];
+	uint64_t	rss_types;
+	uint32_t	key_len; /**< Hash key length in bytes. */
 
 	STAILQ_HEAD(, bnxt_filter_info)	filter;
 	STAILQ_HEAD(, rte_flow)	flow_list;
 	uint8_t		ring_select_mode;
+	enum rte_eth_hash_function hash_f;
 	enum rte_eth_hash_function hash_f_local;
 	uint64_t	rss_types_local;
 	uint8_t         metadata_format;
@@ -121,6 +124,9 @@  int32_t bnxt_vnic_queue_db_init(struct bnxt *bp);
 int32_t bnxt_vnic_queue_db_deinit(struct bnxt *bp);
 
 void bnxt_vnic_queue_db_update_dlft_vnic(struct bnxt *bp);
+void bnxt_vnic_rss_query_info_fill(struct bnxt *bp,
+				   struct rte_flow_action_rss *rss_conf,
+				   uint16_t vnic_id);
 int32_t
 bnxt_vnic_rss_queue_status_update(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
index 85fb03e922..4c99c785c0 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
@@ -577,6 +577,66 @@  bnxt_ulp_flow_flush(struct rte_eth_dev *eth_dev,
 	return ret;
 }
 
+/*
+ * Fill the rte_flow_action_rss 'rss_conf' argument passed
+ * in rte_flow_query() with the RSS configuration recorded
+ * for the VNIC backing the flow.
+ *
+ * ctxt [in] The ulp context for the flow database
+ *
+ * flow_id [in] The HW flow ID
+ *
+ * rss_conf [out] The rte_flow_action_rss 'data' that is set
+ *
+ */
+static int ulp_flow_query_rss_get(struct bnxt_ulp_context *ctxt,
+			   uint32_t flow_id,
+			   struct rte_flow_action_rss *rss_conf)
+{
+	struct ulp_flow_db_res_params params;
+	uint32_t nxt_resource_index = 0;
+	bool found_cntr_resource = false;
+	struct bnxt *bp;
+	uint16_t vnic_id = 0;
+	int rc = 0;
+
+	bp = bnxt_ulp_cntxt_bp_get(ctxt);
+	if (!bp) {
+		BNXT_DRV_DBG(ERR, "Failed to get bp from ulp cntxt\n");
+		return -EINVAL;
+	}
+
+	if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt)) {
+		BNXT_DRV_DBG(ERR, "Flow db lock acquire failed\n");
+		return -EINVAL;
+	}
+	do {
+		rc = ulp_flow_db_resource_get(ctxt,
+					      BNXT_ULP_FDB_TYPE_REGULAR,
+					      flow_id,
+					      &nxt_resource_index,
+					      &params);
+		if (params.resource_func ==
+		     BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE &&
+		     (params.resource_sub_type ==
+		       BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_RSS ||
+		      params.resource_sub_type ==
+		       BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_QUEUE)) {
+			vnic_id = params.resource_hndl;
+			found_cntr_resource = true;
+			break;
+		}
+
+	} while (!rc && nxt_resource_index);
+
+	if (found_cntr_resource)
+		bnxt_vnic_rss_query_info_fill(bp, rss_conf, vnic_id);
+
+	bnxt_ulp_cntxt_release_fdb_lock(ctxt);
+
+	return rc;
+}
+
 /* Function to query the rte flows. */
 static int32_t
 bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
@@ -587,6 +647,7 @@  bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
 {
 	int rc = 0;
 	struct bnxt_ulp_context *ulp_ctx;
+	struct rte_flow_action_rss *rss_conf;
 	struct rte_flow_query_count *count;
 	uint32_t flow_id;
 
@@ -602,6 +663,16 @@  bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
 	flow_id = (uint32_t)(uintptr_t)flow;
 
 	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_RSS:
+		rss_conf = (struct rte_flow_action_rss *)data;
+		rc = ulp_flow_query_rss_get(ulp_ctx, flow_id, rss_conf);
+		if (rc) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to query RSS info.");
+		}
+
+		break;
 	case RTE_FLOW_ACTION_TYPE_COUNT:
 		count = data;
 		rc = ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, count);