[dpdk-dev,28/53] net/qede/base: remove unused parameters

Message ID 1505784633-1171-29-git-send-email-rasesh.mody@cavium.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit

Checks

Context              Check    Description
ci/checkpatch        warning  coding style issues
ci/Intel-compilation success  Compilation OK

Commit Message

Mody, Rasesh Sept. 19, 2017, 1:30 a.m. UTC
  This is an attempt to clean up many unused API parameters across the base
code. Most of the changes remove unused p_hwfn or p_ptt handles. The warnings
were generated by building with the '-Wunused-parameter' cflag.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/bcm_osal.h            |    1 +
 drivers/net/qede/base/ecore.h               |    3 +-
 drivers/net/qede/base/ecore_cxt.c           |    7 +-
 drivers/net/qede/base/ecore_dcbx.c          |   45 +++++------
 drivers/net/qede/base/ecore_dev.c           |   16 ++--
 drivers/net/qede/base/ecore_hw.c            |    6 +-
 drivers/net/qede/base/ecore_hw.h            |    4 +-
 drivers/net/qede/base/ecore_init_fw_funcs.c |   62 +++++-----------
 drivers/net/qede/base/ecore_init_fw_funcs.h |   78 +++++++------------
 drivers/net/qede/base/ecore_init_ops.c      |   39 +++++-----
 drivers/net/qede/base/ecore_l2.c            |   34 +++------
 drivers/net/qede/base/ecore_l2.h            |   28 -------
 drivers/net/qede/base/ecore_l2_api.h        |   26 +++++++
 drivers/net/qede/base/ecore_mcp.c           |   29 +++-----
 drivers/net/qede/base/ecore_mcp.h           |   11 +--
 drivers/net/qede/base/ecore_mng_tlv.c       |    9 +--
 drivers/net/qede/base/ecore_spq.c           |    5 +-
 drivers/net/qede/base/ecore_sriov.c         |  107 ++++++++++++---------------
 drivers/net/qede/base/ecore_sriov.h         |    8 +-
 drivers/net/qede/base/ecore_vf.c            |   66 ++++++++---------
 drivers/net/qede/base/ecore_vf.h            |   12 +--
 drivers/net/qede/qede_fdir.c                |    2 +-
 22 files changed, 243 insertions(+), 355 deletions(-)
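
The diff below applies two recurring patterns. Where a p_hwfn or p_ptt handle
is genuinely unused, the parameter is dropped from the prototype and from every
call site; where the parameter has to stay (a fixed callback signature, or a
value referenced only under a CONFIG_ECORE_* #ifdef), it is annotated with the
new OSAL_UNUSED macro so that -Wunused-parameter stays quiet. The standalone C
sketch below is illustrative only; the struct and function names are invented
for the example and are not part of the driver.

/* Illustrative sketch only -- not driver code.  Build with:
 *   gcc -Wall -Wextra -Wunused-parameter example.c
 */
#include <stdio.h>

#define OSAL_UNUSED __attribute__((unused))	/* same idea as the bcm_osal.h addition */

struct ptt { unsigned int offset; };		/* stand-in type for the example */

/* Pattern 1: the handle is never referenced, so it is removed from the
 * prototype and from every caller.
 * Before: static unsigned int ptt_hw_addr(struct hwfn *p_hwfn, struct ptt *p_ptt);
 */
static unsigned int ptt_hw_addr(struct ptt *p_ptt)
{
	return p_ptt->offset << 2;	/* DWORDs to bytes */
}

/* Pattern 2: the parameter must remain (e.g. a fixed callback signature, or
 * it is used only under an #ifdef), so it is annotated instead of removed.
 */
static void event_cb(void *cookie, struct ptt OSAL_UNUSED * p_unused)
{
	printf("completion for cookie %p\n", cookie);
}

int main(void)
{
	struct ptt ptt = { .offset = 4 };

	printf("hw addr = %u\n", ptt_hw_addr(&ptt));
	event_cb(&ptt, NULL);
	return 0;
}

Keeping the annotation rather than deleting the parameter is what the patch
does for ecore_init_cmd_cb(), ecore_spq_blocking_cb() and the
CONFIG_ECORE_BINARY_FW/CONFIG_ECORE_SW_CHANNEL conditionals, where the
signature or an #ifdef still needs the argument.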
  

Patch

diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 70b1a7f..6368030 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -420,6 +420,7 @@  void qede_hw_err_notify(struct ecore_hwfn *p_hwfn,
 #define OSAL_PAGE_SIZE 4096
 #define OSAL_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE
 #define OSAL_IOMEM volatile
+#define OSAL_UNUSED    __attribute__((unused))
 #define OSAL_UNLIKELY(x)  __builtin_expect(!!(x), 0)
 #define OSAL_MIN_T(type, __min1, __min2)	\
 	((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2))
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index d921d9e..73024da 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -828,8 +828,7 @@  struct ecore_dev {
  *
  * @return OSAL_INLINE u8
  */
-static OSAL_INLINE u8
-ecore_concrete_to_sw_fid(__rte_unused struct ecore_dev *p_dev, u32 concrete_fid)
+static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid)
 {
 	u8 vfid     = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
 	u8 pfid     = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 73dc7cb..24aeda9 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -297,8 +297,8 @@  struct ecore_tm_iids {
 	u32 per_vf_tids;
 };
 
-static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
-					  struct ecore_tm_iids *iids)
+static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
+			      struct ecore_tm_iids *iids)
 {
 	bool tm_vf_required = false;
 	bool tm_required = false;
@@ -687,7 +687,7 @@  enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
 	p_blk = &p_cli->pf_blks[0];
 
 	ecore_cxt_qm_iids(p_hwfn, &qm_iids);
-	total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+	total = ecore_qm_pf_mem_size(qm_iids.cids,
 				     qm_iids.vf_cids, qm_iids.tids,
 				     p_hwfn->qm_info.num_pqs,
 				     p_hwfn->qm_info.num_vf_pqs);
@@ -1436,7 +1436,6 @@  void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 
 	ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
 			    p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
-			    p_hwfn->first_on_engine,
 			    iids.cids, iids.vf_cids, iids.tids,
 			    qm_info->start_pq,
 			    qm_info->num_pqs - qm_info->num_vf_pqs,
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index e7848c7..25ae21c 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -545,7 +545,6 @@  static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 
 static void
 ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
-			    struct ecore_ptt *p_ptt,
 			    struct ecore_dcbx_get *params)
 {
 	struct dcbx_features *p_feat;
@@ -559,7 +558,6 @@  static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 
 static void
 ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
-			     struct ecore_ptt *p_ptt,
 			     struct ecore_dcbx_get *params)
 {
 	struct dcbx_features *p_feat;
@@ -574,7 +572,6 @@  static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 
 static enum _ecore_status_t
 ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
-				  struct ecore_ptt *p_ptt,
 				  struct ecore_dcbx_get *params)
 {
 	struct ecore_dcbx_operational_params *p_operational;
@@ -633,10 +630,8 @@  static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 	return ECORE_SUCCESS;
 }
 
-static void
-ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
-			   struct ecore_ptt *p_ptt,
-			   struct ecore_dcbx_get *params)
+static void  ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
+					struct ecore_dcbx_get *params)
 {
 	struct ecore_dcbx_dscp_params *p_dscp;
 	struct dcb_dscp_map *p_dscp_map;
@@ -660,10 +655,8 @@  static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 	}
 }
 
-static void
-ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
-				 struct ecore_ptt *p_ptt,
-				 struct ecore_dcbx_get *params)
+static void ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
+					     struct ecore_dcbx_get *params)
 {
 	struct lldp_config_params_s *p_local;
 
@@ -676,10 +669,8 @@  static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 		    OSAL_ARRAY_SIZE(p_local->local_port_id));
 }
 
-static void
-ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
-				  struct ecore_ptt *p_ptt,
-				  struct ecore_dcbx_get *params)
+static void ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
+					      struct ecore_dcbx_get *params)
 {
 	struct lldp_status_params_s *p_remote;
 
@@ -693,34 +684,32 @@  static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
 }
 
 static enum _ecore_status_t
-ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
 		      struct ecore_dcbx_get *p_params,
 		      enum ecore_mib_read_type type)
 {
-	enum _ecore_status_t rc = ECORE_SUCCESS;
-
 	switch (type) {
 	case ECORE_DCBX_REMOTE_MIB:
-		ecore_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+		ecore_dcbx_get_remote_params(p_hwfn, p_params);
 		break;
 	case ECORE_DCBX_LOCAL_MIB:
-		ecore_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+		ecore_dcbx_get_local_params(p_hwfn, p_params);
 		break;
 	case ECORE_DCBX_OPERATIONAL_MIB:
-		ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+		ecore_dcbx_get_operational_params(p_hwfn, p_params);
 		break;
 	case ECORE_DCBX_REMOTE_LLDP_MIB:
-		ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+		ecore_dcbx_get_remote_lldp_params(p_hwfn, p_params);
 		break;
 	case ECORE_DCBX_LOCAL_LLDP_MIB:
-		ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+		ecore_dcbx_get_local_lldp_params(p_hwfn, p_params);
 		break;
 	default:
 		DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
 		return ECORE_INVAL;
 	}
 
-	return rc;
+	return ECORE_SUCCESS;
 }
 
 static enum _ecore_status_t
@@ -869,8 +858,7 @@  enum _ecore_status_t
 		return rc;
 
 	if (type == ECORE_DCBX_OPERATIONAL_MIB) {
-		ecore_dcbx_get_dscp_params(p_hwfn, p_ptt,
-					   &p_hwfn->p_dcbx_info->get);
+		ecore_dcbx_get_dscp_params(p_hwfn, &p_hwfn->p_dcbx_info->get);
 
 		rc = ecore_dcbx_process_mib_info(p_hwfn);
 		if (!rc) {
@@ -890,7 +878,8 @@  enum _ecore_status_t
 			enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
 		}
 	}
-	ecore_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
+
+	ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
 
 	/* Update the DSCP to TC mapping bit if required */
 	if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
@@ -978,7 +967,7 @@  enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
 	if (rc != ECORE_SUCCESS)
 		goto out;
 
-	rc = ecore_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+	rc = ecore_dcbx_get_params(p_hwfn, p_get, type);
 
 out:
 	ecore_ptt_release(p_hwfn, p_ptt);
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 711a824..c185323 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -3187,8 +3187,7 @@  static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
 	 * Old drivers that don't acquire the lock can run in parallel, and
 	 * their allocation values won't be affected by the updated max values.
 	 */
-	ecore_mcp_resc_lock_default_init(p_hwfn, &resc_lock_params,
-					 &resc_unlock_params,
+	ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
 					 ECORE_RESC_LOCK_RESC_ALLOC, false);
 
 	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
@@ -5117,8 +5116,7 @@  static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
 	}
 }
 
-static void
-ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
+static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
 {
 	int i;
 
@@ -5127,8 +5125,7 @@  static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
 }
 
 static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
-					     struct ecore_ptt *p_ptt,
-					     u32 min_pf_rate)
+					     struct ecore_ptt *p_ptt)
 {
 	struct init_qm_vport_params *vport_params;
 	int i;
@@ -5136,7 +5133,7 @@  static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
 	vport_params = p_hwfn->qm_info.qm_vport_params;
 
 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
-		ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
+		ecore_init_wfq_default_param(p_hwfn);
 		ecore_init_vport_wfq(p_hwfn, p_ptt,
 				     vport_params[i].first_tx_pq_id,
 				     vport_params[i].vport_wfq);
@@ -5290,7 +5287,7 @@  static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
 	if (rc == ECORE_SUCCESS && use_wfq)
 		ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
 	else
-		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
 
 	return rc;
 }
@@ -5493,8 +5490,7 @@  void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 	p_link = &p_hwfn->mcp_info->link_output;
 
 	if (p_link->min_pf_rate)
-		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
-						 p_link->min_pf_rate);
+		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
 
 	OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
 		    sizeof(*p_hwfn->qm_info.wfq_data) *
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 31e2776..36457ac 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -136,7 +136,7 @@  void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 	OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
 }
 
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
 {
 	/* The HW is using DWORDS and we need to translate it to Bytes */
 	return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
@@ -159,7 +159,7 @@  void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
 {
 	u32 prev_hw_addr;
 
-	prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+	prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
 
 	if (new_hw_addr == prev_hw_addr)
 		return;
@@ -181,7 +181,7 @@  void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
 static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
 			 struct ecore_ptt *p_ptt, u32 hw_addr)
 {
-	u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+	u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
 	u32 offset;
 
 	offset = hw_addr - win_hw_addr;
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index 726bc18..0f3e88b 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -102,13 +102,11 @@  void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
 /**
  * @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
  *
- * @param p_hwfn
  * @param p_ptt
  *
  * @return u32
  */
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn	*p_hwfn,
-			  struct ecore_ptt	*p_ptt);
+u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt);
 
 /**
  * @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index b5ef173..ad697ad 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -361,7 +361,6 @@  static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
 				    u8 port_id,
 				    u8 pf_id,
 				    u8 max_phys_tcs_per_port,
-				    bool is_first_pf,
 				    u32 num_pf_cids,
 				    u32 num_vf_cids,
 				    u16 start_pq,
@@ -473,10 +472,10 @@  static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
 
 /* Prepare Other PQ mapping runtime init values for the specified PF */
 static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
-				       u8 port_id,
 				       u8 pf_id,
 				       u32 num_pf_cids,
-				       u32 num_tids, u32 base_mem_addr_4kb)
+				       u32 num_tids,
+				       u32 base_mem_addr_4kb)
 {
 	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
 	u16 i, pq_id, pq_group;
@@ -684,10 +683,11 @@  static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
 
 /******************** INTERFACE IMPLEMENTATION *********************/
 
-u32 ecore_qm_pf_mem_size(u8 pf_id,
-			 u32 num_pf_cids,
+u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
 			 u32 num_vf_cids,
-			 u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
+			 u32 num_tids,
+			 u16 num_pf_pqs,
+			 u16 num_vf_pqs)
 {
 	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
 	    QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
@@ -748,7 +748,6 @@  int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
 			u8 port_id,
 			u8 pf_id,
 			u8 max_phys_tcs_per_port,
-			bool is_first_pf,
 			u32 num_pf_cids,
 			u32 num_vf_cids,
 			u32 num_tids,
@@ -775,16 +774,14 @@  int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
 
 	/* Map Other PQs (if any) */
 #if QM_OTHER_PQS_PER_PF > 0
-	ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
-				   num_tids, 0);
+	ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
 #endif
 
 	/* Map Tx PQs */
 	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
-				max_phys_tcs_per_port, is_first_pf, num_pf_cids,
-				num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
-				start_vport, other_mem_size_4kb, pq_params,
-				vport_params);
+				max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
+				start_pq, num_pf_pqs, num_vf_pqs, start_vport,
+				other_mem_size_4kb, pq_params, vport_params);
 
 	/* Init PF WFQ */
 	if (pf_wfq)
@@ -1335,23 +1332,8 @@  void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
 	}
 }
 
-/* In MF should be called once per engine to set EtherType of OuterTag */
-void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
-					struct ecore_ptt *p_ptt, u32 ethType)
-{
-	/* Update PRS register */
-	STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-
-	/* Update NIG register */
-	STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-
-	/* Update PBF register */
-	STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-}
-
 /* In MF should be called once per port to set EtherType of OuterTag */
-void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
-				      struct ecore_ptt *p_ptt, u32 ethType)
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
 {
 	/* Update DORQ register */
 	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
@@ -1733,9 +1715,7 @@  u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
 /* Calculate and return CDU validation byte per connection type / region /
  * cid
  */
-static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
-					 u8 conn_type,
-					 u8 region, u32 cid)
+static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
 {
 	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
 
@@ -1794,9 +1774,8 @@  static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
 }
 
 /* Calcualte and set validation bytes for session context */
-void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
-				       void *p_ctx_mem,
-				       u16 ctx_size, u8 ctx_type, u32 cid)
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+				       u8 ctx_type, u32 cid)
 {
 	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
 
@@ -1807,14 +1786,14 @@  void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
 
 	OSAL_MEMSET(p_ctx, 0, ctx_size);
 
-	*x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
-	*t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
-	*u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
+	*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
+	*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
+	*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
 }
 
 /* Calcualte and set validation bytes for task context */
-void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
-				    u16 ctx_size, u8 ctx_type, u32 tid)
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
+				    u32 tid)
 {
 	u8 *p_ctx, *region1_val_ptr;
 
@@ -1823,8 +1802,7 @@  void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
 
 	OSAL_MEMSET(p_ctx, 0, ctx_size);
 
-	*region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type,
-								1, tid);
+	*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
 }
 
 /* Memset session context to 0 while preserving validation bytes */
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 488dc00..a258bd1 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -18,7 +18,6 @@ 
  * Returns the required host memory size in 4KB units.
  * Must be called before all QM init HSI functions.
  *
- * @param pf_id -	physical function ID
  * @param num_pf_cids - number of connections used by this PF
  * @param num_vf_cids -	number of connections used by VFs of this PF
  * @param num_tids -	number of tasks used by this PF
@@ -27,12 +26,11 @@ 
  *
  * @return The required host memory size in 4KB units.
  */
-u32 ecore_qm_pf_mem_size(u8 pf_id,
-						 u32 num_pf_cids,
-						 u32 num_vf_cids,
-						 u32 num_tids,
-						 u16 num_pf_pqs,
-						 u16 num_vf_pqs);
+u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
+			 u32 num_vf_cids,
+			 u32 num_tids,
+			 u16 num_pf_pqs,
+			 u16 num_vf_pqs);
 
 /**
  * @brief ecore_qm_common_rt_init - Prepare QM runtime init values for engine
@@ -66,7 +64,6 @@  int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
  * @param port_id		- port ID
  * @param pf_id			- PF ID
  * @param max_phys_tcs_per_port	- max number of physical TCs per port in HW
- * @param is_first_pf		- 1 = first PF in engine, 0 = othwerwise
  * @param num_pf_cids		- number of connections used by this PF
  * @param num_vf_cids		- number of connections used by VFs of this PF
  * @param num_tids		- number of tasks used by this PF
@@ -88,23 +85,22 @@  int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
  * @return 0 on success, -1 on error.
  */
 int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
-				struct ecore_ptt *p_ptt,
-				u8 port_id,
-				u8 pf_id,
-				u8 max_phys_tcs_per_port,
-				bool is_first_pf,
-				u32 num_pf_cids,
-				u32 num_vf_cids,
-				u32 num_tids,
-				u16 start_pq,
-				u16 num_pf_pqs,
-				u16 num_vf_pqs,
-				u8 start_vport,
-				u8 num_vports,
-				u16 pf_wfq,
-				u32 pf_rl,
-				struct init_qm_pq_params *pq_params,
-				struct init_qm_vport_params *vport_params);
+			struct ecore_ptt *p_ptt,
+			u8 port_id,
+			u8 pf_id,
+			u8 max_phys_tcs_per_port,
+			u32 num_pf_cids,
+			u32 num_vf_cids,
+			u32 num_tids,
+			u16 start_pq,
+			u16 num_pf_pqs,
+			u16 num_vf_pqs,
+			u8 start_vport,
+			u8 num_vports,
+			u16 pf_wfq,
+			u32 pf_rl,
+			struct init_qm_pq_params *pq_params,
+			struct init_qm_vport_params *vport_params);
 
 /**
  * @brief ecore_init_pf_wfq  Initializes the WFQ weight of the specified PF
@@ -261,28 +257,14 @@  void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
 
 #ifndef UNUSED_HSI_FUNC
 /**
- * @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf and llh
- *                                             ethType Regs to  input ethType
- *                                             should Be called once per engine
- *                                             if engine
- *  is in BD mode.
- *
- * @param p_ptt   - ptt window used for writing the registers.
- * @param ethType - etherType to configure
- */
-void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
-			struct ecore_ptt *p_ptt, u32 ethType);
-
-/**
  * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
  *                                           input ethType should Be called
  *                                           once per port.
  *
- * @param p_ptt   - ptt window used for writing the registers.
+ * @param p_hwfn -	    HW device data
  * @param ethType - etherType to configure
  */
-void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
-			struct ecore_ptt *p_ptt, u32 ethType);
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType);
 #endif /* UNUSED_HSI_FUNC */
 
 /**
@@ -431,25 +413,19 @@  void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
  * @param ctx_type -	context type.
  * @param cid -		context cid.
  */
-void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
-				       void *p_ctx_mem,
-				       u16 ctx_size,
-				       u8 ctx_type,
-				       u32 cid);
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+				       u8 ctx_type, u32 cid);
+
 /**
  * @brief ecore_calc_task_ctx_validation - Calcualte validation byte for task
  * context.
  *
- * @param p_hwfn -		    HW device data
  * @param p_ctx_mem -	pointer to context memory.
  * @param ctx_size -	context size.
  * @param ctx_type -	context type.
  * @param tid -		    context tid.
  */
-void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,
-				    void *p_ctx_mem,
-				    u16 ctx_size,
-				    u8 ctx_type,
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
 				    u32 tid);
 /**
  * @brief ecore_memset_session_ctx - Memset session context to 0 while
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index c76cc07..1a2d2f4 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -176,8 +176,7 @@  static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
 
 static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
 						 struct ecore_ptt *p_ptt,
-						 u32 addr, u32 fill,
-						 u32 fill_count)
+						 u32 addr, u32 fill_count)
 {
 	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
 
@@ -309,7 +308,7 @@  static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
 	case INIT_SRC_ZEROS:
 		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
 		if (b_must_dmae || (b_can_dmae && (data >= 64)))
-			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
 		else
 			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
 		break;
@@ -397,10 +396,13 @@  static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
 		       OSAL_LE32_TO_CPU(cmd->op_data));
 }
 
-/* init_ops callbacks entry point */
+/* init_ops callbacks entry point.
+ * OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings.
+ * Should be removed when the function is actually used.
+ */
 static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
-			      struct ecore_ptt *p_ptt,
-			      struct init_callback_op *p_cmd)
+			      struct ecore_ptt OSAL_UNUSED * p_ptt,
+			      struct init_callback_op OSAL_UNUSED * p_cmd)
 {
 	DP_NOTICE(p_hwfn, true,
 		  "Currently init values have no need of callbacks\n");
@@ -444,8 +446,7 @@  static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
 				 INIT_IF_MODE_OP_CMD_OFFSET);
 }
 
-static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
-				struct init_if_phase_op *p_cmd,
+static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
 				u32 phase, u32 phase_id)
 {
 	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
@@ -500,8 +501,8 @@  enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
 						       modes);
 			break;
 		case INIT_OP_IF_PHASE:
-			cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
-							phase, phase_id);
+			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
+							phase_id);
 			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
 			break;
 		case INIT_OP_DELAY:
@@ -573,7 +574,11 @@  void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
 }
 
 enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
-					const u8 *data)
+#ifdef CONFIG_ECORE_BINARY_FW
+					const u8 *fw_data)
+#else
+					const u8 OSAL_UNUSED * fw_data)
+#endif
 {
 	struct ecore_fw_data *fw = p_dev->fw_data;
 
@@ -581,24 +586,24 @@  enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
 	struct bin_buffer_hdr *buf_hdr;
 	u32 offset, len;
 
-	if (!data) {
+	if (!fw_data) {
 		DP_NOTICE(p_dev, true, "Invalid fw data\n");
 		return ECORE_INVAL;
 	}
 
-	buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
+	buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;
 
 	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
-	fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
+	fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));
 
 	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
-	fw->init_ops = (union init_op *)((uintptr_t)(data + offset));
+	fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));
 
 	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
-	fw->arr_data = (u32 *)((uintptr_t)(data + offset));
+	fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));
 
 	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
-	fw->modes_tree_buf = (u8 *)((uintptr_t)(data + offset));
+	fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
 	len = buf_hdr[BIN_BUF_INIT_CMD].length;
 	fw->init_ops_size = len / sizeof(struct init_raw_op);
 #else
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 3140fdd..3071b46 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -433,8 +433,7 @@  enum _ecore_status_t
 	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
 
 	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
-	p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
-						    p_params->concrete_fid);
+	p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);
 
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
@@ -633,8 +632,7 @@  enum _ecore_status_t
 }
 
 static void
-ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
-			      struct vport_update_ramrod_data *p_ramrod,
+ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
 			      struct ecore_sge_tpa_params *p_params)
 {
 	struct eth_vport_tpa_param *p_tpa;
@@ -665,8 +663,7 @@  enum _ecore_status_t
 }
 
 static void
-ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
-			  struct vport_update_ramrod_data *p_ramrod,
+ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
 			  struct ecore_sp_vport_update_params *p_params)
 {
 	int i;
@@ -775,11 +772,10 @@  enum _ecore_status_t
 	}
 
 	/* Update mcast bins for VFs, PF doesn't use this functionality */
-	ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+	ecore_sp_update_mcast_bin(p_ramrod, p_params);
 
 	ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
-	ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
-				      p_params->sge_tpa_params);
+	ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
 	if (p_params->mtu) {
 		p_ramrod->common.update_mtu_flg = 1;
 		p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
@@ -1503,8 +1499,7 @@  enum _ecore_status_t
  *         Note: crc32_length MUST be aligned to 8
  * Return:
  ******************************************************************************/
-static u32 ecore_calc_crc32c(u8 *crc32_packet,
-			     u32 crc32_length, u32 crc32_seed, u8 complement)
+static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
 {
 	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
 	u8 msb = 0, current_byte = 0;
@@ -1529,25 +1524,23 @@  static u32 ecore_calc_crc32c(u8 *crc32_packet,
 	return crc32_result;
 }
 
-static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
+static u32 ecore_crc32c_le(u32 seed, u8 *mac)
 {
 	u32 packet_buf[2] = { 0 };
 
 	OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
-	return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+	return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
 }
 
 u8 ecore_mcast_bin_from_mac(u8 *mac)
 {
-	u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
-				  mac, ETH_ALEN);
+	u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);
 
 	return crc & 0xff;
 }
 
 static enum _ecore_status_t
 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
-			  u16 opaque_fid,
 			  struct ecore_filter_mcast *p_filter_cmd,
 			  enum spq_mode comp_mode,
 			  struct ecore_spq_comp_cb *p_comp_data)
@@ -1642,16 +1635,13 @@  enum _ecore_status_t
 
 	for_each_hwfn(p_dev, i) {
 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-		u16 opaque_fid;
 
 		if (IS_VF(p_dev)) {
 			ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
 			continue;
 		}
 
-		opaque_fid = p_hwfn->hw_info.opaque_fid;
 		rc = ecore_sp_eth_filter_mcast(p_hwfn,
-					       opaque_fid,
 					       p_filter_cmd,
 					       comp_mode, p_comp_data);
 		if (rc != ECORE_SUCCESS)
@@ -1741,8 +1731,7 @@  static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
 
 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
 				     struct ecore_ptt *p_ptt,
-				     struct ecore_eth_stats *p_stats,
-				     u16 statistics_bin)
+				     struct ecore_eth_stats *p_stats)
 {
 	struct tstorm_per_port_stat tstats;
 	u32 tstats_addr, tstats_len;
@@ -1954,7 +1943,7 @@  void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
 {
 	__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
 	__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
-	__ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+	__ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
 	__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
 
 #ifndef ASIC_ONLY
@@ -2091,7 +2080,6 @@  void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
 
 enum _ecore_status_t
 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
-				  struct ecore_ptt *p_ptt,
 				  struct ecore_spq_comp_cb *p_cb,
 				  dma_addr_t p_addr, u16 length,
 				  u16 qid, u8 vport_id,
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index 02aa5e8..3618ae6 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -140,32 +140,4 @@  enum _ecore_status_t
 			   u16 pq_id);
 
 u8 ecore_mcast_bin_from_mac(u8 *mac);
-
-/**
- * @brief - ecore_configure_rfs_ntuple_filter
- *
- * This ramrod should be used to add or remove arfs hw filter
- *
- * @params p_hwfn
- * @params p_ptt
- * @params p_cb		Used for ECORE_SPQ_MODE_CB,where client would initialize
-			it with cookie and callback function address, if not
-			using this mode then client must pass NULL.
- * @params p_addr	p_addr is an actual packet header that needs to be
- *			filter. It has to mapped with IO to read prior to
- *			calling this, [contains 4 tuples- src ip, dest ip,
- *			src port, dest port].
- * @params length	length of p_addr header up to past the transport header.
- * @params qid		receive packet will be directed to this queue.
- * @params vport_id
- * @params b_is_add	flag to add or remove filter.
- *
- */
-enum _ecore_status_t
-ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
-				  struct ecore_ptt *p_ptt,
-				  struct ecore_spq_comp_cb *p_cb,
-				  dma_addr_t p_addr, u16 length,
-				  u16 qid, u8 vport_id,
-				  bool b_is_add);
 #endif
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index a6740d5..ed9837b 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -436,4 +436,30 @@  void ecore_get_vport_stats(struct ecore_dev *p_dev,
 void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
 			       struct ecore_ptt *p_ptt,
 			       struct ecore_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - ecore_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove arfs hw filter
+ *
+ * @params p_hwfn
+ * @params p_cb		Used for ECORE_SPQ_MODE_CB,where client would initialize
+ *			it with cookie and callback function address, if not
+ *			using this mode then client must pass NULL.
+ * @params p_addr	p_addr is an actual packet header that needs to be
+ *			filter. It has to mapped with IO to read prior to
+ *			calling this, [contains 4 tuples- src ip, dest ip,
+ *			src port, dest port].
+ * @params length	length of p_addr header up to past the transport header.
+ * @params qid		receive packet will be directed to this queue.
+ * @params vport_id
+ * @params b_is_add	flag to add or remove filter.
+ *
+ */
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+				  struct ecore_spq_comp_cb *p_cb,
+				  dma_addr_t p_addr, u16 length,
+				  u16 qid, u8 vport_id,
+				  bool b_is_add);
 #endif
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index f1010ee..5aa3210 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -901,8 +901,7 @@  struct ecore_load_req_out_params {
 	return ECORE_SUCCESS;
 }
 
-static void ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
-				   enum ecore_drv_role drv_role,
+static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
 				   u8 *p_mfw_drv_role)
 {
 	switch (drv_role) {
@@ -921,8 +920,7 @@  enum ecore_load_req_force {
 	ECORE_LOAD_REQ_FORCE_ALL,
 };
 
-static void ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
-				    enum ecore_load_req_force force_cmd,
+static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
 				    u8 *p_mfw_force_cmd)
 {
 	switch (force_cmd) {
@@ -959,11 +957,10 @@  enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
 	in_params.drv_ver_0 = ECORE_VERSION;
 	in_params.drv_ver_1 = ecore_get_config_bitmap();
 	in_params.fw_ver = STORM_FW_VERSION;
-	ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+	ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
 	in_params.drv_role = mfw_drv_role;
 	in_params.timeout_val = p_params->timeout_val;
-	ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
-				&mfw_force_cmd);
+	ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
 	in_params.force_cmd = mfw_force_cmd;
 	in_params.avoid_eng_reset = p_params->avoid_eng_reset;
 
@@ -1000,8 +997,7 @@  enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
 				out_params.exist_drv_ver_0,
 				out_params.exist_drv_ver_1);
 
-			ecore_get_mfw_force_cmd(p_hwfn,
-						ECORE_LOAD_REQ_FORCE_ALL,
+			ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
 						&mfw_force_cmd);
 
 			in_params.force_cmd = mfw_force_cmd;
@@ -1614,8 +1610,7 @@  static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
 		      &param);
 }
 
-static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
-					 struct ecore_ptt *p_ptt)
+static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
 {
 	/* A single notification should be sent to upper driver in CMT mode */
 	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
@@ -1924,7 +1919,7 @@  enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
 			ecore_mcp_update_bw(p_hwfn, p_ptt);
 			break;
 		case MFW_DRV_MSG_FAILURE_DETECTED:
-			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
+			ecore_mcp_handle_fan_failure(p_hwfn);
 			break;
 		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
 			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
@@ -3492,12 +3487,10 @@  enum _ecore_status_t
 	return ECORE_SUCCESS;
 }
 
-void
-ecore_mcp_resc_lock_default_init(struct ecore_hwfn *p_hwfn,
-				 struct ecore_resc_lock_params *p_lock,
-				 struct ecore_resc_unlock_params *p_unlock,
-				 enum ecore_resc_lock resource,
-				 bool b_is_permanent)
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+				      struct ecore_resc_unlock_params *p_unlock,
+				      enum ecore_resc_lock resource,
+				      bool b_is_permanent)
 {
 	if (p_lock != OSAL_NULL) {
 		OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index f69b425..9f3fd70 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -493,18 +493,15 @@  enum _ecore_status_t
 /**
  * @brief - default initialization for lock/unlock resource structs
  *
- * @param p_hwfn
  * @param p_lock - lock params struct to be initialized; Can be OSAL_NULL
  * @param p_unlock - unlock params struct to be initialized; Can be OSAL_NULL
  * @param resource - the requested resource
  * @paral b_is_permanent - disable retries & aging when set
  */
-void
-ecore_mcp_resc_lock_default_init(struct ecore_hwfn *p_hwfn,
-				 struct ecore_resc_lock_params *p_lock,
-				 struct ecore_resc_unlock_params *p_unlock,
-				 enum ecore_resc_lock resource,
-				 bool b_is_permanent);
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+				      struct ecore_resc_unlock_params *p_unlock,
+				      enum ecore_resc_lock resource,
+				      bool b_is_permanent);
 
 /**
  * @brief Learn of supported MFW features; To be done during early init
diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c
index 0bf1be8..3a1de09 100644
--- a/drivers/net/qede/base/ecore_mng_tlv.c
+++ b/drivers/net/qede/base/ecore_mng_tlv.c
@@ -1403,9 +1403,9 @@ 
 	return -1;
 }
 
-static enum _ecore_status_t
-ecore_mfw_update_tlvs(u8 tlv_group, struct ecore_hwfn *p_hwfn,
-		      struct ecore_ptt *p_ptt, u8 *p_mfw_buf, u32 size)
+static enum _ecore_status_t ecore_mfw_update_tlvs(struct ecore_hwfn *p_hwfn,
+						  u8 tlv_group, u8 *p_mfw_buf,
+						  u32 size)
 {
 	union ecore_mfw_tlv_data *p_tlv_data;
 	struct ecore_drv_tlv_hdr tlv;
@@ -1512,8 +1512,7 @@  enum _ecore_status_t
 	/* Update the TLV values in the local buffer */
 	for (id = ECORE_MFW_TLV_GENERIC; id < ECORE_MFW_TLV_MAX; id <<= 1) {
 		if (tlv_group & id) {
-			if (ecore_mfw_update_tlvs(id, p_hwfn, p_ptt, p_mfw_buf,
-						  size))
+			if (ecore_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size))
 				goto drv_done;
 		}
 	}
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 716799a..ee0f06c 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -36,9 +36,8 @@ 
 /***************************************************************************
  * Blocking Imp. (BLOCK/EBLOCK mode)
  ***************************************************************************/
-static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
-				  void *cookie,
-				  union event_ring_data *data,
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
+				  union event_ring_data OSAL_UNUSED * data,
 				  u8 fw_return_code)
 {
 	struct ecore_spq_comp_done *comp_done;
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 792cf75..82ba198 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -57,8 +57,7 @@ 
 	"CHANNEL_TLV_MAX"
 };
 
-static u8 ecore_vf_calculate_legacy(struct ecore_hwfn *p_hwfn,
-				    struct ecore_vf_info *p_vf)
+static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
 {
 	u8 legacy = 0;
 
@@ -210,9 +209,7 @@  struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
 }
 
 static struct ecore_queue_cid *
-ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
-			      struct ecore_vf_info *p_vf,
-			      struct ecore_vf_queue *p_queue)
+ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
 {
 	int i;
 
@@ -231,8 +228,7 @@  enum ecore_iov_validate_q_mode {
 	ECORE_IOV_VALIDATE_Q_DISABLE,
 };
 
-static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
-					  struct ecore_vf_info *p_vf,
+static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
 					  u16 qid,
 					  enum ecore_iov_validate_q_mode mode,
 					  bool b_is_tx)
@@ -274,8 +270,7 @@  static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
 		return false;
 	}
 
-	return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
-					     mode, false);
+	return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
 }
 
 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
@@ -291,8 +286,7 @@  static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
 		return false;
 	}
 
-	return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
-					     mode, true);
+	return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
 }
 
 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
@@ -314,13 +308,12 @@  static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
 }
 
 /* Is there at least 1 queue open? */
-static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
-					  struct ecore_vf_info *p_vf)
+static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
 {
 	u8 i;
 
 	for (i = 0; i < p_vf->num_rxqs; i++)
-		if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+		if (ecore_iov_validate_queue_mode(p_vf, i,
 						  ECORE_IOV_VALIDATE_Q_ENABLE,
 						  false))
 			return true;
@@ -328,13 +321,12 @@  static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
 	return false;
 }
 
-static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
-					  struct ecore_vf_info *p_vf)
+static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
 {
 	u8 i;
 
 	for (i = 0; i < p_vf->num_txqs; i++)
-		if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+		if (ecore_iov_validate_queue_mode(p_vf, i,
 						  ECORE_IOV_VALIDATE_Q_ENABLE,
 						  true))
 			return true;
@@ -1302,8 +1294,7 @@  static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
 }
 
 /* place a given tlv on the tlv buffer, continuing current tlv list */
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
-		    u8 **offset, u16 type, u16 length)
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
 {
 	struct channel_tlv *tl = (struct channel_tlv *)*offset;
 
@@ -1359,7 +1350,12 @@  void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
 static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
 				    struct ecore_ptt *p_ptt,
 				    struct ecore_vf_info *p_vf,
-				    u16 length, u8 status)
+#ifdef CONFIG_ECORE_SW_CHANNEL
+				    u16 length,
+#else
+				    u16 OSAL_UNUSED length,
+#endif
+				    u8 status)
 {
 	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
 	struct ecore_dmae_params params;
@@ -1398,8 +1394,7 @@  static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
 	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
 }
 
-static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
-				  enum ecore_iov_vport_update_flag flag)
+static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
 {
 	switch (flag) {
 	case ECORE_IOV_VP_UPDATE_ACTIVATE:
@@ -1437,15 +1432,15 @@  static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
 	size = sizeof(struct pfvf_def_resp_tlv);
 	total_len = size;
 
-	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
 
 	/* Prepare response for all extended tlvs if they are found by PF */
 	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
 		if (!(tlvs_mask & (1 << i)))
 			continue;
 
-		resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
-				     ecore_iov_vport_to_tlv(p_hwfn, i), size);
+		resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
+				     size);
 
 		if (tlvs_accepted & (1 << i))
 			resp->hdr.status = status;
@@ -1455,12 +1450,13 @@  static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
 			   p_vf->relative_vf_id,
-			   ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+			   ecore_iov_vport_to_tlv(i),
+			   resp->hdr.status);
 
 		total_len += size;
 	}
 
-	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
 	return total_len;
@@ -1475,8 +1471,8 @@  static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
 
 	mbx->offset = (u8 *)mbx->reply_virt;
 
-	ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
-	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+	ecore_add_tlv(&mbx->offset, type, length);
+	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
 	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
@@ -1531,7 +1527,6 @@  static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
 }
 
 static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
-					struct ecore_ptt *p_ptt,
 					struct ecore_vf_info *p_vf,
 					struct vf_pf_resc_request *p_req,
 					struct pf_vf_resc *p_resp)
@@ -1609,8 +1604,7 @@  static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
 	return PFVF_STATUS_SUCCESS;
 }
 
-static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
-					   struct pfvf_stats_info *p_stats)
+static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
 {
 	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
 				  OFFSETOF(struct mstorm_vf_zone,
@@ -1733,7 +1727,7 @@  static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
 
-	ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
+	ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
 
 	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
 		    ETH_ALEN);
@@ -1758,7 +1752,7 @@  static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 	/* Fill resources available to VF; Make sure there are enough to
 	 * satisfy the VF's request.
 	 */
-	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, vf,
 						    &req->resc_request, resc);
 	if (vfpf_status != PFVF_STATUS_SUCCESS)
 		goto out;
@@ -1974,8 +1968,7 @@  static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
 			struct ecore_queue_cid *p_cid = OSAL_NULL;
 
 			/* There can be at most 1 Rx queue on qzone. Find it */
-			p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
-							      p_queue);
+			p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
 			if (p_cid == OSAL_NULL)
 				continue;
 
@@ -2114,8 +2107,8 @@  static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
 	vf->vport_instance--;
 	vf->spoof_chk = false;
 
-	if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
-	    (ecore_iov_validate_active_txq(p_hwfn, vf))) {
+	if ((ecore_iov_validate_active_rxq(vf)) ||
+	    (ecore_iov_validate_active_txq(vf))) {
 		vf->b_malicious = true;
 		DP_NOTICE(p_hwfn, false,
 			  "VF [%02x] - considered malicious;"
@@ -2162,9 +2155,8 @@  static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
 	else
 		length = sizeof(struct pfvf_def_resp_tlv);
 
-	p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
-			      length);
-	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+	p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
+	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
 	/* Update the TLV with the response */
@@ -2245,7 +2237,7 @@  static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	if (p_queue->cids[qid_usage_idx].p_cid)
 		goto out;
 
-	vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
+	vf_legacy = ecore_vf_calculate_legacy(vf);
 
 	/* Acquire a new queue-cid */
 	OSAL_MEMSET(&params, 0, sizeof(params));
@@ -2440,11 +2432,11 @@  static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
 	}
 
 send_resp:
-	p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+	p_resp = ecore_add_tlv(&mbx->offset,
 			       CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
 
 	ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
-	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
 	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
@@ -2476,9 +2468,8 @@  static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
 	else
 		length = sizeof(struct pfvf_def_resp_tlv);
 
-	p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
-			      length);
-	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+	p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
+	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
 	/* Update the TLV with the response */
@@ -2521,7 +2512,7 @@  static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 	if (p_queue->cids[qid_usage_idx].p_cid)
 		goto out;
 
-	vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
+	vf_legacy = ecore_vf_calculate_legacy(vf);
 
 	/* Acquire a new queue-cid */
 	params.queue_id = p_queue->fw_tx_qid;
@@ -2590,7 +2581,7 @@  static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
 	    p_queue->cids[qid_usage_idx].b_is_tx) {
 		struct ecore_queue_cid *p_cid;
 
-		p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf, p_queue);
+		p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 			   "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
 			    vf->relative_vf_id, rxq_id, qid_usage_idx,
@@ -3012,8 +3003,7 @@  void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
 			goto out;
 		}
 
-		p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
-						      &vf->vf_queues[q_idx]);
+		p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
 		p_rss->rss_ind_table[i] = p_cid;
 	}
 
@@ -3026,7 +3016,6 @@  void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
 
 static void
 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
-				  struct ecore_vf_info *vf,
 				  struct ecore_sp_vport_update_params *p_data,
 				  struct ecore_sge_tpa_params *p_sge_tpa,
 				  struct ecore_iov_vf_mbx *p_mbx,
@@ -3116,7 +3105,7 @@  static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
 	ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
 	ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
 	ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
-	ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+	ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
 					  &sge_tpa_params, mbx, &tlvs_mask);
 
 	tlvs_accepted = tlvs_mask;
@@ -3503,8 +3492,7 @@  static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
 		   vf->abs_vf_id, rx_coal, tx_coal, qid);
 
 	if (rx_coal) {
-		p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
-						      &vf->vf_queues[qid]);
+		p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
 
 		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
 		if (rc != ECORE_SUCCESS) {
@@ -3590,8 +3578,7 @@  enum _ecore_status_t
 		   vf->abs_vf_id, rx_coal, tx_coal, qid);
 
 	if (rx_coal) {
-		p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
-						      &vf->vf_queues[qid]);
+		p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
 
 		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
 		if (rc != ECORE_SUCCESS) {
@@ -3903,11 +3890,11 @@  void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
 	p_bulletin = p_vf->bulletin.p_virt;
 
 	if (p_params)
-		__ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+		__ecore_vf_get_link_params(p_params, p_bulletin);
 	if (p_link)
-		__ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+		__ecore_vf_get_link_state(p_link, p_bulletin);
 	if (p_caps)
-		__ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+		__ecore_vf_get_link_caps(p_caps, p_bulletin);
 }
 
 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index 8923730..effeb69 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -206,17 +206,13 @@  struct ecore_pf_iov {
 /**
  * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
  *
- * @param p_hwfn
- * @param p_iov
+ * @param offset
  * @param type
  * @param length
  *
  * @return pointer to the newly placed tlv
  */
-void *ecore_add_tlv(struct ecore_hwfn	*p_hwfn,
-		    u8			**offset,
-		    u16			type,
-		    u16			length);
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length);
 
 /**
  * @brief list the types and lengths of the tlvs on the buffer
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 5002ada..c37341e 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -44,7 +44,7 @@  static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
 	OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
 
 	/* Init type and length */
-	p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);
+	p_tlv = ecore_add_tlv(&p_iov->offset, type, length);
 
 	/* Init first tlv header */
 	((struct vfpf_first_tlv *)p_tlv)->reply_address =
@@ -146,7 +146,7 @@  static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn,
 	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
 		return;
 
-	p_qid_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+	p_qid_tlv = ecore_add_tlv(&p_iov->offset,
 				  CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
 	p_qid_tlv->qid = p_cid->qid_usage_idx;
 }
@@ -222,7 +222,7 @@  static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
 	req->bulletin_size = p_iov->bulletin.size;
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -610,7 +610,7 @@  enum _ecore_status_t
 				     ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -679,7 +679,7 @@  enum _ecore_status_t
 	ecore_vf_pf_add_qid(p_hwfn, p_cid);
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -736,7 +736,7 @@  enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
 	ecore_vf_pf_add_qid(p_hwfn, p_cid);
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -782,7 +782,7 @@  enum _ecore_status_t
 	ecore_vf_pf_add_qid(p_hwfn, p_cid);
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -835,7 +835,7 @@  enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
 	ecore_vf_pf_add_qid(p_hwfn, p_cid);
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -891,7 +891,7 @@  enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
 	ecore_vf_pf_add_qid(p_hwfn, *pp_cid);
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -940,7 +940,7 @@  enum _ecore_status_t
 	}
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -971,7 +971,7 @@  enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
 			 sizeof(struct vfpf_first_tlv));
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -1078,7 +1078,7 @@  enum _ecore_status_t
 		struct vfpf_vport_update_activate_tlv *p_act_tlv;
 
 		size = sizeof(struct vfpf_vport_update_activate_tlv);
-		p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+		p_act_tlv = ecore_add_tlv(&p_iov->offset,
 					  CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
 					  size);
 		resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1098,7 +1098,7 @@  enum _ecore_status_t
 		struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
 
 		size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
-		p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+		p_vlan_tlv = ecore_add_tlv(&p_iov->offset,
 					   CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
 					   size);
 		resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1111,7 +1111,7 @@  enum _ecore_status_t
 
 		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
 		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
-		p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+		p_tx_switch_tlv = ecore_add_tlv(&p_iov->offset,
 						tlv, size);
 		resp_size += sizeof(struct pfvf_def_resp_tlv);
 
@@ -1122,7 +1122,7 @@  enum _ecore_status_t
 		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
 
 		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
-		p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+		p_mcast_tlv = ecore_add_tlv(&p_iov->offset,
 					    CHANNEL_TLV_VPORT_UPDATE_MCAST,
 					    size);
 		resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1140,7 +1140,7 @@  enum _ecore_status_t
 
 		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
 		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
-		p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+		p_accept_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
 		resp_size += sizeof(struct pfvf_def_resp_tlv);
 
 		if (update_rx) {
@@ -1162,7 +1162,7 @@  enum _ecore_status_t
 		int i, table_size;
 
 		size = sizeof(struct vfpf_vport_update_rss_tlv);
-		p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+		p_rss_tlv = ecore_add_tlv(&p_iov->offset,
 					  CHANNEL_TLV_VPORT_UPDATE_RSS, size);
 		resp_size += sizeof(struct pfvf_def_resp_tlv);
 
@@ -1200,8 +1200,7 @@  enum _ecore_status_t
 
 		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
 		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
-		p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
-					       tlv, size);
+		p_any_vlan_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
 
 		resp_size += sizeof(struct pfvf_def_resp_tlv);
 		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
@@ -1215,7 +1214,7 @@  enum _ecore_status_t
 
 		sge_tpa_params = p_params->sge_tpa_params;
 		size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
-		p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+		p_sge_tpa_tlv = ecore_add_tlv(&p_iov->offset,
 					      CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
 					      size);
 		resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1253,7 +1252,7 @@  enum _ecore_status_t
 	}
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -1285,7 +1284,7 @@  enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -1319,7 +1318,7 @@  enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
 	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -1405,7 +1404,7 @@  enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
 	req->vlan = p_ucast->vlan;
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -1436,7 +1435,7 @@  enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
 			 sizeof(struct vfpf_first_tlv));
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset,
+	ecore_add_tlv(&p_iov->offset,
 		      CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
@@ -1477,7 +1476,7 @@  enum _ecore_status_t
 		   rx_coal, tx_coal, req->qid);
 
 	/* add list termination tlv */
-	ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+	ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
 		      sizeof(struct channel_list_end_tlv));
 
 	resp = &p_iov->pf2vf_reply->default_resp;
@@ -1562,8 +1561,7 @@  enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
 	return ECORE_SUCCESS;
 }
 
-void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
-				struct ecore_mcp_link_params *p_params,
+void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
 				struct ecore_bulletin_content *p_bulletin)
 {
 	OSAL_MEMSET(p_params, 0, sizeof(*p_params));
@@ -1580,12 +1578,11 @@  void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
 void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
 			      struct ecore_mcp_link_params *params)
 {
-	__ecore_vf_get_link_params(p_hwfn, params,
+	__ecore_vf_get_link_params(params,
 				   &p_hwfn->vf_iov_info->bulletin_shadow);
 }
 
-void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
-			       struct ecore_mcp_link_state *p_link,
+void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
 			       struct ecore_bulletin_content *p_bulletin)
 {
 	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
@@ -1607,12 +1604,11 @@  void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
 void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
 			     struct ecore_mcp_link_state *link)
 {
-	__ecore_vf_get_link_state(p_hwfn, link,
+	__ecore_vf_get_link_state(link,
 				  &p_hwfn->vf_iov_info->bulletin_shadow);
 }
 
-void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
-			      struct ecore_mcp_link_capabilities *p_link_caps,
+void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
 			      struct ecore_bulletin_content *p_bulletin)
 {
 	OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
@@ -1622,7 +1618,7 @@  void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
 void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
 			    struct ecore_mcp_link_capabilities *p_link_caps)
 {
-	__ecore_vf_get_link_caps(p_hwfn, p_link_caps,
+	__ecore_vf_get_link_caps(p_link_caps,
 				 &p_hwfn->vf_iov_info->bulletin_shadow);
 }
 
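The tail of this file shows the same split for all three link getters: the double-underscore helper becomes a pure copy from a caller-supplied bulletin, and only the public wrapper still needs the hwfn, solely to locate its bulletin shadow. A minimal sketch of that split, using hypothetical simplified structs rather than the real ecore_mcp_link_params / ecore_bulletin_content layouts:

	#include <string.h>

	struct bulletin {		/* stand-in for the bulletin board copy */
		unsigned char req_autoneg;
		unsigned int req_forced_speed;
	};

	struct link_params {		/* stand-in for the link params struct */
		unsigned char speed_autoneg;
		unsigned int speed_forced_mbps;
	};

	struct hwfn {			/* stand-in: only holds the shadow copy */
		struct bulletin bulletin_shadow;
	};

	/* Helper: fills the output purely from the given bulletin. */
	static void __get_link_params(struct link_params *p_params,
				      const struct bulletin *p_bulletin)
	{
		memset(p_params, 0, sizeof(*p_params));
		p_params->speed_autoneg = p_bulletin->req_autoneg;
		p_params->speed_forced_mbps = p_bulletin->req_forced_speed;
	}

	/* Wrapper: the hwfn is needed only to pick the bulletin shadow. */
	static void get_link_params(struct hwfn *p_hwfn,
				    struct link_params *p_params)
	{
		__get_link_params(p_params, &p_hwfn->bulletin_shadow);
	}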
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index d9ee96b..0945522 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -273,34 +273,28 @@  void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
 /**
  * @brief - return the link params in a given bulletin board
  *
- * @param p_hwfn
  * @param p_params - pointer to a struct to fill with link params
  * @param p_bulletin
  */
-void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
-				struct ecore_mcp_link_params *p_params,
+void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
 				struct ecore_bulletin_content *p_bulletin);
 
 /**
  * @brief - return the link state in a given bulletin board
  *
- * @param p_hwfn
  * @param p_link - pointer to a struct to fill with link state
  * @param p_bulletin
  */
-void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
-			       struct ecore_mcp_link_state *p_link,
+void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
 			       struct ecore_bulletin_content *p_bulletin);
 
 /**
  * @brief - return the link capabilities in a given bulletin board
  *
- * @param p_hwfn
  * @param p_link - pointer to a struct to fill with link capabilities
  * @param p_bulletin
  */
-void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
-			      struct ecore_mcp_link_capabilities *p_link_caps,
+void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
 			      struct ecore_bulletin_content *p_bulletin);
 
 enum _ecore_status_t
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
index 7bd5c5d..7db7521 100644
--- a/drivers/net/qede/qede_fdir.c
+++ b/drivers/net/qede/qede_fdir.c
@@ -171,7 +171,7 @@  void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
 					  &qdev->fdir_info.arfs);
 	}
 	/* configure filter with ECORE_SPQ_MODE_EBLOCK */
-	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, NULL,
+	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
 					       (dma_addr_t)mz->phys_addr,
 					       pkt_len,
 					       fdir_filter->action.rx_queue,