[v2,39/70] net/ice/base: add data typecasting to match sizes

Message ID: 20220815073206.2917968-40-qi.z.zhang@intel.com (mailing list archive)
State: Accepted, archived
Delegated to: Qi Zhang
Series: ice base code update

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Qi Zhang Aug. 15, 2022, 7:31 a.m. UTC
  Add typecasts to variables to avoid the compiler warnings generated
when a variable of one data type is assigned to a variable of a
smaller data type. For example, assigning an unsigned 16-bit integer
to an 8-bit integer could trigger data-loss warnings or errors.
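
As a brief illustration (not part of the patch), the kind of narrowing
assignment being addressed looks roughly like the sketch below when built
with -Wconversion or a comparable warning level. The helper name and the
<stdint.h> types are assumptions for this example; the driver uses its
own u8/u16 typedefs.

#include <stdint.h>

/*
 * Minimal sketch (not from the patch): mirrors the ICE_ACL_TBL_TCAM_IDX
 * style math, where a 16-bit entry index divided by the TCAM depth is
 * stored in an 8-bit variable.
 */
static uint8_t tcam_idx_of_entry(uint16_t entry, uint16_t tcam_depth)
{
	/*
	 * Implicit narrowing would warn under -Wconversion or similar:
	 *     uint8_t idx = entry / tcam_depth;
	 * The explicit cast documents that the result is known to fit.
	 */
	uint8_t idx = (uint8_t)(entry / tcam_depth);

	return idx;
}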

Signed-off-by: Vignesh Sridhar <vignesh.sridhar@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/ice/base/ice_acl_ctrl.c   | 34 +++++++++++++--------------
 drivers/net/ice/base/ice_adminq_cmd.h |  4 ++--
 drivers/net/ice/base/ice_common.c     | 13 +++++-----
 drivers/net/ice/base/ice_dcb.c        |  8 +++----
 drivers/net/ice/base/ice_flex_pipe.c  |  2 +-
 drivers/net/ice/base/ice_flow.c       | 26 ++++++++++----------
 drivers/net/ice/base/ice_nvm.c        |  2 +-
 drivers/net/ice/base/ice_sched.c      |  5 ++--
 drivers/net/ice/base/ice_switch.c     | 12 +++++-----
 9 files changed, 52 insertions(+), 54 deletions(-)
  

Patch

diff --git a/drivers/net/ice/base/ice_acl_ctrl.c b/drivers/net/ice/base/ice_acl_ctrl.c
index 27aa6b62d4..2dd08e326e 100644
--- a/drivers/net/ice/base/ice_acl_ctrl.c
+++ b/drivers/net/ice/base/ice_acl_ctrl.c
@@ -6,10 +6,10 @@ 
 #include "ice_flow.h"
 
 /* Determine the TCAM index of entry 'e' within the ACL table */
-#define ICE_ACL_TBL_TCAM_IDX(e) ((e) / ICE_AQC_ACL_TCAM_DEPTH)
+#define ICE_ACL_TBL_TCAM_IDX(e) ((u8)((e) / ICE_AQC_ACL_TCAM_DEPTH))
 
 /* Determine the entry index within the TCAM */
-#define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((e) % ICE_AQC_ACL_TCAM_DEPTH)
+#define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((u16)((e) % ICE_AQC_ACL_TCAM_DEPTH))
 
 #define ICE_ACL_SCEN_ENTRY_INVAL 0xFFFF
 
@@ -251,10 +251,8 @@  ice_acl_assign_act_mems_to_tcam(struct ice_acl_tbl *tbl, u8 cur_tcam,
  */
 static void ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl *tbl)
 {
-	u16 num_cscd, stack_level, stack_idx, min_act_mem;
-	u8 tcam_idx = tbl->first_tcam;
-	u16 max_idx_to_get_extra;
-	u8 mem_idx = 0;
+	u16 num_cscd, stack_level, stack_idx, max_idx_to_get_extra;
+	u8 min_act_mem, tcam_idx = tbl->first_tcam, mem_idx = 0;
 
 	/* Determine number of stacked TCAMs */
 	stack_level = DIVIDE_AND_ROUND_UP(tbl->info.depth,
@@ -326,7 +324,8 @@  ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params)
 	depth = ICE_ALIGN(params->depth, ICE_ACL_ENTRY_ALLOC_UNIT);
 
 	if (params->entry_act_pairs < width / ICE_AQC_ACL_KEY_WIDTH_BYTES) {
-		params->entry_act_pairs = width / ICE_AQC_ACL_KEY_WIDTH_BYTES;
+		params->entry_act_pairs =
+			(u8)(width / ICE_AQC_ACL_KEY_WIDTH_BYTES);
 
 		if (params->entry_act_pairs > ICE_AQC_TBL_MAX_ACTION_PAIRS)
 			params->entry_act_pairs = ICE_AQC_TBL_MAX_ACTION_PAIRS;
@@ -587,7 +586,7 @@  ice_acl_fill_tcam_select(struct ice_aqc_acl_scen *scen_buf,
 	 */
 	for (j = 0; j < ICE_AQC_ACL_KEY_WIDTH_BYTES; j++) {
 		/* PKT DIR uses the 1st location of Byte Selection Base: + 1 */
-		u8 val = ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx;
+		u8 val = (u8)(ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx);
 
 		if (tcam_idx_in_cascade == cascade_cnt - 1) {
 			if (j == ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM)
@@ -793,7 +792,7 @@  ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries,
 	/* set the START_SET bit at the beginning of the stack */
 	scen_buf.tcam_cfg[k].start_cmp_set |= ICE_AQC_ACL_ALLOC_SCE_START_SET;
 	while (k <= last_tcam) {
-		u8 last_tcam_idx_cascade = cascade_cnt + k - 1;
+		u16 last_tcam_idx_cascade = cascade_cnt + k - 1;
 
 		/* set start_cmp for the first cascaded TCAM */
 		scen_buf.tcam_cfg[k].start_cmp_set |=
@@ -972,10 +971,10 @@  ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen,
 		  enum ice_acl_entry_prio prio, u8 *keys, u8 *inverts,
 		  struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx)
 {
-	u8 i, entry_tcam, num_cscd, offset;
 	struct ice_aqc_acl_data buf;
+	u8 entry_tcam, offset;
+	u16 i, num_cscd, idx;
 	enum ice_status status = ICE_SUCCESS;
-	u16 idx;
 
 	if (!scen)
 		return ICE_ERR_DOES_NOT_EXIST;
@@ -1005,7 +1004,7 @@  ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen,
 		 * be programmed first; the TCAM entry of the leftmost TCAM
 		 * should be programmed last.
 		 */
-		offset = num_cscd - i - 1;
+		offset = (u8)(num_cscd - i - 1);
 		ice_memcpy(&buf.entry_key.val,
 			   &keys[offset * sizeof(buf.entry_key.val)],
 			   sizeof(buf.entry_key.val), ICE_NONDMA_TO_NONDMA);
@@ -1049,10 +1048,9 @@  ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
 		 struct ice_acl_act_entry *acts, u8 acts_cnt,
 		 u16 entry_idx)
 {
-	u8 entry_tcam, num_cscd, i, actx_idx = 0;
+	u16 idx, entry_tcam, num_cscd, i, actx_idx = 0;
 	struct ice_aqc_actpair act_buf;
 	enum ice_status status = ICE_SUCCESS;
-	u16 idx;
 
 	if (entry_idx >= scen->num_entry)
 		return ICE_ERR_MAX_LIMIT;
@@ -1112,9 +1110,9 @@  ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx)
 {
 	struct ice_aqc_actpair act_buf;
 	struct ice_aqc_acl_data buf;
-	u8 entry_tcam, num_cscd, i;
 	enum ice_status status = ICE_SUCCESS;
-	u16 idx;
+	u16 num_cscd, idx, i;
+	u8 entry_tcam;
 
 	if (!scen)
 		return ICE_ERR_DOES_NOT_EXIST;
@@ -1135,8 +1133,8 @@  ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx)
 	/* invalidate the flow entry */
 	ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
 	for (i = 0; i < num_cscd; i++) {
-		status = ice_aq_program_acl_entry(hw, entry_tcam + i, idx, &buf,
-						  NULL);
+		status = ice_aq_program_acl_entry(hw, (u8)(entry_tcam + i),
+						  idx, &buf, NULL);
 		if (status)
 			ice_debug(hw, ICE_DBG_ACL, "AQ program ACL entry failed status: %d\n",
 				  status);
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 8efbb137da..7f9bdd3cb0 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -2802,8 +2802,8 @@  struct ice_aqc_get_pkg_info_resp {
 struct ice_aqc_driver_shared_params {
 	u8 set_or_get_op;
 #define ICE_AQC_DRIVER_PARAM_OP_MASK		BIT(0)
-#define ICE_AQC_DRIVER_PARAM_SET		0
-#define ICE_AQC_DRIVER_PARAM_GET		1
+#define ICE_AQC_DRIVER_PARAM_SET		((u8)0)
+#define ICE_AQC_DRIVER_PARAM_GET		((u8)1)
 	u8 param_indx;
 #define ICE_AQC_DRIVER_PARAM_MAX_IDX		15
 	u8 rsvd[2];
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index db78bf4152..f8a3017df8 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -2420,7 +2420,7 @@  ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
 	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
 	{
-		u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
+		u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0);
 
 		caps->ext_topo_dev_img_ver_high[index] = number;
 		caps->ext_topo_dev_img_ver_low[index] = logical_id;
@@ -2534,11 +2534,10 @@  ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
 	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
 	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
 
-	info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
 	info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
-
-	if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
-		info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+	clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
+	if (clk_freq < NUM_ICE_TIME_REF_FREQ) {
+		info->time_ref = (enum ice_time_ref_freq)clk_freq;
 	} else {
 		/* Unknown clock frequency, so assume a (probably incorrect)
 		 * default to avoid out-of-bounds look ups of frequency
@@ -5621,7 +5620,7 @@  ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
 
 	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
-	cmd->param_indx = idx;
+	cmd->param_indx = (u8)idx;
 	cmd->param_val = CPU_TO_LE32(value);
 
 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
@@ -5655,7 +5654,7 @@  ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
 
 	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
-	cmd->param_indx = idx;
+	cmd->param_indx = (u8)idx;
 
 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 	if (status)
diff --git a/drivers/net/ice/base/ice_dcb.c b/drivers/net/ice/base/ice_dcb.c
index 3d630757f8..7a850e62f4 100644
--- a/drivers/net/ice/base/ice_dcb.c
+++ b/drivers/net/ice/base/ice_dcb.c
@@ -691,9 +691,9 @@  ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
 		       bool *dcbx_agent_status, struct ice_sq_cd *cd)
 {
 	struct ice_aqc_lldp_stop_start_specific_agent *cmd;
-	enum ice_status status;
+	enum ice_adminq_opc opcode;
 	struct ice_aq_desc desc;
-	u16 opcode;
+	enum ice_status status;
 
 	cmd = &desc.params.lldp_agent_ctrl;
 
@@ -885,8 +885,8 @@  ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
 		 */
 		if (!err && sync && oper) {
 			dcbcfg->app[app_index].priority =
-				(app_prio & ice_aqc_cee_app_mask) >>
-				ice_aqc_cee_app_shift;
+				(u8)((app_prio & ice_aqc_cee_app_mask) >>
+				     ice_aqc_cee_app_shift);
 			dcbcfg->app[app_index].selector = ice_app_sel_type;
 			dcbcfg->app[app_index].prot_id = ice_app_prot_id_type;
 			app_index++;
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 2d95ce4d74..63ddda2df9 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -3445,7 +3445,7 @@  ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
 			p->type = ICE_VSIG_REM;
 			p->orig_vsig = vsig;
 			p->vsig = ICE_DEFAULT_VSIG;
-			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
+			p->vsi = (u16)(vsi_cur - hw->blk[blk].xlt2.vsis);
 
 			LIST_ADD(&p->list_entry, chg);
 
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index b196e51276..80e7a447c3 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -1325,7 +1325,7 @@  ice_flow_xtract_pkt_flags(struct ice_hw *hw,
 			  struct ice_flow_prof_params *params,
 			  enum ice_flex_mdid_pkt_flags flags)
 {
-	u8 fv_words = hw->blk[params->blk].es.fvw;
+	u8 fv_words = (u8)hw->blk[params->blk].es.fvw;
 	u8 idx;
 
 	/* Make sure the number of extraction sequence entries required does not
@@ -1341,7 +1341,7 @@  ice_flow_xtract_pkt_flags(struct ice_hw *hw,
 		idx = params->es_cnt;
 
 	params->es[idx].prot_id = ICE_PROT_META_ID;
-	params->es[idx].off = flags;
+	params->es[idx].off = (u16)flags;
 	params->es_cnt++;
 
 	return ICE_SUCCESS;
@@ -1364,8 +1364,8 @@  ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
 		    u8 seg, enum ice_flow_field fld, u64 match)
 {
 	enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
+	u8 fv_words = (u8)hw->blk[params->blk].es.fvw;
 	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
-	u8 fv_words = hw->blk[params->blk].es.fvw;
 	struct ice_flow_fld_info *flds;
 	u16 cnt, ese_bits, i;
 	u16 sib_mask = 0;
@@ -1548,7 +1548,7 @@  ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
 	 */
 	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
 
-	flds[fld].xtrct.prot_id = prot_id;
+	flds[fld].xtrct.prot_id = (u8)prot_id;
 	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
 		ICE_FLOW_FV_EXTRACT_SZ;
 	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
@@ -1590,7 +1590,7 @@  ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
 			else
 				idx = params->es_cnt;
 
-			params->es[idx].prot_id = prot_id;
+			params->es[idx].prot_id = (u8)prot_id;
 			params->es[idx].off = off;
 			params->mask[idx] = mask | sib_mask;
 			params->es_cnt++;
@@ -1769,10 +1769,10 @@  ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
 
 	for (i = 0; i < params->prof->segs_cnt; i++) {
 		struct ice_flow_seg_info *seg = &params->prof->segs[i];
-		u8 j;
+		u16 j;
 
 		ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
-				     ICE_FLOW_FIELD_IDX_MAX) {
+				     (u16)ICE_FLOW_FIELD_IDX_MAX) {
 			struct ice_flow_fld_info *fld = &seg->fields[j];
 
 			fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
@@ -2765,7 +2765,7 @@  ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
 		/* If the caller want to add two actions of the same type, then
 		 * it is considered invalid configuration.
 		 */
-		if (ice_test_and_set_bit(acts[i].type, dup_check))
+		if (ice_test_and_set_bit((u16)acts[i].type, dup_check))
 			return ICE_ERR_PARAM;
 	}
 
@@ -2826,7 +2826,7 @@  ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
 			(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
 		u16 new_low =
 			(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
-		u8 range_idx = info->entry.val;
+		u8 range_idx = (u8)info->entry.val;
 
 		range_buf->checker_cfg[range_idx].low_boundary =
 			CPU_TO_BE16(new_low);
@@ -2983,10 +2983,10 @@  ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
 
 	for (i = 0; i < prof->segs_cnt; i++) {
 		struct ice_flow_seg_info *seg = &prof->segs[i];
-		u8 j;
+		u16 j;
 
 		ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
-				     ICE_FLOW_FIELD_IDX_MAX) {
+				     (u16)ICE_FLOW_FIELD_IDX_MAX) {
 			struct ice_flow_fld_info *info = &seg->fields[j];
 
 			if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
@@ -3753,13 +3753,13 @@  ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
 {
 	struct ice_flow_seg_info *seg;
 	u64 val;
-	u8 i;
+	u16 i;
 
 	/* set inner most segment */
 	seg = &segs[seg_cnt - 1];
 
 	ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
-			     ICE_FLOW_FIELD_IDX_MAX)
+			     (u16)ICE_FLOW_FIELD_IDX_MAX)
 		ice_flow_set_fld(seg, (enum ice_flow_field)i,
 				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 				 ICE_FLOW_FLD_OFF_INVAL, false);
diff --git a/drivers/net/ice/base/ice_nvm.c b/drivers/net/ice/base/ice_nvm.c
index ad2496e873..293b71905d 100644
--- a/drivers/net/ice/base/ice_nvm.c
+++ b/drivers/net/ice/base/ice_nvm.c
@@ -171,7 +171,7 @@  ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
 	status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
 
 	/* Report the number of words successfully read */
-	*words = bytes / 2;
+	*words = (u16)(bytes / 2);
 
 	/* Byte swap the words up to the amount we actually read */
 	for (i = 0; i < *words; i++)
diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index 4d31e96fd0..f87b1c4897 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -1369,9 +1369,10 @@  enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
 	if (status)
 		goto sched_query_out;
 
-	hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels);
+	hw->num_tx_sched_layers =
+		(u8)LE16_TO_CPU(buf->sched_props.logical_levels);
 	hw->num_tx_sched_phys_layers =
-		LE16_TO_CPU(buf->sched_props.phys_levels);
+		(u8)LE16_TO_CPU(buf->sched_props.phys_levels);
 	hw->flattened_layers = buf->sched_props.flattening_bitmap;
 	hw->max_cgds = buf->sched_props.max_pf_cgds;
 
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index edcfa89bcb..a8f83f62ff 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -2272,8 +2272,8 @@  ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
 				    ~ICE_AQ_RECIPE_RESULT_EN, result_bm);
 
 		/* get the first profile that is associated with rid */
-		prof = ice_find_first_bit(recipe_to_profile[idx],
-					  ICE_MAX_NUM_PROFILES);
+		prof = (u8)ice_find_first_bit(recipe_to_profile[idx],
+					      ICE_MAX_NUM_PROFILES);
 		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
 			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
 
@@ -4023,7 +4023,7 @@  ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 				 ice_aqc_opc_update_sw_rules, NULL);
 	if (!status) {
 		m_ent->lg_act_idx = l_id;
-		m_ent->counter_index = counter_id;
+		m_ent->counter_index = (u8)counter_id;
 	}
 
 	ice_free(hw, lg_act);
@@ -6341,7 +6341,7 @@  ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
 		break;
 	case ICE_SW_LKUP_PROMISC:
 	case ICE_SW_LKUP_PROMISC_VLAN:
-		ice_remove_promisc(hw, lkup, &remove_list_head);
+		ice_remove_promisc(hw, (u8)lkup, &remove_list_head);
 		break;
 	case ICE_SW_LKUP_MAC_VLAN:
 		ice_remove_mac_vlan(hw, &remove_list_head);
@@ -7183,7 +7183,7 @@  ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
 	/* Allocate the recipe resources, and configure them according to the
 	 * match fields from protocol headers and extracted field vectors.
 	 */
-	chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
+	chain_idx = (u8)ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
 	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
 		u8 i;
 
@@ -7376,7 +7376,7 @@  ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
 		is_root = (rm->root_rid == entry->rid);
 		recp->is_root = is_root;
 
-		recp->root_rid = entry->rid;
+		recp->root_rid = (u8)entry->rid;
 		recp->big_recp = (is_root && rm->n_grp_count > 1);
 
 		ice_memcpy(&recp->ext_words, entry->r_group.pairs,