@@ -823,9 +823,6 @@ void bnxt_set_mark_in_mbuf(struct bnxt *bp,
struct rte_mbuf *mbuf)
{
uint32_t cfa_code = 0;
- uint8_t meta_fmt = 0;
- uint16_t flags2 = 0;
- uint32_t meta = 0;
cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
if (!cfa_code)
@@ -834,25 +831,6 @@ void bnxt_set_mark_in_mbuf(struct bnxt *bp,
if (cfa_code && !bp->mark_table[cfa_code].valid)
return;
- flags2 = rte_le_to_cpu_16(rxcmp1->flags2);
- meta = rte_le_to_cpu_32(rxcmp1->metadata);
- if (meta) {
- meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
-
- /* The flags field holds extra bits of info from [6:4]
- * which indicate if the flow is in TCAM or EM or EEM
- */
- meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
- BNXT_CFA_META_FMT_SHFT;
-
- /* meta_fmt == 4 => 'b100 => 'b10x => EM.
- * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN
- * meta_fmt == 6 => 'b110 => 'b11x => EEM
- * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN.
- */
- meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
- }
-
mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
@@ -200,7 +200,6 @@ ulp_bs_push_msb(uint8_t *bs, uint16_t pos, uint8_t len, uint8_t *val)
{
int i;
int cnt = (len + 7) / 8;
- int tlen = len;
/* Handle any remainder bits */
int tmp = len % 8;
@@ -211,12 +210,10 @@ ulp_bs_push_msb(uint8_t *bs, uint16_t pos, uint8_t len, uint8_t *val)
ulp_bs_put_msb(bs, pos, tmp, val[0]);
pos += tmp;
- tlen -= tmp;
for (i = 1; i < cnt; i++) {
ulp_bs_put_msb(bs, pos, 8, val[i]);
pos += 8;
- tlen -= 8;
}
return len;
@@ -133,16 +133,12 @@ static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg,
int msg_len)
{
- u32 cmd_buf_max = MAX_PF_MGMT_BUF_SIZE;
-
memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
- cmd_buf_max -= MGMT_MSG_RSVD_FOR_DEV;
memcpy(mgmt_cmd, header, sizeof(*header));
mgmt_cmd += sizeof(*header);
- cmd_buf_max -= sizeof(*header);
memcpy(mgmt_cmd, msg, msg_len);
}
@@ -615,7 +611,6 @@ static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
void *msg_body = header + sizeof(msg_header);
u8 *dest_msg;
u8 seq_id, seq_len;
- u32 msg_buf_max = MAX_PF_MGMT_BUF_SIZE;
u8 front_id;
u16 msg_id;
@@ -635,7 +630,6 @@ static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
}
dest_msg = (u8 *)recv_msg->msg + seq_id * HINIC_MSG_SEG_LEN;
- msg_buf_max -= seq_id * HINIC_MSG_SEG_LEN;
memcpy(dest_msg, msg_body, seq_len);
if (!HINIC_MSG_HEADER_GET(msg_header, LAST))
@@ -1050,7 +1050,6 @@ lio_update_read_index(struct lio_instr_queue *iq)
int
lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
{
- uint32_t tot_inst_processed = 0;
uint32_t inst_processed = 0;
int tx_done = 1;
@@ -1073,7 +1072,6 @@ lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
iq->stats.instr_processed += inst_processed;
}
- tot_inst_processed += inst_processed;
inst_processed = 0;
} while (1);
@@ -58,7 +58,7 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
off_t offset, nfp_offset;
uint32_t cpp_id, pos, len;
uint32_t tmpbuf[16];
- size_t count, curlen, totlen = 0;
+ size_t count, curlen;
int err = 0;
PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
@@ -135,7 +135,6 @@ nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
}
nfp_offset += pos;
- totlen += pos;
nfp_cpp_area_release(area);
nfp_cpp_area_free(area);
@@ -160,7 +159,7 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
off_t offset, nfp_offset;
uint32_t cpp_id, pos, len;
uint32_t tmpbuf[16];
- size_t count, curlen, totlen = 0;
+ size_t count, curlen;
int err = 0;
PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
@@ -236,7 +235,6 @@ nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
}
nfp_offset += pos;
- totlen += pos;
nfp_cpp_area_release(area);
nfp_cpp_area_free(area);
@@ -365,7 +365,7 @@ __octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
const uint16_t flag)
{
uint16_t nb_segs, nb_desc = 0;
- uint16_t gaura_id, len = 0;
+ uint16_t gaura_id;
struct rte_mbuf *m_next = NULL, *m_tofree;
rte_iova_t iova;
uint16_t data_len;
@@ -425,7 +425,6 @@ __octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
cmd_buf[nb_desc++] = iova;
nb_segs--;
- len += data_len;
tx_pkt = m_next;
} while (nb_segs);
@@ -73,7 +73,7 @@ static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
#endif
static enum _ecore_status_t
ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
- u8 *done, u32 resp_size)
+ u8 *done, __rte_unused u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
@@ -86,9 +86,6 @@ ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
/* output tlvs list */
ecore_dp_tlv_list(p_hwfn, p_req);
- /* need to add the END TLV to the message size */
- resp_size += sizeof(struct channel_list_end_tlv);
-
/* Send TLVs over HW channel */
OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
trigger.vf_pf_msg_valid = 1;
@@ -1291,6 +1291,9 @@ txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
/* Workround for ICR lost */
intr->flags |= TXGBE_FLAG_MAILBOX;
+	/* Mark 'eicr' as used to avoid a compiler warning about an unused variable. */
+ RTE_SET_USED(eicr);
+
return 0;
}
@@ -340,6 +340,9 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
}
PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
+
+	/* Mark 'completed' as used to avoid a compiler warning when not built in DEBUG mode. */
+	RTE_SET_USED(completed);
}
uint16_t