@@ -444,20 +444,20 @@ main(int argc, char **argv)
ctx[i]);
i++;
}
- /* fallthrough */
+ __rte_fallthrough;
case ST_INPUT_DATA:
rte_free(test_data->input_data);
- /* fallthrough */
+ __rte_fallthrough;
case ST_COMPDEV:
for (i = 0; i < nb_compressdevs &&
i < RTE_COMPRESS_MAX_DEVS; i++) {
rte_compressdev_stop(enabled_cdevs[i]);
rte_compressdev_close(enabled_cdevs[i]);
}
- /* fallthrough */
+ __rte_fallthrough;
case ST_TEST_DATA:
rte_free(test_data);
- /* fallthrough */
+ __rte_fallthrough;
case ST_CLEAR:
default:
i = rte_eal_cleanup();
@@ -11021,7 +11021,7 @@ cmd_flow_director_filter_parsed(void *parsed_result,
case RTE_ETH_FLOW_FRAG_IPV4:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
entry.input.flow.ip4_flow.proto = res->proto_value;
- /* fall-through */
+ __rte_fallthrough;
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
IPV4_ADDR_TO_UINT(res->ip_dst,
@@ -11054,7 +11054,7 @@ cmd_flow_director_filter_parsed(void *parsed_result,
case RTE_ETH_FLOW_FRAG_IPV6:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
entry.input.flow.ipv6_flow.proto = res->proto_value;
- /* fall-through */
+ __rte_fallthrough;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
IPV6_ADDR_TO_ARRAY(res->ip_dst,
@@ -3266,7 +3266,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
pr_err("Invalid era for selected algorithm\n");
return -ENOTSUP;
}
- /* fallthrough */
+ __rte_fallthrough;
case PDCP_CIPHER_TYPE_AES:
case PDCP_CIPHER_TYPE_SNOW:
case PDCP_CIPHER_TYPE_NULL:
@@ -3470,7 +3470,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
pr_err("Invalid era for selected algorithm\n");
return -ENOTSUP;
}
- /* fallthrough */
+ __rte_fallthrough;
case PDCP_CIPHER_TYPE_AES:
case PDCP_CIPHER_TYPE_SNOW:
case PDCP_CIPHER_TYPE_NULL:
@@ -2728,7 +2728,7 @@ static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
/* DEL command deletes all currently configured MACs */
case ECORE_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* fall-through */
+ __rte_fallthrough;
/* RESTORE command will restore the entire multicast configuration */
case ECORE_MCAST_CMD_RESTORE:
@@ -7255,7 +7255,7 @@ elink_status_t elink_set_led(struct elink_params *params,
*/
if (!vars->link_up)
break;
- /* fallthrough */
+ __rte_fallthrough;
case ELINK_LED_MODE_ON:
if (((params->phy[ELINK_EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727) ||
@@ -13416,13 +13416,13 @@ static void elink_phy_def_cfg(struct elink_params *params,
switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_10M_HALF:
phy->req_duplex = DUPLEX_HALF;
- /* fallthrough */
+ __rte_fallthrough;
case PORT_FEATURE_LINK_SPEED_10M_FULL:
phy->req_line_speed = ELINK_SPEED_10;
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
phy->req_duplex = DUPLEX_HALF;
- /* fallthrough */
+ __rte_fallthrough;
case PORT_FEATURE_LINK_SPEED_100M_FULL:
phy->req_line_speed = ELINK_SPEED_100;
break;
@@ -334,12 +334,12 @@ STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
msec_delay(50);
- /* fall-through */
+ __rte_fallthrough;
case e1000_pch2lan:
if (e1000_phy_is_accessible_pchlan(hw))
break;
- /* fall-through */
+ __rte_fallthrough;
case e1000_pchlan:
if ((hw->mac.type == e1000_pchlan) &&
(fwsm & E1000_ICH_FWSM_FW_VALID))
@@ -462,7 +462,7 @@ STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
return ret_val;
if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
break;
- /* fall-through */
+ __rte_fallthrough;
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
@@ -768,7 +768,7 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch2lan:
mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch2lan;
- /* fall-through */
+ __rte_fallthrough;
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
@@ -776,7 +776,7 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
/* multicast address update for pch2 */
mac->ops.update_mc_addr_list =
e1000_update_mc_addr_list_pch2lan;
- /* fall-through */
+ __rte_fallthrough;
#endif
case e1000_pchlan:
#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
@@ -1678,7 +1678,7 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
ret_val = e1000_k1_workaround_lv(hw);
if (ret_val)
return ret_val;
- /* fall-thru */
+ __rte_fallthrough;
case e1000_pchlan:
if (hw->phy.type == e1000_phy_82578) {
ret_val = e1000_link_stall_workaround_hv(hw);
@@ -3401,7 +3401,7 @@ STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
return E1000_SUCCESS;
}
DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
- /* fall-thru */
+ __rte_fallthrough;
default:
/* set bank to 0 in case flash read fails */
*bank = 0;
@@ -1008,7 +1008,7 @@ STATIC s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
break;
case e1000_ms_auto:
phy_data &= ~CR_1000T_MS_ENABLE;
- /* fall-through */
+ __rte_fallthrough;
default:
break;
}
@@ -5075,7 +5075,7 @@ igb_start_timecounters(struct rte_eth_dev *dev)
case e1000_i354:
/* 32 LSB bits + 8 MSB bits = 40 bits */
mask = (1ULL << 40) - 1;
- /* fall-through */
+ __rte_fallthrough;
case e1000_i210:
case e1000_i211:
/*
@@ -5170,7 +5170,7 @@ igb_timesync_enable(struct rte_eth_dev *dev)
case e1000_i210:
case e1000_i211:
E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
- /* fall-through */
+ __rte_fallthrough;
case e1000_82576:
E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
@@ -284,12 +284,12 @@ fs_dev_remove(struct sub_device *sdev)
failsafe_rx_intr_uninstall_subdevice(sdev);
rte_eth_dev_stop(PORT_ID(sdev));
sdev->state = DEV_ACTIVE;
- /* fallthrough */
+ __rte_fallthrough;
case DEV_ACTIVE:
failsafe_eth_dev_unregister_callbacks(sdev);
rte_eth_dev_close(PORT_ID(sdev));
sdev->state = DEV_PROBED;
- /* fallthrough */
+ __rte_fallthrough;
case DEV_PROBED:
ret = rte_dev_remove(sdev->dev);
if (ret < 0) {
@@ -299,7 +299,7 @@ fs_dev_remove(struct sub_device *sdev)
rte_eth_dev_release_port(ETH(sdev));
}
sdev->state = DEV_PARSED;
- /* fallthrough */
+ __rte_fallthrough;
case DEV_PARSED:
case DEV_UNDEFINED:
sdev->state = DEV_UNDEFINED;
@@ -269,7 +269,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_100G_CAUI4:
if (ice_is_media_cage_present(pi))
return ICE_MEDIA_DA;
- /* fall-through */
+ __rte_fallthrough;
case ICE_PHY_TYPE_LOW_1000BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_X:
@@ -290,7 +290,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_HIGH_100G_AUI2:
if (ice_is_media_cage_present(pi))
return ICE_MEDIA_DA;
- /* fall-through */
+ __rte_fallthrough;
case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
return ICE_MEDIA_BACKPLANE;
}
@@ -2978,7 +2978,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
break;
}
- /* fall-through */
+ __rte_fallthrough;
default:
status = ICE_ERR_PARAM;
goto ice_aq_get_set_rss_lut_exit;
@@ -1931,7 +1931,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_ETHERTYPE_MAC:
daddr = f_info->l_data.ethertype_mac.mac_addr;
- /* fall-through */
+ __rte_fallthrough;
case ICE_SW_LKUP_ETHERTYPE:
off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
*off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
@@ -1942,7 +1942,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_PROMISC_VLAN:
vlan_id = f_info->l_data.mac_vlan.vlan_id;
- /* fall-through */
+ __rte_fallthrough;
case ICE_SW_LKUP_PROMISC:
daddr = f_info->l_data.mac_vlan.mac_addr;
break;
@@ -732,7 +732,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.udp4_flow.src_port;
input->formatted.dst_port =
fdir_filter->input.flow.udp4_flow.dst_port;
- /* fall-through */
+ __rte_fallthrough;
/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
@@ -748,7 +748,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.udp6_flow.src_port;
input->formatted.dst_port =
fdir_filter->input.flow.udp6_flow.dst_port;
- /* fall-through */
+ __rte_fallthrough;
/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
@@ -524,7 +524,7 @@ mlx4_tx_burst_fill_tso_dsegs(struct rte_mbuf *buf,
dseg++;
data_len = sbuf->data_len;
data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
- /* fallthrough */
+ __rte_fallthrough;
case 3:
lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1))
@@ -540,7 +540,7 @@ mlx4_tx_burst_fill_tso_dsegs(struct rte_mbuf *buf,
dseg++;
data_len = sbuf->data_len;
data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
- /* fallthrough */
+ __rte_fallthrough;
case 2:
lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1))
@@ -556,7 +556,7 @@ mlx4_tx_burst_fill_tso_dsegs(struct rte_mbuf *buf,
dseg++;
data_len = sbuf->data_len;
data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
- /* fallthrough */
+ __rte_fallthrough;
case 1:
lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1))
@@ -572,7 +572,6 @@ mlx4_tx_burst_fill_tso_dsegs(struct rte_mbuf *buf,
dseg++;
data_len = sbuf->data_len;
data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
- /* fallthrough */
}
/* Wrap dseg if it points at the end of the queue. */
if ((volatile uint8_t *)dseg >= sq->eob)
@@ -812,7 +812,7 @@ mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
sbuf = sbuf->next;
dseg++;
nb_segs--;
- /* fallthrough */
+ __rte_fallthrough;
case 2:
lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1)) {
@@ -828,7 +828,7 @@ mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
sbuf = sbuf->next;
dseg++;
nb_segs--;
- /* fallthrough */
+ __rte_fallthrough;
case 1:
lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1)) {
@@ -847,7 +847,7 @@ mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
dseg++;
goto txbb_head_seg;
}
- /* fallthrough */
+ __rte_fallthrough;
case 0:
break;
}
@@ -1631,7 +1631,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
ecore_eth_cqe_completion(
&edev->hwfns[rxq->queue_id % edev->num_hwfns],
(struct eth_slow_path_rx_cqe *)cqe);
- /* fall-thru */
+ __rte_fallthrough;
default:
goto next_cqe;
}
@@ -2776,14 +2776,13 @@ pmd_tm_node_parent_update(struct rte_eth_dev *dev,
RTE_TM_ERROR_TYPE_NODE_WEIGHT,
NULL,
rte_strerror(EINVAL));
- /* fall-through */
case TM_NODE_LEVEL_SUBPORT:
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_WEIGHT,
NULL,
rte_strerror(EINVAL));
- /* fall-through */
+ __rte_fallthrough;
case TM_NODE_LEVEL_PIPE:
#ifdef RTE_SCHED_SUBPORT_TC_OV
if (update_pipe_weight(dev, n, weight))
@@ -2800,16 +2800,14 @@ pmd_tm_node_parent_update(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
#endif
- /* fall-through */
case TM_NODE_LEVEL_TC:
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_WEIGHT,
NULL,
rte_strerror(EINVAL));
- /* fall-through */
case TM_NODE_LEVEL_QUEUE:
- /* fall-through */
+ __rte_fallthrough;
default:
if (update_queue_weight(dev, n, weight))
return -rte_tm_error_set(error,
@@ -2988,7 +2988,6 @@ pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
NULL,
rte_strerror(EINVAL));
- /* fall-through */
case TM_NODE_LEVEL_SUBPORT:
if (update_subport_rate(dev, n, sp))
return -rte_tm_error_set(error,
@@ -2997,7 +2997,6 @@ pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
return 0;
- /* fall-through */
case TM_NODE_LEVEL_PIPE:
if (update_pipe_rate(dev, n, sp))
return -rte_tm_error_set(error,
@@ -3006,7 +3006,6 @@ pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
return 0;
- /* fall-through */
case TM_NODE_LEVEL_TC:
if (update_tc_rate(dev, n, sp))
return -rte_tm_error_set(error,
@@ -3015,9 +3015,8 @@ pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
return 0;
- /* fall-through */
case TM_NODE_LEVEL_QUEUE:
- /* fall-through */
+ __rte_fallthrough;
default:
return -rte_tm_error_set(error,
EINVAL,
@@ -220,12 +220,12 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
- /* fall-through */
+ __rte_fallthrough;
case 2:
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
- /* fall-through */
+ __rte_fallthrough;
case 1:
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
@@ -212,15 +212,15 @@ send_packetsx4(struct lcore_conf *qconf, uint16_t port, struct rte_mbuf *m[],
case 0:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 3:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 2:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 1:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
@@ -242,15 +242,15 @@ send_packetsx4(struct lcore_conf *qconf, uint16_t port, struct rte_mbuf *m[],
case 0:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 3:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 2:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 1:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
@@ -119,15 +119,14 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
case 3:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
j++;
- /* fall-through */
+ __rte_fallthrough;
case 2:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
j++;
- /* fall-through */
+ __rte_fallthrough;
case 1:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
j++;
- /* fall-through */
}
send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
@@ -127,12 +127,12 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
struct rte_ether_hdr *) + 1);
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 2:
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
struct rte_ether_hdr *) + 1);
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 1:
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
struct rte_ether_hdr *) + 1);
@@ -146,12 +146,12 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
portid);
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 2:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
portid);
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 1:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
portid);
@@ -104,11 +104,11 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
case 3:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
j++;
- /* fall-through */
+ __rte_fallthrough;
case 2:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
j++;
- /* fall-through */
+ __rte_fallthrough;
case 1:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
j++;
@@ -195,12 +195,12 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 2:
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
- /* fallthrough */
+ __rte_fallthrough;
case 1:
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
@@ -192,12 +192,12 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
- /* fall-through */
+ __rte_fallthrough;
case 2:
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
- /* fall-through */
+ __rte_fallthrough;
case 1:
process_packet(pkts_burst[j], dst_port + j);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
@@ -700,15 +700,15 @@ send_packetsx4(uint16_t port,
case 0:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
- /* fall-through */
+ __rte_fallthrough;
case 3:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
- /* fall-through */
+ __rte_fallthrough;
case 2:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
- /* fall-through */
+ __rte_fallthrough;
case 1:
qconf->tx_mbufs[port].m_table[len + j] = m[j];
j++;
@@ -730,15 +730,15 @@ send_packetsx4(uint16_t port,
case 0:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
- /* fall-through */
+ __rte_fallthrough;
case 3:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
- /* fall-through */
+ __rte_fallthrough;
case 2:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
- /* fall-through */
+ __rte_fallthrough;
case 1:
qconf->tx_mbufs[port].m_table[j] = m[n + j];
j++;
@@ -1815,12 +1815,12 @@ process_burst(struct rte_mbuf *pkts_burst[MAX_PKT_BURST], int nb_rx,
process_packet(pkts_burst[j], dst_port + j, portid);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
- /* fall-through */
+ __rte_fallthrough;
case 2:
process_packet(pkts_burst[j], dst_port + j, portid);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
j++;
- /* fall-through */
+ __rte_fallthrough;
case 1:
process_packet(pkts_burst[j], dst_port + j, portid);
GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
@@ -196,7 +196,7 @@ cmdline_parse_num(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res,
case HEX:
st = HEX_OK;
- /* fall-through */
+ __rte_fallthrough;
case HEX_OK:
if (c >= '0' && c <= '9') {
if (add_to_res(c - '0', &res1, 16) < 0)
@@ -228,7 +228,7 @@ cmdline_parse_num(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res,
case BIN:
st = BIN_OK;
- /* fall-through */
+ __rte_fallthrough;
case BIN_OK:
if (c >= '0' && c <= '1') {
if (add_to_res(c - '0', &res1, 2) < 0)
@@ -206,17 +206,17 @@ rte_memcpy_func(void *dst, const void *src, size_t n)
rte_mov64((uint8_t *)dst, (const uint8_t *)src);
n -= 64;
dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
+ src = (const uint8_t *)src + 64; __rte_fallthrough;
case 0x01:
rte_mov64((uint8_t *)dst, (const uint8_t *)src);
n -= 64;
dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
+ src = (const uint8_t *)src + 64; __rte_fallthrough;
case 0x02:
rte_mov64((uint8_t *)dst, (const uint8_t *)src);
n -= 64;
dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
+ src = (const uint8_t *)src + 64; __rte_fallthrough;
default:
break;
}
@@ -230,17 +230,17 @@ rte_memcpy_func(void *dst, const void *src, size_t n)
rte_mov16((uint8_t *)dst, (const uint8_t *)src);
n -= 16;
dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
+ src = (const uint8_t *)src + 16; __rte_fallthrough;
case 0x01:
rte_mov16((uint8_t *)dst, (const uint8_t *)src);
n -= 16;
dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
+ src = (const uint8_t *)src + 16; __rte_fallthrough;
case 0x02:
rte_mov16((uint8_t *)dst, (const uint8_t *)src);
n -= 16;
dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
+ src = (const uint8_t *)src + 16; __rte_fallthrough;
default:
break;
}
@@ -146,17 +146,17 @@ rte_memcpy_func(void *dst, const void *src, size_t n)
rte_mov64((uint8_t *)dst, (const uint8_t *)src);
n -= 64;
dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
+ src = (const uint8_t *)src + 64; __rte_fallthrough;
case 0x01:
rte_mov64((uint8_t *)dst, (const uint8_t *)src);
n -= 64;
dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
+ src = (const uint8_t *)src + 64; __rte_fallthrough;
case 0x02:
rte_mov64((uint8_t *)dst, (const uint8_t *)src);
n -= 64;
dst = (uint8_t *)dst + 64;
- src = (const uint8_t *)src + 64; /* fallthrough */
+ src = (const uint8_t *)src + 64; __rte_fallthrough;
default:
;
}
@@ -170,17 +170,17 @@ rte_memcpy_func(void *dst, const void *src, size_t n)
rte_mov16((uint8_t *)dst, (const uint8_t *)src);
n -= 16;
dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
+ src = (const uint8_t *)src + 16; __rte_fallthrough;
case 0x01:
rte_mov16((uint8_t *)dst, (const uint8_t *)src);
n -= 16;
dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
+ src = (const uint8_t *)src + 16; __rte_fallthrough;
case 0x02:
rte_mov16((uint8_t *)dst, (const uint8_t *)src);
n -= 16;
dst = (uint8_t *)dst + 16;
- src = (const uint8_t *)src + 16; /* fallthrough */
+ src = (const uint8_t *)src + 16; __rte_fallthrough;
default:
;
}
@@ -83,6 +83,18 @@ typedef uint16_t unaligned_uint16_t;
*/
#define __rte_unused __attribute__((__unused__))
+/**
+ * short definition for a switch/case intentional fallthrough
+ */
+#if defined(__has_attribute)
+#if __has_attribute(__fallthrough__)
+#define __rte_fallthrough __attribute__((__fallthrough__))
+#endif
+#endif
+#ifndef __rte_fallthrough
+#define __rte_fallthrough do { } while (0)
+#endif
+
/**
* definition to mark a variable or function parameter as used so
* as to avoid a compiler warning
@@ -741,9 +746,9 @@ rte_str_to_size(const char *str)
endptr++; /* allow 1 space gap */
switch (*endptr){
- case 'G': case 'g': size *= 1024; /* fall-through */
- case 'M': case 'm': size *= 1024; /* fall-through */
- case 'K': case 'k': size *= 1024; /* fall-through */
+ case 'G': case 'g': size *= 1024; __rte_fallthrough;
+ case 'M': case 'm': size *= 1024; __rte_fallthrough;
+ case 'K': case 'k': size *= 1024; __rte_fallthrough;
default:
break;
}
@@ -4886,7 +4886,7 @@ rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
state = 1;
pair->key = letter;
- /* fall-thru */
+ __rte_fallthrough;
case 1: /* Parsing key */
if (*letter == '=') {
@@ -77,10 +77,10 @@ rte_hash_crc_set_alg(uint8_t alg)
case CRC32_ARM64:
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
alg = CRC32_SW;
- /* fall-through */
+ __rte_fallthrough;
case CRC32_SW:
crc32_alg = alg;
- /* fall-through */
+ __rte_fallthrough;
default:
break;
}
@@ -106,10 +106,10 @@ copy_iv(uint64_t dst[IPSEC_MAX_IV_QWORD],
switch (len) {
case IPSEC_MAX_IV_SIZE:
dst[1] = src[1];
- /* fallthrough */
+ __rte_fallthrough;
case sizeof(uint64_t):
dst[0] = src[0];
- /* fallthrough */
+ __rte_fallthrough;
case 0:
break;
default:
@@ -840,22 +840,21 @@ static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
- /* fall-through */
+ __rte_fallthrough;
case 3:
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
- /* fall-through */
+ __rte_fallthrough;
case 2:
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
- /* fall-through */
+ __rte_fallthrough;
case 1:
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
- /* fall-through */
}
}
return 0;
@@ -151,16 +151,16 @@ rte_net_crc_set_alg(enum rte_net_crc_alg alg)
handlers = handlers_sse42;
break;
#elif defined ARM64_NEON_PMULL
- /* fall-through */
+ __rte_fallthrough;
case RTE_NET_CRC_NEON:
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_PMULL)) {
handlers = handlers_neon;
break;
}
#endif
- /* fall-through */
+ __rte_fallthrough;
case RTE_NET_CRC_SCALAR:
- /* fall-through */
+ __rte_fallthrough;
default:
handlers = handlers_scalar;
break;
@@ -251,9 +251,9 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} \
switch (n & 0x3) { \
case 3: \
- ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ ring[idx++] = obj_table[i++]; __rte_fallthrough; \
case 2: \
- ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ ring[idx++] = obj_table[i++]; __rte_fallthrough; \
case 1: \
ring[idx++] = obj_table[i++]; \
} \
@@ -282,9 +282,9 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} \
switch (n & 0x3) { \
case 3: \
- obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ obj_table[i++] = ring[idx++]; __rte_fallthrough; \
case 2: \
- obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ obj_table[i++] = ring[idx++]; __rte_fallthrough; \
case 1: \
obj_table[i++] = ring[idx++]; \
} \
@@ -87,7 +87,7 @@ HOST_WERROR_FLAGS := $(WERROR_FLAGS)
ifeq ($(shell test $(HOST_GCC_VERSION) -gt 70 && echo 1), 1)
-# Tell GCC only to error for switch fallthroughs without a suitable comment
-HOST_WERROR_FLAGS += -Wimplicit-fallthrough=2
+# Tell GCC to error for switch fallthroughs not marked with the fallthrough attribute
+HOST_WERROR_FLAGS += -Wimplicit-fallthrough=5
# Ignore errors for snprintf truncation
HOST_WERROR_FLAGS += -Wno-format-truncation
endif