@@ -1956,7 +1956,7 @@ int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
int i, array_len;
wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
- array_len = ARRAY_SIZE(mcr->querywq.wq_len);
+ array_len = RTE_DIM(mcr->querywq.wq_len);
for (i = 0; i < array_len; i++)
wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
}
@@ -2023,7 +2023,7 @@ int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
be32_to_cpu(cgrd->cgr.wr_parm_r.word);
cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
- for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
+ for (i = 0; i < RTE_DIM(cgrd->cscn_targ_swp); i++)
cgrd->cscn_targ_swp[i] =
be32_to_cpu(cgrd->cscn_targ_swp[i]);
return 0;
@@ -2049,7 +2049,7 @@ int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
return -EIO;
}
- for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
+ for (i = 0; i < RTE_DIM(congestion->state.state); i++)
congestion->state.state[i] =
be32_to_cpu(congestion->state.state[i]);
return 0;
@@ -67,11 +67,6 @@
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)
-#ifdef ARRAY_SIZE
-#undef ARRAY_SIZE
-#endif
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
-
/* Debugging */
#define prflush(fmt, args...) \
do { \
@@ -67,14 +67,6 @@
#endif
#endif /* pr_warn */
-/**
- * ARRAY_SIZE - returns the number of elements in an array
- * @x: array
- */
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
#ifndef ALIGN
#define ALIGN(x, a) (((x) + ((__typeof__(x))(a) - 1)) & \
~((__typeof__(x))(a) - 1))
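
For context: RTE_DIM() comes from DPDK's rte_common.h and expands to the same sizeof quotient as the per-driver ARRAY_SIZE copies removed above, which is why every local definition is redundant. A minimal standalone sketch (names illustrative, not from this patch):

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_common.h>   /* RTE_DIM(a) == sizeof(a) / sizeof((a)[0]) */

    static const uint32_t regs[] = { 0x100, 0x104, 0x108 };

    int main(void)
    {
        unsigned int i;

        /* RTE_DIM() is an integer constant expression (3 here), so it
         * can also size arrays and feed static asserts.
         */
        for (i = 0; i < RTE_DIM(regs); i++)
            printf("reg[%u] = 0x%" PRIx32 "\n", i, regs[i]);
        return 0;
    }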
@@ -132,12 +132,12 @@ rta_jump(struct program *program, uint64_t address,
/* write test condition field */
if ((jump_type != LOCAL_JUMP_INC) && (jump_type != LOCAL_JUMP_DEC)) {
__rta_map_flags(test_condition, jump_test_cond,
- ARRAY_SIZE(jump_test_cond), &opcode);
+ RTE_DIM(jump_test_cond), &opcode);
} else {
uint32_t val = 0;
ret = __rta_map_opcode(src_dst, jump_src_dst,
- ARRAY_SIZE(jump_src_dst), &val);
+ RTE_DIM(jump_src_dst), &val);
if (ret < 0) {
pr_err("JUMP_INCDEC: SRC_DST not supported. SEC PC: %d; Instr: %d\n",
program->current_pc,
@@ -147,7 +147,7 @@ rta_jump(struct program *program, uint64_t address,
opcode |= val;
__rta_map_flags(test_condition, jump_test_math_cond,
- ARRAY_SIZE(jump_test_math_cond), &opcode);
+ RTE_DIM(jump_test_math_cond), &opcode);
}
/* write local offset field for local jumps and user-defined halt */
@@ -118,7 +118,7 @@ rta_nfifo_load(struct program *program, uint32_t src,
opcode |= val;
/* write type field */
- ret = __rta_map_opcode(data, nfifo_data, ARRAY_SIZE(nfifo_data), &val);
+ ret = __rta_map_opcode(data, nfifo_data, RTE_DIM(nfifo_data), &val);
if (ret < 0) {
pr_err("NFIFO: Invalid data. SEC PC: %d; Instr: %d\n",
program->current_pc, program->current_instruction);
@@ -26,7 +26,6 @@
#define mdelay rte_delay_ms
#define udelay rte_delay_us
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#define BIT(x) (1UL << (x))
#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \
@@ -647,7 +647,7 @@ static int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
aq_hw_read_reg(self, 0x00000374U) +
(40U * 4U),
mac_addr,
- ARRAY_SIZE(mac_addr));
+ RTE_DIM(mac_addr));
if (err < 0) {
mac_addr[0] = 0U;
mac_addr[1] = 0U;
@@ -827,7 +827,7 @@ static const u32 hw_atl_utils_hw_mac_regs[] = {
unsigned int hw_atl_utils_hw_get_reg_length(void)
{
- return ARRAY_SIZE(hw_atl_utils_hw_mac_regs);
+ return RTE_DIM(hw_atl_utils_hw_mac_regs);
}
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
@@ -221,7 +221,7 @@ int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
err = hw_atl_utils_fw_downld_dwords(self,
efuse_addr + (40U * 4U),
mac_addr,
- ARRAY_SIZE(mac_addr));
+ RTE_DIM(mac_addr));
if (err)
return err;
mac_addr[0] = rte_constant_bswap32(mac_addr[0]);
@@ -40,9 +40,6 @@
#include <rte_io.h>
#define BIT(nr) (1 << (nr))
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-#endif
#define AXGBE_HZ 250
@@ -640,7 +640,7 @@ static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
unsigned int i;
int ret;
- for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ for (i = 0; i < RTE_DIM(pdata->rss_table); i++) {
ret = axgbe_write_rss_reg(pdata,
AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
pdata->rss_table[i]);
@@ -11054,14 +11054,12 @@ static void bnx2x_tx_hw_flushed(struct bnx2x_softc *sc, uint32_t poll_count)
uint32_t i;
/* Verify the command queues are flushed P0, P1, P4 */
- for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
+ for (i = 0; i < RTE_DIM(cmd_regs); i++)
bnx2x_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
- }
/* Verify the transmission buffers are flushed P0, P1, P4 */
- for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
+ for (i = 0; i < RTE_DIM(buf_regs); i++)
bnx2x_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
- }
}
static void bnx2x_hw_enable_status(struct bnx2x_softc *sc)
@@ -91,12 +91,6 @@
#define PCIM_EXP_CTL_MAX_READ_REQUEST PCIEM_CTL_MAX_READ_REQUEST
#endif
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-#endif
-#ifndef ARRSIZE
-#define ARRSIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-#endif
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif
@@ -709,7 +709,7 @@ static inline void ecore_set_mcp_parity(struct bnx2x_softc *sc, uint8_t enable)
uint32_t i;
uint32_t reg_val;
- for (i = 0; i < ARRSIZE(mcp_attn_ctl_regs); i++) {
+ for (i = 0; i < RTE_DIM(mcp_attn_ctl_regs); i++) {
reg_val = REG_RD(sc, mcp_attn_ctl_regs[i]);
if (enable)
@@ -735,7 +735,7 @@ static inline void ecore_disable_blocks_parity(struct bnx2x_softc *sc)
{
uint32_t i;
- for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+ for (i = 0; i < RTE_DIM(ecore_blocks_parity_data); i++) {
uint32_t dis_mask = ecore_parity_reg_mask(sc, i);
if (dis_mask) {
@@ -769,7 +769,7 @@ static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc)
REG_WR(sc, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
REG_WR(sc, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
- for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+ for (i = 0; i < RTE_DIM(ecore_blocks_parity_data); i++) {
uint32_t reg_mask = ecore_parity_reg_mask(sc, i);
if (reg_mask) {
@@ -801,7 +801,7 @@ static inline void ecore_enable_blocks_parity(struct bnx2x_softc *sc)
{
uint32_t i;
- for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
+ for (i = 0; i < RTE_DIM(ecore_blocks_parity_data); i++) {
uint32_t reg_mask = ecore_parity_reg_mask(sc, i);
if (reg_mask)
@@ -5231,7 +5231,7 @@ static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_st
rdata->dcb_version = tx_start_params->dcb_version;
rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
- for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
+ for (i = 0; i < RTE_DIM(rdata->traffic_type_to_priority_cos); i++)
rdata->traffic_type_to_priority_cos[i] =
tx_start_params->traffic_type_to_priority_cos[i];
@@ -4514,7 +4514,7 @@ static void elink_warpcore_enable_AN_KR2(struct elink_phy *phy,
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL49_USERB0_CTRL, (3 << 6));
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ for (i = 0; i < RTE_DIM(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -4549,7 +4549,7 @@ static void elink_disable_kr2(struct elink_params *params,
};
ELINK_DEBUG_P0(sc, "Disabling 20G-KR2");
- for (i = 0; i < (int)ARRAY_SIZE(reg_set); i++)
+ for (i = 0; i < (int)RTE_DIM(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
@@ -4603,7 +4603,7 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
};
ELINK_DEBUG_P0(sc, "Enable Auto Negotiation for KR");
/* Set to default registers that may be overridden by 10G force */
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ for (i = 0; i < RTE_DIM(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -4757,7 +4757,7 @@ static void elink_warpcore_set_10G_KR(struct elink_phy *phy,
{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
};
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ for (i = 0; i < RTE_DIM(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -5176,7 +5176,7 @@ static void elink_warpcore_clear_regs(struct elink_phy *phy,
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, (3 << 13));
- for (i = 0; i < ARRAY_SIZE(wc_regs); i++)
+ for (i = 0; i < RTE_DIM(wc_regs); i++)
elink_cl45_write(sc, phy, wc_regs[i].devad, wc_regs[i].reg,
wc_regs[i].val);
@@ -10652,7 +10652,7 @@ static void elink_save_848xx_spirom_version(struct elink_phy *phy,
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ for (i = 0; i < RTE_DIM(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad,
reg_set[i].reg, reg_set[i].val);
@@ -10723,7 +10723,7 @@ static void elink_848xx_set_led(struct bnx2x_softc *sc,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL, val);
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ for (i = 0; i < RTE_DIM(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -86,7 +86,6 @@ extern void elink_cb_notify_link_changed(struct bnx2x_softc *sc);
#define ELINK_EVENT_ID_SFP_UNQUALIFIED_MODULE 1
#define ELINK_EVENT_ID_SFP_POWER_FAULT 2
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
/* Debug prints */
#ifdef ELINK_DEBUG
@@ -387,7 +387,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
*/
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
- if (delay_idx < ARRAY_SIZE(delay) - 1)
+ if (delay_idx < RTE_DIM(delay) - 1)
delay_idx++;
msleep(ms);
} else {
@@ -470,7 +470,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
- if (delay_idx < ARRAY_SIZE(delay) - 1)
+ if (delay_idx < RTE_DIM(delay) - 1)
delay_idx++;
msleep(ms);
} else {
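
The delay[] walks in these mailbox retry loops are a saturating backoff: the index marches through the table and then pins to the final entry, exactly as the in-line comment says. A self-contained sketch of the pattern, with illustrative table values rather than t4_hw.c's:

    #include <rte_common.h>

    static const int delay_ms[] = { 1, 1, 3, 5, 10, 10, 20, 50, 100 };

    static int next_backoff(int *delay_idx)
    {
        int ms = delay_ms[*delay_idx];      /* last element may repeat */

        if (*delay_idx < (int)RTE_DIM(delay_ms) - 1)
            (*delay_idx)++;
        return ms;
    }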
@@ -1923,12 +1923,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
switch (chip_version) {
case CHELSIO_T5:
reg_ranges = t5_reg_ranges;
- reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
+ reg_ranges_size = RTE_DIM(t5_reg_ranges);
break;
case CHELSIO_T6:
reg_ranges = t6_reg_ranges;
- reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+ reg_ranges_size = RTE_DIM(t6_reg_ranges);
break;
default:
@@ -2653,7 +2653,7 @@ static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
int ret;
ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
- ARRAY_SIZE(exprom_header_buf),
+ RTE_DIM(exprom_header_buf),
exprom_header_buf, 0);
if (ret)
return ret;
@@ -3112,7 +3112,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
"KR_SFP28",
};
- if (port_type < ARRAY_SIZE(port_type_description))
+ if (port_type < RTE_DIM(port_type_description))
return port_type_description[port_type];
return "UNKNOWN";
}
@@ -4492,7 +4492,7 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc)
"Reserved",
};
- if (link_down_rc >= ARRAY_SIZE(reason))
+ if (link_down_rc >= RTE_DIM(reason))
return "Bad Reason Code";
return reason[link_down_rc];
@@ -4815,7 +4815,7 @@ int t4_get_flash_params(struct adapter *adapter)
/**
* Check to see if it's one of our non-standard supported Flash parts.
*/
- for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
+ for (part = 0; part < RTE_DIM(supported_flash); part++) {
if (supported_flash[part].vendor_and_model_id == flashid) {
adapter->params.sf_size =
supported_flash[part].size_mb;
@@ -143,7 +143,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter,
*/
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
- if (delay_idx < ARRAY_SIZE(delay) - 1)
+ if (delay_idx < RTE_DIM(delay) - 1)
delay_idx++;
msleep(ms);
} else {
@@ -186,7 +186,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter,
for (i = 0; i < FW_CMD_MAX_TIMEOUT; i++) {
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
- if (delay_idx < ARRAY_SIZE(delay) - 1)
+ if (delay_idx < RTE_DIM(delay) - 1)
delay_idx++;
msleep(ms);
} else {
@@ -144,8 +144,6 @@ typedef uint64_t dma_addr_t;
(type *)((char *)__mptr - offsetof(type, member)); })
#endif
-#define ARRAY_SIZE(arr) RTE_DIM(arr)
-
#define cpu_to_be16(o) rte_cpu_to_be_16(o)
#define cpu_to_be32(o) rte_cpu_to_be_32(o)
#define cpu_to_be64(o) rte_cpu_to_be_64(o)
@@ -891,13 +891,13 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
{
struct adapter *adap = ethdev2adap(flow->dev);
const struct rte_flow_item *i;
- char repeat[ARRAY_SIZE(parseitem)] = {0};
+ char repeat[RTE_DIM(parseitem)] = {0};
for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
struct chrte_fparse *idx;
int ret;
- if (i->type >= ARRAY_SIZE(parseitem))
+ if (i->type >= RTE_DIM(parseitem))
return rte_flow_error_set(e, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
i, "Item not supported");
@@ -208,7 +208,7 @@ static int closest_timer(const struct sge *s, int time)
unsigned int i, match = 0;
int delta, min_delta = INT_MAX;
- for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ for (i = 0; i < RTE_DIM(s->timer_val); i++) {
delta = time - s->timer_val[i];
if (delta < 0)
delta = -delta;
@@ -225,7 +225,7 @@ static int closest_thres(const struct sge *s, int thres)
unsigned int i, match = 0;
int delta, min_delta = INT_MAX;
- for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ for (i = 0; i < RTE_DIM(s->counter_val); i++) {
delta = thres - s->counter_val[i];
if (delta < 0)
delta = -delta;
@@ -547,7 +547,7 @@ void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
qidx += pi->n_rx_qsets;
}
- for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+ for (i = 0; i < RTE_DIM(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
@@ -555,7 +555,7 @@ void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
r->fl.size = (r->usembufs ? 1024 : 72);
}
- for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
+ for (i = 0; i < RTE_DIM(s->ethtxq); i++)
s->ethtxq[i].q.size = 1024;
init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
@@ -1325,7 +1325,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
- else if (pi->mod_type < ARRAY_SIZE(mod_str))
+ else if (pi->mod_type < RTE_DIM(mod_str))
dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
mod_str[pi->mod_type]);
else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
@@ -2298,7 +2298,7 @@ void t4_free_sge_resources(struct adapter *adap)
}
/* clean up control Tx queues */
- for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+ for (i = 0; i < RTE_DIM(adap->sge.ctrlq); i++) {
struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
if (cq->q.desc) {
@@ -54,8 +54,6 @@
#define ENA_HASH_KEY_SIZE 40
#define ETH_GSTRING_LEN 32
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-
#define ENA_MIN_RING_DESC 128
enum ethtool_stringset {
@@ -119,9 +117,9 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(bad_req_id),
};
-#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings)
-#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
-#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
+#define ENA_STATS_ARRAY_GLOBAL RTE_DIM(ena_stats_global_strings)
+#define ENA_STATS_ARRAY_TX RTE_DIM(ena_stats_tx_strings)
+#define ENA_STATS_ARRAY_RX RTE_DIM(ena_stats_rx_strings)
#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
DEV_TX_OFFLOAD_UDP_CKSUM |\
@@ -417,11 +417,11 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
switch (vdev->proxy) {
case PROXY_BY_INDEX:
err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
- args, ARRAY_SIZE(args), wait);
+ args, RTE_DIM(args), wait);
break;
case PROXY_BY_BDF:
err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
- args, ARRAY_SIZE(args), wait);
+ args, RTE_DIM(args), wait);
break;
case PROXY_NONE:
default:
@@ -63,8 +63,6 @@
#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-
enum vnic_devcmd_cmd {
CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
@@ -268,7 +268,7 @@ hns3_is_special_opcode(uint16_t opcode)
HNS3_OPC_QUERY_64_BIT_REG};
uint32_t i;
- for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
+ for (i = 0; i < RTE_DIM(spec_opcode); i++)
if (spec_opcode[i] == opcode)
return true;
@@ -570,8 +570,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
#define hns3_read_dev(a, reg) \
hns3_read_reg((a)->io_base, (reg))
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
do { \
act = (actions) + (index); \
@@ -982,37 +982,37 @@ hns3_parse_normal(const struct rte_flow_item *item,
case RTE_FLOW_ITEM_TYPE_ETH:
ret = hns3_parse_eth(item, rule, error);
step_mngr->items = L2_next_items;
- step_mngr->count = ARRAY_SIZE(L2_next_items);
+ step_mngr->count = RTE_DIM(L2_next_items);
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
ret = hns3_parse_vlan(item, rule, error);
step_mngr->items = L2_next_items;
- step_mngr->count = ARRAY_SIZE(L2_next_items);
+ step_mngr->count = RTE_DIM(L2_next_items);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = hns3_parse_ipv4(item, rule, error);
step_mngr->items = L3_next_items;
- step_mngr->count = ARRAY_SIZE(L3_next_items);
+ step_mngr->count = RTE_DIM(L3_next_items);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = hns3_parse_ipv6(item, rule, error);
step_mngr->items = L3_next_items;
- step_mngr->count = ARRAY_SIZE(L3_next_items);
+ step_mngr->count = RTE_DIM(L3_next_items);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = hns3_parse_tcp(item, rule, error);
step_mngr->items = L4_next_items;
- step_mngr->count = ARRAY_SIZE(L4_next_items);
+ step_mngr->count = RTE_DIM(L4_next_items);
break;
case RTE_FLOW_ITEM_TYPE_UDP:
ret = hns3_parse_udp(item, rule, error);
step_mngr->items = L4_next_items;
- step_mngr->count = ARRAY_SIZE(L4_next_items);
+ step_mngr->count = RTE_DIM(L4_next_items);
break;
case RTE_FLOW_ITEM_TYPE_SCTP:
ret = hns3_parse_sctp(item, rule, error);
step_mngr->items = L4_next_items;
- step_mngr->count = ARRAY_SIZE(L4_next_items);
+ step_mngr->count = RTE_DIM(L4_next_items);
break;
default:
return rte_flow_error_set(error, ENOTSUP,
@@ -1122,7 +1122,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
"fdir_conf.mode isn't perfect");
step_mngr.items = first_items;
- step_mngr.count = ARRAY_SIZE(first_items);
+ step_mngr.count = RTE_DIM(first_items);
for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
continue;
@@ -1136,7 +1136,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
step_mngr.items = tunnel_next_items;
- step_mngr.count = ARRAY_SIZE(tunnel_next_items);
+ step_mngr.count = RTE_DIM(tunnel_next_items);
} else {
ret = hns3_parse_normal(item, rule, &step_mngr, error);
if (ret)
@@ -37,7 +37,7 @@ static enum i40e_status_code i40e_diag_reg_pattern_test(struct i40e_hw *hw,
int i;
orig_val = rd32(hw, reg);
- for (i = 0; i < ARRAY_SIZE(patterns); i++) {
+ for (i = 0; i < RTE_DIM(patterns); i++) {
pat = patterns[i];
wr32(hw, reg, (pat & mask));
val = rd32(hw, reg);
@@ -150,8 +150,6 @@ static inline uint32_t i40e_read_addr(volatile void *addr)
I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((a), (reg)), (value))
#define flush(a) i40e_read_addr(I40E_PCI_REG_ADDR((a), (I40E_GLGEN_STAT)))
-#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
-
/* memory allocation tracking */
struct i40e_dma_mem {
void *va;
@@ -121,8 +121,6 @@ uint32_t iavf_read_addr(volatile void *addr)
#define wr32(a, reg, value) \
IAVF_PCI_REG_WRITE(IAVF_PCI_REG_ADDR((a), (reg)), (value))
-#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
-
#define iavf_debug(h, m, s, ...) \
do { \
if (((m) & (h)->debug_mask)) \
@@ -350,7 +350,7 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
},
};
-#define ICE_FDIR_NUM_PKT ARRAY_SIZE(ice_fdir_pkt)
+#define ICE_FDIR_NUM_PKT RTE_DIM(ice_fdir_pkt)
/* Flow Director (FD) filter program descriptor Context */
static const struct ice_ctx_ele ice_fd_fltr_desc_ctx_info[] = {
@@ -4219,7 +4219,7 @@ static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
{ ICE_PROT_SCTP_IL, 1, 2 }
};
-#define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
+#define ICE_FD_SRC_DST_PAIR_COUNT RTE_DIM(ice_fd_pairs)
/**
* ice_update_fd_swap - set register appropriately for a FD FV extraction
@@ -794,7 +794,7 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
return ICE_SUCCESS;
if (params->prof->segs[seg].raws_cnt >
- ARRAY_SIZE(params->prof->segs[seg].raws))
+ RTE_DIM(params->prof->segs[seg].raws))
return ICE_ERR_MAX_LIMIT;
/* Offsets within the segment headers are not supported */
@@ -90,7 +90,6 @@ typedef uint64_t s64;
#define min(a, b) RTE_MIN(a, b)
#define max(a, b) RTE_MAX(a, b)
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
#define MAKEMASK(m, s) ((m) << (s))
@@ -4774,7 +4774,7 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
if (((u16 *)&rule->m_u)[j] &&
- (unsigned long)rule->type < ARRAY_SIZE(ice_prot_ext)) {
+ (unsigned long)rule->type < RTE_DIM(ice_prot_ext)) {
/* No more space to accommodate */
if (word >= ICE_MAX_CHAIN_WORDS)
return 0;
@@ -22,10 +22,6 @@
#define BIT_ULL(x) (1ULL << (x))
#endif
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
#define NFP_ERRNO(err) (errno = (err), -1)
#define NFP_ERRNO_RET(err, ret) (errno = (err), (ret))
#define NFP_NOERR(errv) (errno)
@@ -573,7 +573,7 @@ nfp_cpp_alloc(struct rte_pci_device *dev, int driver_lock_needed)
uint32_t xpbaddr;
size_t tgt;
- for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
+ for (tgt = 0; tgt < RTE_DIM(cpp->imb_cat_table); tgt++) {
/* Hardcoded XPB IMB Base, island 0 */
xpbaddr = 0x000a0000 + (tgt * 4);
err = nfp_xpb_readl(cpp, xpbaddr,
@@ -60,7 +60,7 @@ nfp_nsp_print_extended_error(uint32_t ret_val)
if (!ret_val)
return;
- for (i = 0; i < (int)ARRAY_SIZE(nsp_errors); i++)
+ for (i = 0; i < (int)RTE_DIM(nsp_errors); i++)
if (ret_val == (uint32_t)nsp_errors[i].code)
printf("err msg: %s\n", nsp_errors[i].msg);
}
@@ -138,7 +138,7 @@ nfp_eth_rate2speed(enum nfp_eth_rate rate)
{
int i;
- for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ for (i = 0; i < (int)RTE_DIM(nsp_eth_rate_tbl); i++)
if (nsp_eth_rate_tbl[i].rate == rate)
return nsp_eth_rate_tbl[i].speed;
@@ -150,7 +150,7 @@ nfp_eth_speed2rate(unsigned int speed)
{
int i;
- for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ for (i = 0; i < (int)RTE_DIM(nsp_eth_rate_tbl); i++)
if (nsp_eth_rate_tbl[i].speed == speed)
return nsp_eth_rate_tbl[i].rate;
@@ -1408,7 +1408,7 @@ ef10_ev_rxlabel_init(
#endif
_NOTE(ARGUNUSED(type))
- EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ EFSYS_ASSERT3U(label, <, RTE_DIM(eep->ee_rxq_state));
eersp = &eep->ee_rxq_state[label];
EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
@@ -1457,7 +1457,7 @@ ef10_ev_rxlabel_fini(
{
efx_evq_rxq_state_t *eersp;
- EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ EFSYS_ASSERT3U(label, <, RTE_DIM(eep->ee_rxq_state));
eersp = &eep->ee_rxq_state[label];
EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
@@ -1209,7 +1209,7 @@ ef10_filter_insert_multicast_list(
count = 0;
if (count + (brdcst ? 1 : 0) >
- EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) {
+ RTE_DIM(eftp->eft_mulcst_filter_indexes)) {
/* Too many MAC addresses */
rc = EINVAL;
goto fail1;
@@ -1378,8 +1378,8 @@ ef10_filter_insert_encap_filters(
uint32_t i;
efx_rc_t rc;
- EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(ef10_filter_encap_list) <=
- EFX_ARRAY_SIZE(table->eft_encap_filter_indexes));
+ EFX_STATIC_ASSERT(RTE_DIM(ef10_filter_encap_list) <=
+ RTE_DIM(table->eft_encap_filter_indexes));
/*
* On Medford, full-featured firmware can identify packets as being
@@ -1396,7 +1396,7 @@ ef10_filter_insert_encap_filters(
* may well, however, fail to insert on unprivileged functions.)
*/
table->eft_encap_filter_count = 0;
- for (i = 0; i < EFX_ARRAY_SIZE(ef10_filter_encap_list); i++) {
+ for (i = 0; i < RTE_DIM(ef10_filter_encap_list); i++) {
efx_filter_spec_t spec;
ef10_filter_encap_entry_t *encap_filter =
&ef10_filter_encap_list[i];
@@ -1448,7 +1448,7 @@ ef10_filter_remove_old(
ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
uint32_t i;
- for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ for (i = 0; i < RTE_DIM(table->eft_entry); i++) {
if (ef10_filter_entry_is_auto_old(table, i)) {
(void) ef10_filter_delete_internal(enp, i);
}
@@ -1708,7 +1708,7 @@ ef10_filter_reconfigure(
EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Clear auto old flags */
- for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ for (i = 0; i < RTE_DIM(table->eft_entry); i++) {
if (ef10_filter_entry_is_auto_old(table, i)) {
ef10_filter_set_entry_not_auto_old(table, i);
}
@@ -472,7 +472,7 @@ ef10_mac_stats_get_mask(
efx_rc_t rc;
if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
- ef10_common, EFX_ARRAY_SIZE(ef10_common))) != 0)
+ ef10_common, RTE_DIM(ef10_common))) != 0)
goto fail1;
if (epp->ep_phy_cap_mask & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
@@ -481,18 +481,18 @@ ef10_mac_stats_get_mask(
};
if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
- ef10_40g_extra, EFX_ARRAY_SIZE(ef10_40g_extra))) != 0)
+ ef10_40g_extra, RTE_DIM(ef10_40g_extra))) != 0)
goto fail2;
if (encp->enc_mac_stats_40g_tx_size_bins) {
if ((rc = efx_mac_stats_mask_add_ranges(maskp,
mask_size, ef10_tx_size_bins,
- EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0)
+ RTE_DIM(ef10_tx_size_bins))) != 0)
goto fail3;
}
} else {
if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
- ef10_tx_size_bins, EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0)
+ ef10_tx_size_bins, RTE_DIM(ef10_tx_size_bins))) != 0)
goto fail4;
}
@@ -502,7 +502,7 @@ ef10_mac_stats_get_mask(
};
if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
- ef10_pm_and_rxdp, EFX_ARRAY_SIZE(ef10_pm_and_rxdp))) != 0)
+ ef10_pm_and_rxdp, RTE_DIM(ef10_pm_and_rxdp))) != 0)
goto fail5;
}
@@ -513,7 +513,7 @@ ef10_mac_stats_get_mask(
};
if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
- ef10_vadaptor, EFX_ARRAY_SIZE(ef10_vadaptor))) != 0)
+ ef10_vadaptor, RTE_DIM(ef10_vadaptor))) != 0)
goto fail6;
}
@@ -523,7 +523,7 @@ ef10_mac_stats_get_mask(
EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3 },
};
if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
- ef10_fec, EFX_ARRAY_SIZE(ef10_fec))) != 0)
+ ef10_fec, RTE_DIM(ef10_fec))) != 0)
goto fail7;
}
@@ -534,7 +534,7 @@ ef10_mac_stats_get_mask(
};
if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
- ef10_rxdp_sdt, EFX_ARRAY_SIZE(ef10_rxdp_sdt))) != 0)
+ ef10_rxdp_sdt, RTE_DIM(ef10_rxdp_sdt))) != 0)
goto fail8;
}
@@ -543,7 +543,7 @@ ef10_mac_stats_get_mask(
{ EFX_MAC_RXDP_HLB_IDLE, EFX_MAC_RXDP_HLB_TIMEOUT },
};
if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
- ef10_hlb, EFX_ARRAY_SIZE(ef10_hlb))) != 0)
+ ef10_hlb, RTE_DIM(ef10_hlb))) != 0)
goto fail9;
}
@@ -821,7 +821,7 @@ ef10_nic_alloc_piobufs(
unsigned int i;
EFSYS_ASSERT3U(max_piobuf_count, <=,
- EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
+ RTE_DIM(enp->en_arch.ef10.ena_piobuf_handle));
enp->en_arch.ef10.ena_piobuf_count = 0;
@@ -1718,7 +1718,7 @@ ef10_external_port_mapping(
* Infer the internal port -> external number mapping from
* the possible port modes for this NIC.
*/
- for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
+ for (i = 0; i < RTE_DIM(__ef10_external_port_mappings); ++i) {
struct ef10_external_port_map_s *eepmp =
&__ef10_external_port_mappings[i];
if (eepmp->family != enp->en_family)
@@ -2357,17 +2357,17 @@ ef10_parttbl_get(
switch (enp->en_family) {
case EFX_FAMILY_HUNTINGTON:
*parttblp = hunt_parttbl;
- *parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl);
+ *parttbl_rowsp = RTE_DIM(hunt_parttbl);
break;
case EFX_FAMILY_MEDFORD:
*parttblp = medford_parttbl;
- *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl);
+ *parttbl_rowsp = RTE_DIM(medford_parttbl);
break;
case EFX_FAMILY_MEDFORD2:
*parttblp = medford2_parttbl;
- *parttbl_rowsp = EFX_ARRAY_SIZE(medford2_parttbl);
+ *parttbl_rowsp = RTE_DIM(medford2_parttbl);
break;
default:
@@ -20,9 +20,6 @@ extern "C" {
#define EFX_STATIC_ASSERT(_cond) \
((void)sizeof (char[(_cond) ? 1 : -1]))
-#define EFX_ARRAY_SIZE(_array) \
- (sizeof (_array) / sizeof ((_array)[0]))
-
#define EFX_FIELD_OFFSET(_type, _field) \
((size_t)&(((_type *)0)->_field))
@@ -190,7 +190,7 @@ efx_loopback_type_name(
__in efx_nic_t *enp,
__in efx_loopback_type_t type)
{
- EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__efx_loopback_type_name) ==
+ EFX_STATIC_ASSERT(RTE_DIM(__efx_loopback_type_name) ==
EFX_LOOPBACK_NTYPES);
_NOTE(ARGUNUSED(enp))
@@ -566,7 +566,7 @@ efx_rx_scale_mode_set(
unsigned int type_nflags;
rc = efx_rx_scale_hash_flags_get(enp, alg, type_flags,
- EFX_ARRAY_SIZE(type_flags), &type_nflags);
+ RTE_DIM(type_flags), &type_nflags);
if (rc != 0)
goto fail2;
@@ -223,7 +223,7 @@ siena_mac_stats_get_mask(
_NOTE(ARGUNUSED(enp))
if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
- siena_stats, EFX_ARRAY_SIZE(siena_stats))) != 0)
+ siena_stats, RTE_DIM(siena_stats))) != 0)
goto fail1;
return (0);
@@ -743,10 +743,10 @@ siena_nic_register_test(
efx_rc_t rc;
/* Fill out the register mask entries */
- EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks)
- == EFX_ARRAY_SIZE(__siena_registers) * 4);
+ EFX_STATIC_ASSERT(RTE_DIM(__siena_register_masks)
+ == RTE_DIM(__siena_registers) * 4);
- nitems = EFX_ARRAY_SIZE(__siena_registers);
+ nitems = RTE_DIM(__siena_registers);
dwordp = __siena_register_masks;
for (count = 0; count < nitems; ++count) {
rsp = __siena_registers + count;
@@ -757,10 +757,10 @@ siena_nic_register_test(
}
/* Fill out the register table entries */
- EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks)
- == EFX_ARRAY_SIZE(__siena_tables) * 4);
+ EFX_STATIC_ASSERT(RTE_DIM(__siena_table_masks)
+ == RTE_DIM(__siena_tables) * 4);
- nitems = EFX_ARRAY_SIZE(__siena_tables);
+ nitems = RTE_DIM(__siena_tables);
dwordp = __siena_table_masks;
for (count = 0; count < nitems; ++count) {
rsp = __siena_tables + count;
@@ -771,21 +771,21 @@ siena_nic_register_test(
}
if ((rc = siena_nic_test_registers(enp, __siena_registers,
- EFX_ARRAY_SIZE(__siena_registers))) != 0)
+ RTE_DIM(__siena_registers))) != 0)
goto fail1;
if ((rc = siena_nic_test_tables(enp, __siena_tables,
EFX_PATTERN_BYTE_ALTERNATE,
- EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ RTE_DIM(__siena_tables))) != 0)
goto fail2;
if ((rc = siena_nic_test_tables(enp, __siena_tables,
EFX_PATTERN_BYTE_CHANGING,
- EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ RTE_DIM(__siena_tables))) != 0)
goto fail3;
if ((rc = siena_nic_test_tables(enp, __siena_tables,
- EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ EFX_PATTERN_BIT_SWEEP, RTE_DIM(__siena_tables))) != 0)
goto fail4;
return (0);
@@ -246,7 +246,7 @@ siena_nvram_type_to_partn(
EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
EFSYS_ASSERT(partnp != NULL);
- for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ for (i = 0; i < RTE_DIM(siena_parttbl); i++) {
siena_parttbl_entry_t *entry = &siena_parttbl[i];
if (entry->port == emip->emi_port && entry->nvtype == type) {
@@ -274,7 +274,7 @@ siena_nvram_test(
* Iterate over the list of supported partition types
* applicable to *this* port
*/
- for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ for (i = 0; i < RTE_DIM(siena_parttbl); i++) {
entry = &siena_parttbl[i];
if (entry->port != emip->emi_port ||
@@ -517,7 +517,7 @@ siena_nvram_partn_get_version(
* that have access to this partition.
*/
version[0] = version[1] = version[2] = version[3] = 0;
- for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ for (i = 0; i < RTE_DIM(siena_parttbl); i++) {
siena_mc_fw_version_t *verp;
unsigned int nitems;
uint16_t temp[4];
@@ -134,7 +134,7 @@ nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
dump_stdout = data ? 0 : 1;
- for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
+ for (i = 0; i < RTE_DIM(nicvf_reg_tbl); i++)
if (dump_stdout)
nicvf_log("%24s = 0x%" PRIx64 "\n",
nicvf_reg_tbl[i].name,
@@ -142,7 +142,7 @@ nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
else
*data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);
- for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
+ for (i = 0; i < RTE_DIM(nicvf_multi_reg_tbl); i++)
if (dump_stdout)
nicvf_log("%24s = 0x%" PRIx64 "\n",
nicvf_multi_reg_tbl[i].name,
@@ -153,7 +153,7 @@ nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
nicvf_multi_reg_tbl[i].offset);
for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
- for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
+ for (i = 0; i < RTE_DIM(nicvf_qset_cq_reg_tbl); i++)
if (dump_stdout)
nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
nicvf_qset_cq_reg_tbl[i].name, q,
@@ -164,7 +164,7 @@ nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
nicvf_qset_cq_reg_tbl[i].offset, q);
for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
- for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
+ for (i = 0; i < RTE_DIM(nicvf_qset_rq_reg_tbl); i++)
if (dump_stdout)
nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
nicvf_qset_rq_reg_tbl[i].name, q,
@@ -175,7 +175,7 @@ nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
nicvf_qset_rq_reg_tbl[i].offset, q);
for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
- for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
+ for (i = 0; i < RTE_DIM(nicvf_qset_sq_reg_tbl); i++)
if (dump_stdout)
nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
nicvf_qset_sq_reg_tbl[i].name, q,
@@ -186,7 +186,7 @@ nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
nicvf_qset_sq_reg_tbl[i].offset, q);
for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
- for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
+ for (i = 0; i < RTE_DIM(nicvf_qset_rbdr_reg_tbl); i++)
if (dump_stdout)
nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
nicvf_qset_rbdr_reg_tbl[i].name, q,
@@ -203,15 +203,15 @@ nicvf_reg_get_count(void)
{
int nr_regs;
- nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
- nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
- nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
+ nr_regs = RTE_DIM(nicvf_reg_tbl);
+ nr_regs += RTE_DIM(nicvf_multi_reg_tbl);
+ nr_regs += RTE_DIM(nicvf_qset_cq_reg_tbl) *
MAX_CMP_QUEUES_PER_QS;
- nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
+ nr_regs += RTE_DIM(nicvf_qset_rq_reg_tbl) *
MAX_RCV_QUEUES_PER_QS;
- nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
+ nr_regs += RTE_DIM(nicvf_qset_sq_reg_tbl) *
MAX_SND_QUEUES_PER_QS;
- nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
+ nr_regs += RTE_DIM(nicvf_qset_rbdr_reg_tbl) *
MAX_RCV_BUF_DESC_RINGS_PER_QS;
return nr_regs;
@@ -470,7 +470,7 @@ nicvf_qsize_rbdr_roundup(uint32_t val)
RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
RBDR_QUEUE_SZ_512K};
- return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+ return nicvf_roundup_list(val, list, RTE_DIM(list));
}
int
@@ -579,7 +579,7 @@ nicvf_qsize_sq_roundup(uint32_t val)
SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
SND_QUEUE_SZ_64K};
- return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+ return nicvf_roundup_list(val, list, RTE_DIM(list));
}
int
@@ -684,7 +684,7 @@ nicvf_qsize_cq_roundup(uint32_t val)
CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
CMP_QUEUE_SZ_64K};
- return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+ return nicvf_roundup_list(val, list, RTE_DIM(list));
}
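
All three nicvf_qsize_*_roundup() helpers share one shape: pass an ascending table of supported queue sizes plus its RTE_DIM(), and round the request up to the next entry. A hedged sketch of what nicvf_roundup_list() is assumed to do, with illustrative sizes:

    #include <stdint.h>
    #include <rte_common.h>

    /* Assumed behaviour: first supported size >= val, else the largest. */
    static uint32_t roundup_list(uint32_t val, const uint32_t *list,
                                 uint32_t len)
    {
        uint32_t i;

        for (i = 0; i < len; i++)
            if (val <= list[i])
                return list[i];
        return list[len - 1];
    }

    static const uint32_t cq_sizes[] = { 1024, 2048, 4096, 8192 };

    static uint32_t qsize_cq_roundup(uint32_t val)
    {
        return roundup_list(val, cq_sizes, RTE_DIM(cq_sizes));
    }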
@@ -17,8 +17,6 @@
#define PCI_SUB_DEVICE_ID_CN81XX_NICVF 0xA234
#define PCI_SUB_DEVICE_ID_CN83XX_NICVF 0xA334
-#define NICVF_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-
#define NICVF_GET_RX_STATS(reg) \
nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define NICVF_GET_TX_STATS(reg) \
@@ -34,8 +34,6 @@
#define opae_zmalloc(size) rte_zmalloc(NULL, size, 0)
#define opae_free(addr) rte_free(addr)
-#define ARRAY_SIZE(arr) RTE_DIM(arr)
-
#define min(a, b) RTE_MIN(a, b)
#define max(a, b) RTE_MAX(a, b)