net/mlx5: avoid implicit conversion to 64 bits
Commit Message
When compiling with MSVC, warnings like the one below pop up:
../drivers/crypto/mlx5/mlx5_crypto_xts.c(488): warning C4334:
'<<': result of 32-bit shift implicitly converted to 64 bits
(was 64-bit shift intended?)
Depending on the situation, the fix is either to perform the shift in
64 bits or to make the type conversion explicit with a cast.
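For illustration only (not part of the diff below), a minimal sketch of
the two patterns; the names log_n, elem_size, table_bytes and flag_mask
are hypothetical:

	#include <stdint.h>
	#include <stddef.h>
	#include <rte_bitops.h>

	/* Cast before multiplying so the arithmetic is done in size_t;
	 * the widening is explicit and MSVC C4334 is silenced.
	 */
	static size_t
	table_bytes(uint32_t log_n, size_t elem_size)
	{
		return (size_t)RTE_BIT32(log_n) * elem_size;
	}

	/* When a 64-bit value is wanted, shift in 64 bits from the start. */
	static uint64_t
	flag_mask(uint32_t bit)
	{
		return RTE_BIT64(bit);
	}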
Signed-off-by: Andre Muezerie <andremue@linux.microsoft.com>
---
 drivers/crypto/mlx5/mlx5_crypto_xts.c | 4 ++--
 drivers/net/mlx5/mlx5_devx.c          | 2 +-
 drivers/net/mlx5/mlx5_rx.c            | 2 +-
 drivers/net/mlx5/mlx5_rxq.c           | 2 +-
 drivers/net/mlx5/mlx5_trigger.c       | 2 +-
 drivers/net/mlx5/mlx5_tx.c            | 4 ++--
6 files changed, 8 insertions(+), 8 deletions(-)
--- a/drivers/crypto/mlx5/mlx5_crypto_xts.c
+++ b/drivers/crypto/mlx5/mlx5_crypto_xts.c
@@ -485,7 +485,7 @@ mlx5_crypto_xts_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
alloc_size += (sizeof(struct rte_crypto_op *) +
sizeof(struct mlx5_devx_obj *)) *
- RTE_BIT32(log_nb_desc);
+ (size_t)RTE_BIT32(log_nb_desc);
qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
socket_id);
if (qp == NULL) {
@@ -529,7 +529,7 @@ mlx5_crypto_xts_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
goto error;
qp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1),
RTE_CACHE_LINE_SIZE);
- qp->ops = (struct rte_crypto_op **)(qp->mkey + RTE_BIT32(log_nb_desc));
+ qp->ops = (struct rte_crypto_op **)(qp->mkey + (size_t)RTE_BIT32(log_nb_desc));
qp->entries_n = 1 << log_nb_desc;
if (mlx5_crypto_indirect_mkeys_prepare(priv, qp, &mkey_attr,
mlx5_crypto_gcm_mkey_klm_update)) {
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -1358,7 +1358,7 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
MLX5_ASSERT(hca_attr->hairpin_sq_wqe_bb_size > 0);
rte_memcpy(&host_mem_attr, &dev_mem_attr, sizeof(host_mem_attr));
umem_size = MLX5_WQE_SIZE *
- RTE_BIT32(host_mem_attr.wq_attr.log_hairpin_num_packets);
+ (size_t)RTE_BIT32(host_mem_attr.wq_attr.log_hairpin_num_packets);
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -383,7 +383,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
scat = &((volatile struct mlx5_wqe_mprq *)
rxq->wqes)[i].dseg;
addr = (uintptr_t)mlx5_mprq_buf_addr
- (buf, RTE_BIT32(rxq->log_strd_num));
+ (buf, (uintptr_t)RTE_BIT32(rxq->log_strd_num));
byte_count = RTE_BIT32(rxq->log_strd_sz) *
RTE_BIT32(rxq->log_strd_num);
lkey = mlx5_rx_addr2mr(rxq, addr);
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1472,7 +1472,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
MLX5_ASSERT(log_strd_num && log_strd_sz);
buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);
obj_size = sizeof(struct mlx5_mprq_buf) + buf_len +
- RTE_BIT32(log_strd_num) *
+ (size_t)RTE_BIT32(log_strd_num) *
sizeof(struct rte_mbuf_ext_shared_info) +
RTE_PKTMBUF_HEADROOM;
/*
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1186,7 +1186,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
fine_inline = rte_mbuf_dynflag_lookup
(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
if (fine_inline >= 0)
- rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
+ rte_net_mlx5_dynf_inline_mask = RTE_BIT64(fine_inline);
else
rte_net_mlx5_dynf_inline_mask = 0;
if (dev->data->nb_rx_queues > 0) {
--- a/drivers/net/mlx5/mlx5_tx.c
+++ b/drivers/net/mlx5/mlx5_tx.c
@@ -109,12 +109,12 @@ mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
(const void *)((uintptr_t)
txq->cqes),
sizeof(struct mlx5_error_cqe) *
- (1 << txq->cqe_n));
+ (size_t)RTE_BIT32(txq->cqe_n));
mlx5_dump_debug_information(name, "MLX5 Error SQ:",
(const void *)((uintptr_t)
txq->wqes),
MLX5_WQE_SIZE *
- (1 << txq->wqe_n));
+ (size_t)RTE_BIT32(txq->wqe_n));
txq_ctrl->dump_file_n++;
}
if (!seen)