net/mana: prevent values overflow returned from RDMA layer
Checks
Commit Message
From: Long Li <longli@microsoft.com>
The device capabilities reported from the RDMA layer are of type int. Those values can
overflow the narrower data types used in dev_info_get().
Fix this by enforcing an upper bound check before returning those values.
Fixes: 517ed6e2d590 ("net/mana: add basic driver with build environment")
Cc: stable@dpdk.org
Signed-off-by: Long Li <longli@microsoft.com>
---
drivers/net/mana/mana.c | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
Comments
On 1/18/2024 6:05 PM, longli@linuxonhyperv.com wrote:
> From: Long Li <longli@microsoft.com>
>
> The device capabilities reported from RDMA layer are in int. Those values can
> overflow with the data types defined in dev_info_get().
>
> Fix this by doing a upper bound before returning those values.
>
> Fixes: 517ed6e2d590 ("net/mana: add basic driver with build environment")
> Cc: stable@dpdk.org
>
> Signed-off-by: Long Li <longli@microsoft.com>
>
Applied to dpdk-next-net/main, thanks.
The '%lu' format specifier was replaced with 'PRIu64' for 'priv->max_mr_size'
while merging.
@@ -296,8 +296,8 @@ mana_dev_info_get(struct rte_eth_dev *dev,
dev_info->min_rx_bufsize = MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = MANA_MAX_MTU + RTE_ETHER_HDR_LEN;
- dev_info->max_rx_queues = priv->max_rx_queues;
- dev_info->max_tx_queues = priv->max_tx_queues;
+ dev_info->max_rx_queues = RTE_MIN(priv->max_rx_queues, UINT16_MAX);
+ dev_info->max_tx_queues = RTE_MIN(priv->max_tx_queues, UINT16_MAX);
dev_info->max_mac_addrs = MANA_MAX_MAC_ADDR;
dev_info->max_hash_mac_addrs = 0;
@@ -338,16 +338,20 @@ mana_dev_info_get(struct rte_eth_dev *dev,
/* Buffer limits */
dev_info->rx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE;
- dev_info->rx_desc_lim.nb_max = priv->max_rx_desc;
+ dev_info->rx_desc_lim.nb_max = RTE_MIN(priv->max_rx_desc, UINT16_MAX);
dev_info->rx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE;
- dev_info->rx_desc_lim.nb_seg_max = priv->max_recv_sge;
- dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge;
+ dev_info->rx_desc_lim.nb_seg_max =
+ RTE_MIN(priv->max_recv_sge, UINT16_MAX);
+ dev_info->rx_desc_lim.nb_mtu_seg_max =
+ RTE_MIN(priv->max_recv_sge, UINT16_MAX);
dev_info->tx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE;
- dev_info->tx_desc_lim.nb_max = priv->max_tx_desc;
+ dev_info->tx_desc_lim.nb_max = RTE_MIN(priv->max_tx_desc, UINT16_MAX);
dev_info->tx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE;
- dev_info->tx_desc_lim.nb_seg_max = priv->max_send_sge;
- dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge;
+ dev_info->tx_desc_lim.nb_seg_max =
+ RTE_MIN(priv->max_send_sge, UINT16_MAX);
+ dev_info->tx_desc_lim.nb_mtu_seg_max =
+ RTE_MIN(priv->max_send_sge, UINT16_MAX);
/* Speed */
dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
@@ -1385,9 +1389,9 @@ mana_probe_port(struct ibv_device *ibdev, struct ibv_device_attr_ex *dev_attr,
priv->max_mr = dev_attr->orig_attr.max_mr;
priv->max_mr_size = dev_attr->orig_attr.max_mr_size;
- DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d",
+ DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d mr %lu",
name, priv->max_rx_queues, priv->max_rx_desc,
- priv->max_send_sge);
+ priv->max_send_sge, priv->max_mr_size);
rte_eth_copy_pci_info(eth_dev, pci_dev);