[v7,11/21] net/ice: remove use of VLAs

Message ID 1731376933-19275-12-git-send-email-andremue@linux.microsoft.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: remove use of VLAs for Windows

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Andre Muezerie Nov. 12, 2024, 2:02 a.m. UTC
From: Konstantin Ananyev <konstantin.ananyev@huawei.com>

../drivers/net/ice/ice_rxtx.c:1871:29: warning:
    variable length array used [-Wvla]

Here a VLA is used as a temporary array for mbufs that will be used
as split RX data buffers.
Since at any given time only one thread can do RX on a particular
queue, we can allocate extra space for that array at rx_queue_setup()
and then safely use it in the RX fast path (see the sketch after the
diffstat below).

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 drivers/net/ice/ice_rxtx.c | 18 ++++++++++++------
 drivers/net/ice/ice_rxtx.h |  2 ++
 2 files changed, 14 insertions(+), 6 deletions(-)
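
For readers outside the driver, here is a minimal sketch of the pattern
this patch applies. The names are hypothetical stand-ins: "struct queue",
"struct entry" and setup() correspond loosely to ice_rx_queue,
ice_rx_entry and ice_rx_queue_setup() in the diff below.

	#include <stdlib.h>

	struct entry { void *mbuf; };

	struct queue {
		struct entry *sw_ring;      /* software ring */
		struct entry *sw_split_buf; /* scratch tail of the same allocation */
		unsigned int nb_desc;       /* ring entries */
		unsigned int free_thresh;   /* max mbufs refilled per fast-path call */
		int split_offload;          /* buffer-split offload enabled? */
	};

	static int setup(struct queue *q)
	{
		/* Before the patch the fast path declared a VLA of
		 * free_thresh mbuf pointers; instead, reserve those
		 * entries once here, at queue setup time. */
		unsigned int len = q->nb_desc;
		unsigned int tlen = len + (q->split_offload ? q->free_thresh : 0);

		q->sw_ring = calloc(tlen, sizeof(*q->sw_ring));
		if (q->sw_ring == NULL)
			return -1;

		/* Scratch area sits past the ring proper; NULL when unused. */
		q->sw_split_buf = (tlen == len) ? NULL : q->sw_ring + len;
		return 0;
	}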
  

Patch

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 024d97cb46..a52a759031 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1176,7 +1176,7 @@  ice_rx_queue_setup(struct rte_eth_dev *dev,
 	struct ice_vsi *vsi = pf->main_vsi;
 	struct ice_rx_queue *rxq;
 	const struct rte_memzone *rz;
-	uint32_t ring_size;
+	uint32_t ring_size, tlen;
 	uint16_t len;
 	int use_def_burst_func = 1;
 	uint64_t offloads;
@@ -1284,9 +1284,14 @@  ice_rx_queue_setup(struct rte_eth_dev *dev,
 	/* always reserve more for bulk alloc */
 	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
 
+	/* allocate extra entries for SW split buffer */
+	tlen = ((rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0) ?
+		rxq->rx_free_thresh : 0;
+	tlen += len;
+
 	/* Allocate the software ring. */
 	rxq->sw_ring = rte_zmalloc_socket(NULL,
-					  sizeof(struct ice_rx_entry) * len,
+					  sizeof(struct ice_rx_entry) * tlen,
 					  RTE_CACHE_LINE_SIZE,
 					  socket_id);
 	if (!rxq->sw_ring) {
@@ -1295,6 +1300,8 @@  ice_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->sw_split_buf = (tlen == len) ? NULL : rxq->sw_ring + len;
+
 	ice_reset_rx_queue(rxq);
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -1873,7 +1880,6 @@  ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 	uint64_t dma_addr;
 	int diag, diag_pay;
 	uint64_t pay_addr;
-	struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh];
 
 	/* Allocate buffers in bulk */
 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
@@ -1888,7 +1894,7 @@  ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 
 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
 		diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp,
-				(void *)mbufs_pay, rxq->rx_free_thresh);
+				(void *)rxq->sw_split_buf, rxq->rx_free_thresh);
 		if (unlikely(diag_pay != 0)) {
 			PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk");
 			return -ENOMEM;
@@ -1913,8 +1919,8 @@  ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 			rxdp[i].read.hdr_addr = 0;
 			rxdp[i].read.pkt_addr = dma_addr;
 		} else {
-			mb->next = mbufs_pay[i];
-			pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs_pay[i]));
+			mb->next = rxq->sw_split_buf[i].mbuf;
+			pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb->next));
 			rxdp[i].read.hdr_addr = dma_addr;
 			rxdp[i].read.pkt_addr = pay_addr;
 		}
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f7276cfc9f..d0f0b6c1d2 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -139,6 +139,8 @@  struct ice_rx_queue {
 	uint32_t hw_time_high; /* high 32 bits of timestamp */
 	uint32_t hw_time_low; /* low 32 bits of timestamp */
 	uint64_t hw_time_update; /* SW time of HW record updating */
+	struct ice_rx_entry *sw_split_buf;
+	/* address of temp buffer for RX split mbufs */
 	struct rte_eth_rxseg_split rxseg[ICE_RX_MAX_NSEG];
 	uint32_t rxseg_nb;
 	bool ts_enable; /* if rxq timestamp is enabled */
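
A note on the sizing: ice_rx_alloc_bufs() takes at most rx_free_thresh
payload mbufs per call and links each one into a header mbuf's chain
before returning, so with a single RX thread per queue the rx_free_thresh
scratch entries can be reused on every refill. A minimal sketch of that
contract, with refill_split() and get_mbuf() as hypothetical stand-ins
for ice_rx_alloc_bufs() and rte_mempool_get_bulk():

	#include <stddef.h>

	struct rx_entry { void *mbuf; };

	/* Hypothetical stand-in for the patched ice_rx_alloc_bufs():
	 * fill the scratch entries, then hand them off before
	 * returning, so no state lives in split_buf across calls. */
	static int refill_split(struct rx_entry *split_buf, size_t free_thresh,
				void *(*get_mbuf)(void))
	{
		for (size_t i = 0; i < free_thresh; i++) {
			split_buf[i].mbuf = get_mbuf();
			if (split_buf[i].mbuf == NULL)
				return -1; /* the driver returns -ENOMEM here */
		}
		/* ...link each payload mbuf into its header mbuf's chain
		 * and program the descriptors, as the diff above does... */
		return 0;
	}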