@@ -212,7 +212,7 @@ static inline bool
check_tx_vec_allow(struct ci_tx_queue *txq)
{
if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
- txq->tx_rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
+ txq->tx_rs_thresh >= IAVF_VPMD_TX_BURST &&
txq->tx_rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
return true;
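A note on the bounds this hunk touches: the usual reading is that tx_rs_thresh must cover at least one full vector burst (IAVF_VPMD_TX_BURST), so each burst completes within a single RS interval, and must not exceed IAVF_VPMD_TX_MAX_FREE_BUF, the size of the batch the cleanup path frees mbufs through. The rename also makes the lower bound read correctly: the old IAVF_VPMD_TX_MAX_BURST was in fact used as a minimum. A minimal standalone sketch of the same rule, with invented names (not driver code):

    /* Hypothetical illustration of the vector-TX eligibility rule above;
     * the struct and constants are stand-ins, only the comparison shape
     * mirrors the driver.
     */
    #include <stdbool.h>
    #include <stdint.h>

    #define VPMD_TX_BURST        32 /* descriptors per vector burst */
    #define VPMD_TX_MAX_FREE_BUF 64 /* mbufs freed per cleanup pass */

    struct txq_cfg {
        uint16_t tx_rs_thresh;
        uint64_t offloads;
        uint64_t no_vector_flags;
    };

    static bool tx_vec_allowed(const struct txq_cfg *cfg)
    {
        return !(cfg->offloads & cfg->no_vector_flags) &&
               cfg->tx_rs_thresh >= VPMD_TX_BURST &&
               cfg->tx_rs_thresh <= VPMD_TX_MAX_FREE_BUF;
    }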
@@ -23,11 +23,12 @@
#define IAVF_RX_MAX_DATA_BUF_SIZE (16 * 1024 - 128)
/* used for Vector PMD */
-#define IAVF_VPMD_RX_MAX_BURST 32
-#define IAVF_VPMD_TX_MAX_BURST 32
-#define IAVF_RXQ_REARM_THRESH 32
-#define IAVF_VPMD_DESCS_PER_LOOP 4
-#define IAVF_VPMD_TX_MAX_FREE_BUF 64
+#define IAVF_VPMD_RX_BURST 32
+#define IAVF_VPMD_TX_BURST 32
+#define IAVF_VPMD_RXQ_REARM_THRESH 32
+#define IAVF_VPMD_DESCS_PER_LOOP 4
+#define IAVF_VPMD_DESCS_PER_LOOP_WIDE 8
+#define IAVF_VPMD_TX_MAX_FREE_BUF 64
#define IAVF_TX_NO_VECTOR_FLAGS ( \
RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
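The consolidated constants now read as one family: IAVF_VPMD_DESCS_PER_LOOP (4) is the step of the 128-bit SSE/NEON receive loops, and the new IAVF_VPMD_DESCS_PER_LOOP_WIDE (8) replaces the per-file IAVF_DESCS_PER_LOOP_AVX copies removed in the hunks below. A small compile-time sketch (illustrative, not part of the patch) of invariants the vector paths rely on:

    /* Hypothetical static checks mirroring what the vector loops assume;
     * macro values copied from the hunk above.
     */
    #include <assert.h>

    #define IAVF_VPMD_RX_BURST            32
    #define IAVF_VPMD_RXQ_REARM_THRESH    32
    #define IAVF_VPMD_DESCS_PER_LOOP       4
    #define IAVF_VPMD_DESCS_PER_LOOP_WIDE  8

    /* a burst must be a whole number of loop iterations, at both widths */
    static_assert(IAVF_VPMD_RX_BURST % IAVF_VPMD_DESCS_PER_LOOP == 0, "");
    static_assert(IAVF_VPMD_RX_BURST % IAVF_VPMD_DESCS_PER_LOOP_WIDE == 0, "");
    /* the rearm loops initialize two mbufs per iteration */
    static_assert(IAVF_VPMD_RXQ_REARM_THRESH % 2 == 0, "");

The even-threshold check corresponds to the "process 2 mbufs in one loop" rearm loops later in the patch.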
@@ -20,8 +20,6 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
uint16_t nb_pkts, uint8_t *split_packet,
bool offload)
{
-#define IAVF_DESCS_PER_LOOP_AVX 8
-
/* const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; */
const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
@@ -34,13 +32,13 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
rte_prefetch0(rxdp);
- /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
- nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);
+ /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP_WIDE */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP_WIDE);
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
*/
- if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
+ if (rxq->rxrearm_nb > IAVF_VPMD_RXQ_REARM_THRESH)
iavf_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
@@ -178,8 +176,8 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
uint16_t i, received;
for (i = 0, received = 0; i < nb_pkts;
- i += IAVF_DESCS_PER_LOOP_AVX,
- rxdp += IAVF_DESCS_PER_LOOP_AVX) {
+ i += IAVF_VPMD_DESCS_PER_LOOP_WIDE,
+ rxdp += IAVF_VPMD_DESCS_PER_LOOP_WIDE) {
/* step 1, copy over 8 mbuf pointers to rx_pkts array */
_mm256_storeu_si256((void *)&rx_pkts[i],
_mm256_loadu_si256((void *)&sw_ring[i]));
@@ -217,7 +215,7 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
if (split_packet) {
int j;
- for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
+ for (j = 0; j < IAVF_VPMD_DESCS_PER_LOOP_WIDE; j++)
rte_mbuf_prefetch_part2(rx_pkts[i + j]);
}
@@ -436,7 +434,7 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
*(uint64_t *)split_packet =
_mm_cvtsi128_si64(split_bits);
- split_packet += IAVF_DESCS_PER_LOOP_AVX;
+ split_packet += IAVF_VPMD_DESCS_PER_LOOP_WIDE;
}
/* perform dd_check */
@@ -452,7 +450,7 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
(_mm_cvtsi128_si64
(_mm256_castsi256_si128(status0_7)));
received += burst;
- if (burst != IAVF_DESCS_PER_LOOP_AVX)
+ if (burst != IAVF_VPMD_DESCS_PER_LOOP_WIDE)
break;
}
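For context on this loop exit: burst is the popcount of the descriptor-done (DD) bits collected in status0_7, so anything short of IAVF_VPMD_DESCS_PER_LOOP_WIDE means the NIC has not yet completed the whole 8-descriptor group and receive must stop there. A scalar sketch of that termination logic (hypothetical helper, not driver code):

    /* Hypothetical scalar rendering of the DD-bit check: dd_bits[g] holds
     * one bit per completed descriptor in group g, with 8 descriptors per
     * group matching IAVF_VPMD_DESCS_PER_LOOP_WIDE.
     */
    #include <stdint.h>

    static uint16_t count_received(const uint64_t *dd_bits, uint16_t nb_groups)
    {
        uint16_t received = 0;

        for (uint16_t g = 0; g < nb_groups; g++) {
            uint16_t burst = (uint16_t)__builtin_popcountll(dd_bits[g]);

            received += burst;
            if (burst != 8) /* partial group: HW is still writing */
                break;
        }
        return received;
    }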
@@ -492,8 +490,6 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
uint16_t nb_pkts, uint8_t *split_packet,
bool offload)
{
-#define IAVF_DESCS_PER_LOOP_AVX 8
-
struct iavf_adapter *adapter = rxq->vsi->adapter;
uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
const uint32_t *type_table = adapter->ptype_tbl;
@@ -506,13 +502,13 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
rte_prefetch0(rxdp);
- /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
- nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);
+ /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP_WIDE */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP_WIDE);
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
*/
- if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
+ if (rxq->rxrearm_nb > IAVF_VPMD_RXQ_REARM_THRESH)
iavf_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
@@ -720,8 +716,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
uint16_t i, received;
for (i = 0, received = 0; i < nb_pkts;
- i += IAVF_DESCS_PER_LOOP_AVX,
- rxdp += IAVF_DESCS_PER_LOOP_AVX) {
+ i += IAVF_VPMD_DESCS_PER_LOOP_WIDE,
+ rxdp += IAVF_VPMD_DESCS_PER_LOOP_WIDE) {
/* step 1, copy over 8 mbuf pointers to rx_pkts array */
_mm256_storeu_si256((void *)&rx_pkts[i],
_mm256_loadu_si256((void *)&sw_ring[i]));
@@ -777,7 +773,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
if (split_packet) {
int j;
- for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
+ for (j = 0; j < IAVF_VPMD_DESCS_PER_LOOP_WIDE; j++)
rte_mbuf_prefetch_part2(rx_pkts[i + j]);
}
@@ -1337,7 +1333,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
*(uint64_t *)split_packet =
_mm_cvtsi128_si64(split_bits);
- split_packet += IAVF_DESCS_PER_LOOP_AVX;
+ split_packet += IAVF_VPMD_DESCS_PER_LOOP_WIDE;
}
/* perform dd_check */
@@ -1398,7 +1394,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
}
- if (burst != IAVF_DESCS_PER_LOOP_AVX)
+ if (burst != IAVF_VPMD_DESCS_PER_LOOP_WIDE)
break;
}
@@ -1466,7 +1462,7 @@ iavf_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts, bool offload)
{
struct iavf_rx_queue *rxq = rx_queue;
- uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
+ uint8_t split_flags[IAVF_VPMD_RX_BURST] = {0};
/* get some new buffers */
uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
@@ -1509,12 +1505,12 @@ iavf_recv_scattered_pkts_vec_avx2_common(void *rx_queue, struct rte_mbuf **rx_pk
{
uint16_t retval = 0;
- while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
+ while (nb_pkts > IAVF_VPMD_RX_BURST) {
uint16_t burst = iavf_recv_scattered_burst_vec_avx2(rx_queue,
- rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST, offload);
+ rx_pkts + retval, IAVF_VPMD_RX_BURST, offload);
retval += burst;
nb_pkts -= burst;
- if (burst < IAVF_VPMD_RX_MAX_BURST)
+ if (burst < IAVF_VPMD_RX_BURST)
return retval;
}
return retval + iavf_recv_scattered_burst_vec_avx2(rx_queue,
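This wrapper exists because split_flags in the burst function is sized to IAVF_VPMD_RX_BURST entries, so larger requests are served as repeated full bursts until one comes back short (ring drained) or the remainder fits in a single call. The same shape, reduced to a generic skeleton (all names invented, not driver code):

    /* Hypothetical skeleton of the burst-splitting wrappers in this patch. */
    #include <stdint.h>

    #define RX_BURST 32 /* stands in for IAVF_VPMD_RX_BURST */

    /* assumed burst primitive: returns how many packets it produced,
     * never more than requested
     */
    typedef uint16_t (*recv_burst_t)(void *q, void **pkts, uint16_t n);

    static uint16_t recv_pkts(recv_burst_t burst_fn, void *q,
                              void **pkts, uint16_t nb_pkts)
    {
        uint16_t retval = 0;

        while (nb_pkts > RX_BURST) {
            uint16_t burst = burst_fn(q, pkts + retval, RX_BURST);

            retval += burst;
            nb_pkts -= burst;
            if (burst < RX_BURST) /* short burst: stop early */
                return retval;
        }
        /* tail call for the remainder (<= RX_BURST) */
        return retval + burst_fn(q, pkts + retval, nb_pkts);
    }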
@@ -1555,7 +1551,7 @@ iavf_recv_scattered_burst_vec_avx2_flex_rxd(void *rx_queue,
uint16_t nb_pkts, bool offload)
{
struct iavf_rx_queue *rxq = rx_queue;
- uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
+ uint8_t split_flags[IAVF_VPMD_RX_BURST] = {0};
/* get some new buffers */
uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2_flex_rxd(rxq,
@@ -1599,14 +1595,14 @@ iavf_recv_scattered_pkts_vec_avx2_flex_rxd_common(void *rx_queue,
{
uint16_t retval = 0;
- while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
+ while (nb_pkts > IAVF_VPMD_RX_BURST) {
uint16_t burst =
iavf_recv_scattered_burst_vec_avx2_flex_rxd
- (rx_queue, rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST,
+ (rx_queue, rx_pkts + retval, IAVF_VPMD_RX_BURST,
offload);
retval += burst;
nb_pkts -= burst;
- if (burst < IAVF_VPMD_RX_MAX_BURST)
+ if (burst < IAVF_VPMD_RX_BURST)
return retval;
}
return retval + iavf_recv_scattered_burst_vec_avx2_flex_rxd(rx_queue,
@@ -6,7 +6,6 @@
#include <rte_vect.h>
-#define IAVF_DESCS_PER_LOOP_AVX 8
#define PKTLEN_SHIFT 10
/******************************************************************************
@@ -51,13 +50,13 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
rte_prefetch0(rxdp);
- /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
- nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);
+ /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP_WIDE */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP_WIDE);
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
*/
- if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
+ if (rxq->rxrearm_nb > IAVF_VPMD_RXQ_REARM_THRESH)
iavf_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
@@ -148,8 +147,8 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
uint16_t i, received;
for (i = 0, received = 0; i < nb_pkts;
- i += IAVF_DESCS_PER_LOOP_AVX,
- rxdp += IAVF_DESCS_PER_LOOP_AVX) {
+ i += IAVF_VPMD_DESCS_PER_LOOP_WIDE,
+ rxdp += IAVF_VPMD_DESCS_PER_LOOP_WIDE) {
/* step 1, copy over 8 mbuf pointers to rx_pkts array */
_mm256_storeu_si256((void *)&rx_pkts[i],
_mm256_loadu_si256((void *)&sw_ring[i]));
@@ -196,7 +195,7 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
if (split_packet) {
int j;
- for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
+ for (j = 0; j < IAVF_VPMD_DESCS_PER_LOOP_WIDE; j++)
rte_mbuf_prefetch_part2(rx_pkts[i + j]);
}
@@ -527,7 +526,7 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
*(uint64_t *)split_packet =
_mm_cvtsi128_si64(split_bits);
- split_packet += IAVF_DESCS_PER_LOOP_AVX;
+ split_packet += IAVF_VPMD_DESCS_PER_LOOP_WIDE;
}
/* perform dd_check */
@@ -543,7 +542,7 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
(_mm_cvtsi128_si64
(_mm256_castsi256_si128(status0_7)));
received += burst;
- if (burst != IAVF_DESCS_PER_LOOP_AVX)
+ if (burst != IAVF_VPMD_DESCS_PER_LOOP_WIDE)
break;
}
@@ -598,13 +597,13 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
rte_prefetch0(rxdp);
- /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
- nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);
+ /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP_WIDE */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP_WIDE);
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
*/
- if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
+ if (rxq->rxrearm_nb > IAVF_VPMD_RXQ_REARM_THRESH)
iavf_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
@@ -712,8 +711,8 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
uint16_t i, received;
for (i = 0, received = 0; i < nb_pkts;
- i += IAVF_DESCS_PER_LOOP_AVX,
- rxdp += IAVF_DESCS_PER_LOOP_AVX) {
+ i += IAVF_VPMD_DESCS_PER_LOOP_WIDE,
+ rxdp += IAVF_VPMD_DESCS_PER_LOOP_WIDE) {
/* step 1, copy over 8 mbuf pointers to rx_pkts array */
_mm256_storeu_si256((void *)&rx_pkts[i],
_mm256_loadu_si256((void *)&sw_ring[i]));
@@ -761,7 +760,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
if (split_packet) {
int j;
- for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
+ for (j = 0; j < IAVF_VPMD_DESCS_PER_LOOP_WIDE; j++)
rte_mbuf_prefetch_part2(rx_pkts[i + j]);
}
@@ -1526,7 +1525,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
*(uint64_t *)split_packet =
_mm_cvtsi128_si64(split_bits);
- split_packet += IAVF_DESCS_PER_LOOP_AVX;
+ split_packet += IAVF_VPMD_DESCS_PER_LOOP_WIDE;
}
/* perform dd_check */
@@ -1589,7 +1588,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
}
#endif
- if (burst != IAVF_DESCS_PER_LOOP_AVX)
+ if (burst != IAVF_VPMD_DESCS_PER_LOOP_WIDE)
break;
}
@@ -1644,7 +1643,7 @@ iavf_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts, bool offload)
{
struct iavf_rx_queue *rxq = rx_queue;
- uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
+ uint8_t split_flags[IAVF_VPMD_RX_BURST] = {0};
/* get some new buffers */
uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
@@ -1687,12 +1686,12 @@ iavf_recv_scattered_pkts_vec_avx512_cmn(void *rx_queue, struct rte_mbuf **rx_pkt
{
uint16_t retval = 0;
- while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
+ while (nb_pkts > IAVF_VPMD_RX_BURST) {
uint16_t burst = iavf_recv_scattered_burst_vec_avx512(rx_queue,
- rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST, offload);
+ rx_pkts + retval, IAVF_VPMD_RX_BURST, offload);
retval += burst;
nb_pkts -= burst;
- if (burst < IAVF_VPMD_RX_MAX_BURST)
+ if (burst < IAVF_VPMD_RX_BURST)
return retval;
}
return retval + iavf_recv_scattered_burst_vec_avx512(rx_queue,
@@ -1720,7 +1719,7 @@ iavf_recv_scattered_burst_vec_avx512_flex_rxd(void *rx_queue,
bool offload)
{
struct iavf_rx_queue *rxq = rx_queue;
- uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
+ uint8_t split_flags[IAVF_VPMD_RX_BURST] = {0};
/* get some new buffers */
uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx512_flex_rxd(rxq,
@@ -1765,14 +1764,14 @@ iavf_recv_scattered_pkts_vec_avx512_flex_rxd_cmn(void *rx_queue,
{
uint16_t retval = 0;
- while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
+ while (nb_pkts > IAVF_VPMD_RX_BURST) {
uint16_t burst =
iavf_recv_scattered_burst_vec_avx512_flex_rxd
(rx_queue, rx_pkts + retval,
- IAVF_VPMD_RX_MAX_BURST, offload);
+ IAVF_VPMD_RX_BURST, offload);
retval += burst;
nb_pkts -= burst;
- if (burst < IAVF_VPMD_RX_MAX_BURST)
+ if (burst < IAVF_VPMD_RX_BURST)
return retval;
}
return retval + iavf_recv_scattered_burst_vec_avx512_flex_rxd(rx_queue,
@@ -59,7 +59,7 @@ iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
if (!rte_is_power_of_2(rxq->nb_rx_desc))
return -1;
- if (rxq->rx_free_thresh < IAVF_VPMD_RX_MAX_BURST)
+ if (rxq->rx_free_thresh < IAVF_VPMD_RX_BURST)
return -1;
if (rxq->nb_rx_desc % rxq->rx_free_thresh)
@@ -80,7 +80,7 @@ iavf_tx_vec_queue_default(struct ci_tx_queue *txq)
if (!txq)
return -1;
- if (txq->tx_rs_thresh < IAVF_VPMD_TX_MAX_BURST ||
+ if (txq->tx_rs_thresh < IAVF_VPMD_TX_BURST ||
txq->tx_rs_thresh > IAVF_VPMD_TX_MAX_FREE_BUF)
return -1;
@@ -252,8 +252,8 @@ iavf_rxq_rearm_common(struct iavf_rx_queue *rxq, __rte_unused bool avx512)
/* Pull 'n' more MBUFs into the software ring */
if (rte_mempool_get_bulk(rxq->mp,
(void *)rxp,
- IAVF_RXQ_REARM_THRESH) < 0) {
- if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
+ IAVF_VPMD_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + IAVF_VPMD_RXQ_REARM_THRESH >=
rxq->nb_rx_desc) {
__m128i dma_addr0;
@@ -265,7 +265,7 @@ iavf_rxq_rearm_common(struct iavf_rx_queue *rxq, __rte_unused bool avx512)
}
}
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
- IAVF_RXQ_REARM_THRESH;
+ IAVF_VPMD_RXQ_REARM_THRESH;
return;
}
@@ -274,7 +274,7 @@ iavf_rxq_rearm_common(struct iavf_rx_queue *rxq, __rte_unused bool avx512)
__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
RTE_PKTMBUF_HEADROOM);
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
- for (i = 0; i < IAVF_RXQ_REARM_THRESH; i += 2, rxp += 2) {
+ for (i = 0; i < IAVF_VPMD_RXQ_REARM_THRESH; i += 2, rxp += 2) {
__m128i vaddr0, vaddr1;
mb0 = rxp[0];
@@ -299,11 +299,11 @@ iavf_rxq_rearm_common(struct iavf_rx_queue *rxq, __rte_unused bool avx512)
_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp++->read), dma_addr1);
}
- rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
+ rxq->rxrearm_start += IAVF_VPMD_RXQ_REARM_THRESH;
if (rxq->rxrearm_start >= rxq->nb_rx_desc)
rxq->rxrearm_start = 0;
- rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;
+ rxq->rxrearm_nb -= IAVF_VPMD_RXQ_REARM_THRESH;
rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
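The tail update here is the standard rearm epilogue: advance rxrearm_start by IAVF_VPMD_RXQ_REARM_THRESH, wrap it at nb_rx_desc, and set the tail to the descriptor just before the next rearm window. Worked example with illustrative numbers: with nb_rx_desc = 512 and rxrearm_start = 480, the pass advances the start to 512, which wraps to 0, so rx_id becomes 511. A standalone sketch of the computation:

    /* Hypothetical standalone version of the tail computation above. */
    #include <stdint.h>

    static uint16_t next_tail(uint16_t rearm_start, uint16_t rearm_thresh,
                              uint16_t nb_desc)
    {
        rearm_start = (uint16_t)(rearm_start + rearm_thresh);
        if (rearm_start >= nb_desc)
            rearm_start = 0;
        /* tail = descriptor just before the next rearm window */
        return (uint16_t)(rearm_start == 0 ? nb_desc - 1 : rearm_start - 1);
    }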
@@ -31,8 +31,8 @@ iavf_rxq_rearm(struct iavf_rx_queue *rxq)
/* Pull 'n' more MBUFs into the software ring */
if (unlikely(rte_mempool_get_bulk(rxq->mp,
(void *)rxep,
- IAVF_RXQ_REARM_THRESH) < 0)) {
- if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
+ IAVF_VPMD_RXQ_REARM_THRESH) < 0)) {
+ if (rxq->rxrearm_nb + IAVF_VPMD_RXQ_REARM_THRESH >=
rxq->nb_rx_desc) {
for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
rxep[i] = &rxq->fake_mbuf;
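Context for the fallback being renamed here: when the bulk allocation fails and nearly the entire ring is already unarmed, the code points one loop's worth of software-ring slots (IAVF_VPMD_DESCS_PER_LOOP of them) at a driver-owned fake_mbuf so the vector loads stay safe, then records the failure and returns to retry later. A simplified sketch of that path (struct and helper are illustrative, not driver code):

    /* Hypothetical reduction of the allocation-failure path above. */
    #include <stdint.h>

    struct mbuf { uint64_t pad; };

    struct rxq_min {
        struct mbuf **sw_ring;  /* one buffer pointer per descriptor */
        struct mbuf fake_mbuf;  /* safe sentinel for the vector loads */
        uint16_t rxrearm_nb;    /* descriptors currently unarmed */
        uint16_t rxrearm_start; /* first descriptor to rearm next */
        uint16_t nb_rx_desc;
    };

    static void rearm_alloc_failed(struct rxq_min *q,
                                   uint16_t rearm_thresh,
                                   uint16_t descs_per_loop)
    {
        /* ring nearly empty: make the next loop's loads safe */
        if (q->rxrearm_nb + rearm_thresh >= q->nb_rx_desc) {
            for (uint16_t i = 0; i < descs_per_loop; i++)
                q->sw_ring[q->rxrearm_start + i] = &q->fake_mbuf;
        }
        /* the real code also zeroes the descriptors' DMA addresses and
         * bumps rx_mbuf_alloc_failed before returning
         */
    }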
@@ -40,12 +40,12 @@ iavf_rxq_rearm(struct iavf_rx_queue *rxq)
}
}
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
- IAVF_RXQ_REARM_THRESH;
+ IAVF_VPMD_RXQ_REARM_THRESH;
return;
}
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
- for (i = 0; i < IAVF_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ for (i = 0; i < IAVF_VPMD_RXQ_REARM_THRESH; i += 2, rxep += 2) {
mb0 = rxep[0];
mb1 = rxep[1];
@@ -60,11 +60,11 @@ iavf_rxq_rearm(struct iavf_rx_queue *rxq)
vst1q_u64(RTE_CAST_PTR(uint64_t *, &rxdp++->read), dma_addr1);
}
- rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
+ rxq->rxrearm_start += IAVF_VPMD_RXQ_REARM_THRESH;
if (rxq->rxrearm_start >= rxq->nb_rx_desc)
rxq->rxrearm_start = 0;
- rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;
+ rxq->rxrearm_nb -= IAVF_VPMD_RXQ_REARM_THRESH;
rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
@@ -233,7 +233,7 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *__rte_restrict rxq,
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
*/
- if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
+ if (rxq->rxrearm_nb > IAVF_VPMD_RXQ_REARM_THRESH)
iavf_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
@@ -1150,7 +1150,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
/* Notice:
* - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
+ * - nb_pkts > IAVF_VPMD_RX_BURST, only scan IAVF_VPMD_RX_BURST
* numbers of DD bits
*/
uint16_t
@@ -1162,7 +1162,7 @@ iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Notice:
* - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
+ * - nb_pkts > IAVF_VPMD_RX_BURST, only scan IAVF_VPMD_RX_BURST
* numbers of DD bits
*/
uint16_t
@@ -1183,7 +1183,7 @@ iavf_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct iavf_rx_queue *rxq = rx_queue;
- uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
+ uint8_t split_flags[IAVF_VPMD_RX_BURST] = {0};
unsigned int i = 0;
/* get some new buffers */
@@ -1222,15 +1222,15 @@ iavf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
{
uint16_t retval = 0;
- while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
+ while (nb_pkts > IAVF_VPMD_RX_BURST) {
uint16_t burst;
burst = iavf_recv_scattered_burst_vec(rx_queue,
rx_pkts + retval,
- IAVF_VPMD_RX_MAX_BURST);
+ IAVF_VPMD_RX_BURST);
retval += burst;
nb_pkts -= burst;
- if (burst < IAVF_VPMD_RX_MAX_BURST)
+ if (burst < IAVF_VPMD_RX_BURST)
return retval;
}
@@ -1252,7 +1252,7 @@ iavf_recv_scattered_burst_vec_flex_rxd(void *rx_queue,
uint16_t nb_pkts)
{
struct iavf_rx_queue *rxq = rx_queue;
- uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
+ uint8_t split_flags[IAVF_VPMD_RX_BURST] = {0};
unsigned int i = 0;
/* get some new buffers */
@@ -1292,15 +1292,15 @@ iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
{
uint16_t retval = 0;
- while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
+ while (nb_pkts > IAVF_VPMD_RX_BURST) {
uint16_t burst;
burst = iavf_recv_scattered_burst_vec_flex_rxd(rx_queue,
rx_pkts + retval,
- IAVF_VPMD_RX_MAX_BURST);
+ IAVF_VPMD_RX_BURST);
retval += burst;
nb_pkts -= burst;
- if (burst < IAVF_VPMD_RX_MAX_BURST)
+ if (burst < IAVF_VPMD_RX_BURST)
return retval;
}