@@ -4481,7 +4481,7 @@ ring_rxd_display_dword(union igb_ring_dword dword)
static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
portid_t port_id,
#else
__rte_unused portid_t port_id,
@@ -4490,7 +4490,7 @@ ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
{
struct igb_ring_desc_16_bytes *ring =
(struct igb_ring_desc_16_bytes *)ring_mz->addr;
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
int ret;
struct rte_eth_dev_info dev_info;
@@ -137,7 +137,7 @@
/* i40e defines */
#define RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC 1
-// RTE_LIBRTE_I40E_16BYTE_RX_DESC is not set
+/* RTE_NET_INTEL_USE_16BYTE_DESC is not set */
#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF 64
#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM 4
@@ -961,7 +961,9 @@ Use 16 Bytes RX Descriptor Size
As i40e PMD supports both 16 and 32 bytes RX descriptor sizes, and 16 bytes size can provide helps to high performance of small packets.
In ``config/rte_config.h`` set the following to use 16 bytes size RX descriptors::
- #define RTE_LIBRTE_I40E_16BYTE_RX_DESC 1
+ #define RTE_NET_INTEL_USE_16BYTE_DESC 1
+
+Note, however, that enabling this option is global: all PMDs that support this setting will also use 16-byte descriptors.
Input set requirement of each pctype for FDIR
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -112,7 +112,7 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
rx_ctx.hbuff = 0;
rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
rx_ctx.qlen = rxq->nb_rx_desc;
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
rx_ctx.dsize = 1;
#endif
rx_ctx.dtype = i40e_header_split_none;
@@ -401,7 +401,7 @@ i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
rx_ctx.qlen = rxq->ring_len;
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
rx_ctx.dsize = 1;
#endif
@@ -125,7 +125,7 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
} else {
mb->vlan_tci = 0;
}
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
(1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
@@ -217,7 +217,7 @@ static inline uint64_t
i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
{
uint64_t flags = 0;
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
uint16_t flexbh, flexbl;
flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
@@ -2925,10 +2925,10 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
rxd = &rxq->rx_ring[i];
rxd->read.pkt_addr = dma_addr;
rxd->read.hdr_addr = 0;
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
rxd->read.rsvd1 = 0;
rxd->read.rsvd2 = 0;
-#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
+#endif /* RTE_NET_INTEL_USE_16BYTE_DESC */
rxe[i].mbuf = mbuf;
}
@@ -3010,7 +3010,7 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
rx_ctx.qlen = rxq->nb_rx_desc;
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
rx_ctx.dsize = 1;
#endif
rx_ctx.dtype = rxq->hs_mode;
@@ -68,7 +68,7 @@ enum i40e_header_split_mode {
I40E_HEADER_SPLIT_SCTP)
/* HW desc structure, both 16-byte and 32-byte types are supported */
-#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifdef RTE_NET_INTEL_USE_16BYTE_DESC
#define i40e_rx_desc i40e_16byte_rx_desc
#else
#define i40e_rx_desc i40e_32byte_rx_desc
@@ -41,7 +41,7 @@ i40e_rxq_rearm_common(struct i40e_rx_queue *rxq, __rte_unused bool avx512)
return;
}
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
struct rte_mbuf *mb0, *mb1;
__m128i dma_addr0, dma_addr1;
__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
@@ -21,7 +21,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
i40e_rxq_rearm_common(rxq, false);
}
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
/* Handles 32B descriptor FDIR ID processing:
* rxdp: receive descriptor ring, required to load 2nd 16B half of each desc
* rx_pkts: required to store metadata back to mbufs
@@ -99,7 +99,7 @@ desc_fdir_processing_32b(volatile union i40e_rx_desc *rxdp,
/* NOT REACHED, see above switch returns */
return _mm256_setzero_si256();
}
-#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
+#endif /* RTE_NET_INTEL_USE_16BYTE_DESC */
#define PKTLEN_SHIFT 10
@@ -398,7 +398,7 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
* not always performed. Branch over the code when not enabled.
*/
if (rxq->fdir_enabled) {
-#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifdef RTE_NET_INTEL_USE_16BYTE_DESC
/* 16B descriptor code path:
* RSS and FDIR ID use the same offset in the desc, so
* only one can be present at a time. The code below
@@ -490,7 +490,7 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
fdir_add_flags = desc_fdir_processing_32b(rxdp, rx_pkts, i, 6);
mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_add_flags);
/* End 32B desc handling */
-#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
+#endif /* RTE_NET_INTEL_USE_16BYTE_DESC */
} /* if() on FDIR enabled */
@@ -23,7 +23,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
i40e_rxq_rearm_common(rxq, true);
}
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
/* Handles 32B descriptor FDIR ID processing:
* rxdp: receive descriptor ring, required to load 2nd 16B half of each desc
* rx_pkts: required to store metadata back to mbufs
@@ -102,7 +102,7 @@ desc_fdir_processing_32b(volatile union i40e_rx_desc *rxdp,
/* NOT REACHED, see above switch returns */
return _mm256_setzero_si256();
}
-#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
+#endif /* RTE_NET_INTEL_USE_16BYTE_DESC */
#define PKTLEN_SHIFT 10
@@ -419,7 +419,7 @@ _recv_raw_pkts_vec_avx512(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
* not always performed. Branch over the code when not enabled.
*/
if (rxq->fdir_enabled) {
-#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifdef RTE_NET_INTEL_USE_16BYTE_DESC
/* 16B descriptor code path:
* RSS and FDIR ID use the same offset in the desc, so
* only one can be present at a time. The code below
@@ -539,7 +539,7 @@ _recv_raw_pkts_vec_avx512(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
mbuf_flags =
_mm256_or_si256(mbuf_flags, fdir_add_flags);
/* End 32B desc handling */
-#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
+#endif /* RTE_NET_INTEL_USE_16BYTE_DESC */
} /* if() on FDIR enabled */
@@ -77,7 +77,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);
}
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
/* NEON version of FDIR mark extraction for 4 32B descriptors at a time */
static inline uint32x4_t
descs_to_fdir_32b(volatile union i40e_rx_desc *rxdp, struct rte_mbuf **rx_pkt)
@@ -284,7 +284,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,
/* Extract FDIR ID only if FDIR is enabled to avoid useless work */
if (rxq->fdir_enabled) {
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
uint32x4_t v_fdir_ol_flags = descs_to_fdir_32b(rxdp, rx_pkts);
#else
(void)rxdp; /* rxdp not required for 16B desc mode */
@@ -86,7 +86,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
/* SSE version of FDIR mark extraction for 4 32B descriptors at a time */
static inline __m128i
descs_to_fdir_32b(volatile union i40e_rx_desc *rxdp, struct rte_mbuf **rx_pkt)
@@ -285,7 +285,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,
/* Extract FDIR ID only if FDIR is enabled to avoid useless work */
if (rxq->fdir_enabled) {
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
__m128i v_fdir_ol_flags = descs_to_fdir_32b(rxdp, rx_pkts);
#else
(void)rxdp; /* rxdp not required for 16B desc mode */