@@ -6,10 +6,6 @@
#include "idpf_common_device.h"
#include "idpf_common_rxtx.h"
-#ifndef __INTEL_COMPILER
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif
-
#define IDPF_DESCS_PER_LOOP_AVX 8
#define PKTLEN_SHIFT 10
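
Note: removing the blanket -Wcast-qual suppression is safe because every offending cast now goes through RTE_CAST_PTR, which launders the pointer through uintptr_t, so no qualifier is discarded in a pointer-to-pointer conversion and the warning never fires. A minimal sketch of the idea; the actual definition lives in DPDK's rte_common.h and the expansion shown here is an assumption:

	#include <stdint.h>

	/* Drop qualifiers (const, volatile) from a pointer without
	 * triggering -Wcast-qual: the intermediate integer cast is not
	 * a pointer conversion, so the warning does not apply. */
	#define RTE_CAST_PTR(type, ptr) ((type)(uintptr_t)(ptr))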
@@ -34,7 +30,7 @@ idpf_singleq_rearm_common(struct idpf_rx_queue *rxq)
dma_addr0 = _mm_setzero_si128();
for (i = 0; i < IDPF_VPMD_DESCS_PER_LOOP; i++) {
rxp[i] = &rxq->fake_mbuf;
- _mm_store_si128((__m128i *)&rxdp[i].read,
+ _mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp[i].read),
dma_addr0);
}
}
@@ -108,8 +104,8 @@ idpf_singleq_rearm_common(struct idpf_rx_queue *rxq)
dma_addr4_7 = _mm512_add_epi64(dma_addr4_7, hdr_room);
/* flush desc with pa dma_addr */
- _mm512_store_si512((__m512i *)&rxdp->read, dma_addr0_3);
- _mm512_store_si512((__m512i *)&(rxdp + 4)->read, dma_addr4_7);
+ _mm512_store_si512(RTE_CAST_PTR(__m512i *, &rxdp->read), dma_addr0_3);
+ _mm512_store_si512(RTE_CAST_PTR(__m512i *, &(rxdp + 4)->read), dma_addr4_7);
}
rxq->rxrearm_start += IDPF_RXQ_REARM_THRESH;
@@ -164,8 +160,8 @@ idpf_singleq_rearm(struct idpf_rx_queue *rxq)
dma_addr0 = _mm_setzero_si128();
for (i = 0; i < IDPF_VPMD_DESCS_PER_LOOP; i++) {
rxp[i] = &rxq->fake_mbuf;
- _mm_storeu_si128((__m128i *)&rxdp[i].read,
- dma_addr0);
+ _mm_storeu_si128(RTE_CAST_PTR(__m128i *, &rxdp[i].read),
+ dma_addr0);
}
}
rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
@@ -216,10 +212,10 @@ idpf_singleq_rearm(struct idpf_rx_queue *rxq)
iovas1);
const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8);
- _mm512_storeu_si512((void *)rxdp, desc0_1);
- _mm512_storeu_si512((void *)(rxdp + 2), desc2_3);
- _mm512_storeu_si512((void *)(rxdp + 4), desc4_5);
- _mm512_storeu_si512((void *)(rxdp + 6), desc6_7);
+ _mm512_storeu_si512(RTE_CAST_PTR(void *, rxdp), desc0_1);
+ _mm512_storeu_si512(RTE_CAST_PTR(void *, rxdp + 2), desc2_3);
+ _mm512_storeu_si512(RTE_CAST_PTR(void *, rxdp + 4), desc4_5);
+ _mm512_storeu_si512(RTE_CAST_PTR(void *, rxdp + 6), desc6_7);
rxp += IDPF_DESCS_PER_LOOP_AVX;
rxdp += IDPF_DESCS_PER_LOOP_AVX;
@@ -337,28 +333,28 @@ _idpf_singleq_recv_raw_pkts_avx512(struct idpf_rx_queue *rxq,
__m512i raw_desc0_3, raw_desc4_7;
const __m128i raw_desc7 =
- _mm_load_si128((void *)(rxdp + 7));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 7));
rte_compiler_barrier();
const __m128i raw_desc6 =
- _mm_load_si128((void *)(rxdp + 6));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 6));
rte_compiler_barrier();
const __m128i raw_desc5 =
- _mm_load_si128((void *)(rxdp + 5));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 5));
rte_compiler_barrier();
const __m128i raw_desc4 =
- _mm_load_si128((void *)(rxdp + 4));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 4));
rte_compiler_barrier();
const __m128i raw_desc3 =
- _mm_load_si128((void *)(rxdp + 3));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 3));
rte_compiler_barrier();
const __m128i raw_desc2 =
- _mm_load_si128((void *)(rxdp + 2));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 2));
rte_compiler_barrier();
const __m128i raw_desc1 =
- _mm_load_si128((void *)(rxdp + 1));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 1));
rte_compiler_barrier();
const __m128i raw_desc0 =
- _mm_load_si128((void *)(rxdp + 0));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 0));
raw_desc4_7 = _mm512_broadcast_i32x4(raw_desc4);
raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc5, 1);
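
Note: the descriptor ring is volatile-qualified, while _mm_load_si128() takes a const __m128i *, so each load has to drop volatile; RTE_CAST_PTR does that without a warning, and the rte_compiler_barrier() between loads is then what keeps the descriptor reads ordered once the qualifier is gone. A compilable sketch of the pattern, with a hypothetical descriptor type and stand-in macro expansions assumed from rte_common.h:

	#include <immintrin.h>
	#include <stdint.h>

	/* Assumed expansions; stand-ins for DPDK's rte_common.h versions. */
	#define RTE_CAST_PTR(type, ptr) ((type)(uintptr_t)(ptr))
	#define rte_compiler_barrier() do { asm volatile ("" : : : "memory"); } while (0)

	/* Hypothetical 16-byte descriptor standing in for the idpf RX descriptor. */
	struct rx_desc { uint64_t qw0, qw1; };

	static __m128i read_one_desc(volatile struct rx_desc *rxdp)
	{
		/* Dropping volatile is required to match the intrinsic's
		 * prototype; the barrier then stops the compiler from
		 * reordering this read against neighbouring descriptor
		 * reads. */
		const __m128i raw = _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp));
		rte_compiler_barrier();
		return raw;
	}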
@@ -560,7 +556,7 @@ idpf_splitq_rearm_common(struct idpf_rx_queue *rx_bufq)
dma_addr0 = _mm_setzero_si128();
for (i = 0; i < IDPF_VPMD_DESCS_PER_LOOP; i++) {
rxp[i] = &rx_bufq->fake_mbuf;
- _mm_store_si128((__m128i *)&rxdp[i],
+ _mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp[i]),
dma_addr0);
}
}
@@ -634,7 +630,7 @@ idpf_splitq_rearm(struct idpf_rx_queue *rx_bufq)
dma_addr0 = _mm_setzero_si128();
for (i = 0; i < IDPF_VPMD_DESCS_PER_LOOP; i++) {
rxp[i] = &rx_bufq->fake_mbuf;
- _mm_storeu_si128((__m128i *)&rxdp[i],
+ _mm_storeu_si128(RTE_CAST_PTR(__m128i *, &rxdp[i]),
dma_addr0);
}
}
@@ -798,28 +794,28 @@ _idpf_splitq_recv_raw_pkts_avx512(struct idpf_rx_queue *rxq,
__m512i raw_desc0_3, raw_desc4_7;
const __m128i raw_desc7 =
- _mm_load_si128((void *)(rxdp + 7));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 7));
rte_compiler_barrier();
const __m128i raw_desc6 =
- _mm_load_si128((void *)(rxdp + 6));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 6));
rte_compiler_barrier();
const __m128i raw_desc5 =
- _mm_load_si128((void *)(rxdp + 5));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 5));
rte_compiler_barrier();
const __m128i raw_desc4 =
- _mm_load_si128((void *)(rxdp + 4));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 4));
rte_compiler_barrier();
const __m128i raw_desc3 =
- _mm_load_si128((void *)(rxdp + 3));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 3));
rte_compiler_barrier();
const __m128i raw_desc2 =
- _mm_load_si128((void *)(rxdp + 2));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 2));
rte_compiler_barrier();
const __m128i raw_desc1 =
- _mm_load_si128((void *)(rxdp + 1));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 1));
rte_compiler_barrier();
const __m128i raw_desc0 =
- _mm_load_si128((void *)(rxdp + 0));
+ _mm_load_si128(RTE_CAST_PTR(const __m128i *, rxdp + 0));
raw_desc4_7 = _mm512_broadcast_i32x4(raw_desc4);
raw_desc4_7 = _mm512_inserti32x4(raw_desc4_7, raw_desc5, 1);
@@ -1131,7 +1127,7 @@ idpf_singleq_vtx1(volatile struct idpf_base_tx_desc *txdp,
__m128i descriptor = _mm_set_epi64x(high_qw,
pkt->buf_iova + pkt->data_off);
- _mm_storeu_si128((__m128i *)txdp, descriptor);
+ _mm_storeu_si128(RTE_CAST_PTR(__m128i *, txdp), descriptor);
}
#define IDPF_TX_LEN_MASK 0xAA
@@ -1178,7 +1174,7 @@ idpf_singleq_vtx(volatile struct idpf_base_tx_desc *txdp,
pkt[1]->buf_iova + pkt[1]->data_off,
hi_qw0,
pkt[0]->buf_iova + pkt[0]->data_off);
- _mm512_storeu_si512((void *)txdp, desc0_3);
+ _mm512_storeu_si512(RTE_CAST_PTR(void *, txdp), desc0_3);
}
/* do any last ones */
@@ -1435,7 +1431,7 @@ idpf_splitq_vtx1(volatile struct idpf_flex_tx_sched_desc *txdp,
__m128i descriptor = _mm_set_epi64x(high_qw,
pkt->buf_iova + pkt->data_off);
- _mm_storeu_si128((__m128i *)txdp, descriptor);
+ _mm_storeu_si128(RTE_CAST_PTR(__m128i *, txdp), descriptor);
}
static __rte_always_inline void
@@ -1480,7 +1476,7 @@ idpf_splitq_vtx(volatile struct idpf_flex_tx_sched_desc *txdp,
pkt[1]->buf_iova + pkt[1]->data_off,
hi_qw0,
pkt[0]->buf_iova + pkt[0]->data_off);
- _mm512_storeu_si512((void *)txdp, desc0_3);
+ _mm512_storeu_si512(RTE_CAST_PTR(void *, txdp), desc0_3);
}
/* do any last ones */
@@ -1521,11 +1517,11 @@ idpf_splitq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkt
if (nb_commit >= n) {
tx_backlog_entry_avx512(txep, tx_pkts, n);
- idpf_splitq_vtx((void *)txdp, tx_pkts, n - 1, cmd_dtype);
+ idpf_splitq_vtx(txdp, tx_pkts, n - 1, cmd_dtype);
tx_pkts += (n - 1);
txdp += (n - 1);
- idpf_splitq_vtx1((void *)txdp, *tx_pkts++, cmd_dtype);
+ idpf_splitq_vtx1(txdp, *tx_pkts++, cmd_dtype);
nb_commit = (uint16_t)(nb_commit - n);
@@ -1540,7 +1536,7 @@ idpf_splitq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkt
tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
- idpf_splitq_vtx((void *)txdp, tx_pkts, nb_commit, cmd_dtype);
+ idpf_splitq_vtx(txdp, tx_pkts, nb_commit, cmd_dtype);
tx_id = (uint16_t)(tx_id + nb_commit);
if (tx_id > txq->next_rs)
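
Note on the last two hunks: idpf_splitq_vtx() and idpf_splitq_vtx1() are declared to take volatile struct idpf_flex_tx_sched_desc * (visible in the earlier hunk headers), so txdp can be passed straight through; the old (void *) casts only hid the argument type from the compiler. A minimal illustration of the design choice, with hypothetical names:

	#include <stdint.h>

	struct tx_desc { uint64_t qw0, qw1; };	/* hypothetical descriptor */

	static void vtx1(volatile struct tx_desc *txdp)
	{
		txdp->qw0 = 0;	/* placeholder descriptor write */
	}

	static void xmit(volatile struct tx_desc *txdp)
	{
		/* No cast needed: the argument already has the parameter's
		 * exact type, so the compiler can still check it. */
		vtx1(txdp);
	}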