@@ -1280,7 +1280,7 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
if (bp->ieee_1588)
goto use_scalar_rx;
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+#if defined(RTE_ARCH_X86)
if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
PMD_DRV_LOG(INFO,
@@ -1332,7 +1332,7 @@ bnxt_transmit_function(struct rte_eth_dev *eth_dev)
BNXT_TRUFLOW_EN(bp) || bp->ieee_1588)
goto use_scalar_tx;
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+#if defined(RTE_ARCH_X86)
if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
PMD_DRV_LOG(INFO,
@@ -3019,7 +3019,7 @@ static const struct {
#if defined(RTE_ARCH_X86)
{bnxt_recv_pkts_vec, "Vector SSE"},
#endif
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+#if defined(RTE_ARCH_X86)
{bnxt_recv_pkts_vec_avx2, "Vector AVX2"},
#endif
#if defined(RTE_ARCH_ARM64)
@@ -3053,7 +3053,7 @@ static const struct {
#if defined(RTE_ARCH_X86)
{bnxt_xmit_pkts_vec, "Vector SSE"},
#endif
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+#if defined(RTE_ARCH_X86)
{bnxt_xmit_pkts_vec_avx2, "Vector AVX2"},
#endif
#if defined(RTE_ARCH_ARM64)
@@ -112,7 +112,7 @@ uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);
#endif
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+#if defined(RTE_ARCH_X86)
uint16_t bnxt_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
#endif
@@ -52,7 +52,7 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
#endif
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+#if defined(RTE_ARCH_X86)
uint16_t bnxt_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
#endif
@@ -53,7 +53,6 @@ subdir('hcapi/cfa')
if arch_subdir == 'x86'
sources += files('bnxt_rxtx_vec_sse.c')
- cflags += ['-DCC_AVX2_SUPPORT']
# build AVX2 code with instruction set explicitly enabled for runtime selection
bnxt_avx2_lib = static_library('bnxt_avx2_lib',
'bnxt_rxtx_vec_avx2.c',
@@ -3216,15 +3216,9 @@ get_avx_supported(bool request_avx512)
#endif
} else {
if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 &&
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
-#ifdef CC_AVX2_SUPPORT
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
return true;
-#else
- PMD_DRV_LOG(NOTICE,
- "AVX2 is not supported in build env");
- return false;
-#endif
}
return false;
@@ -3608,7 +3602,7 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
}
}
-#ifndef CC_AVX2_SUPPORT
+#ifndef RTE_ARCH_X86
uint16_t
i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
@@ -3632,4 +3626,4 @@ i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
{
return 0;
}
-#endif /* ifndef CC_AVX2_SUPPORT */
+#endif /* ifndef RTE_ARCH_X86 */
@@ -49,7 +49,6 @@ if arch_subdir == 'x86'
cflags += ['-fno-asynchronous-unwind-tables']
endif
- cflags += ['-DCC_AVX2_SUPPORT']
i40e_avx2_lib = static_library('i40e_avx2_lib',
'i40e_rxtx_vec_avx2.c',
dependencies: [static_rte_ethdev, static_rte_kvargs, static_rte_hash],
@@ -396,7 +396,7 @@ iavf_txd_enable_offload(__rte_unused struct rte_mbuf *tx_pkt,
*txd_hi |= ((uint64_t)td_cmd) << IAVF_TXD_QW1_CMD_SHIFT;
}
-#ifdef CC_AVX2_SUPPORT
+#ifdef RTE_ARCH_X86
static __rte_always_inline void
iavf_rxq_rearm_common(struct iavf_rx_queue *rxq, __rte_unused bool avx512)
{
@@ -29,7 +29,6 @@ if arch_subdir == 'x86'
cflags += ['-fno-asynchronous-unwind-tables']
endif
- cflags += ['-DCC_AVX2_SUPPORT']
iavf_avx2_lib = static_library('iavf_avx2_lib',
'iavf_rxtx_vec_avx2.c',
dependencies: [static_rte_ethdev, static_rte_kvargs, static_rte_hash],
@@ -28,7 +28,6 @@ if arch_subdir == 'x86'
cflags += ['-fno-asynchronous-unwind-tables']
endif
- cflags += ['-DCC_AVX2_SUPPORT']
ice_avx2_lib = static_library('ice_avx2_lib',
'ice_rxtx_vec_avx2.c',
dependencies: [static_rte_ethdev, static_rte_kvargs, static_rte_hash],
@@ -14,7 +14,6 @@ headers = files('rte_acl.h', 'rte_acl_osdep.h')
if dpdk_conf.has('RTE_ARCH_X86')
sources += files('acl_run_sse.c')
- cflags += '-DCC_AVX2_SUPPORT'
avx2_tmplib = static_library('avx2_tmp',
'acl_run_avx2.c',
dependencies: static_rte_eal,
@@ -42,10 +42,9 @@ rte_acl_classify_avx512x32(__rte_unused const struct rte_acl_ctx *ctx,
}
#endif
-#ifndef CC_AVX2_SUPPORT
+#ifndef RTE_ARCH_X86
/*
- * If the compiler doesn't support AVX2 instructions,
- * then the dummy one would be used instead for AVX2 classify method.
+ * On non-x86 builds, provide dummy fallbacks for the AVX2 and SSE classify methods.
*/
int
rte_acl_classify_avx2(__rte_unused const struct rte_acl_ctx *ctx,
@@ -56,9 +55,6 @@ rte_acl_classify_avx2(__rte_unused const struct rte_acl_ctx *ctx,
{
return -ENOTSUP;
}
-#endif
-
-#ifndef RTE_ARCH_X86
int
rte_acl_classify_sse(__rte_unused const struct rte_acl_ctx *ctx,
__rte_unused const uint8_t **data,
@@ -182,7 +178,7 @@ acl_check_alg_x86(enum rte_acl_classify_alg alg)
}
if (alg == RTE_ACL_CLASSIFY_AVX2) {
-#ifdef CC_AVX2_SUPPORT
+#ifdef RTE_ARCH_X86
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
return 0;