@@ -618,8 +618,8 @@ idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
}

uint16_t
-idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc_ring;
volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
@@ -850,8 +850,8 @@ idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,
}

uint16_t
-idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct idpf_tx_queue *txq = (struct idpf_tx_queue *)tx_queue;
volatile struct idpf_flex_tx_sched_desc *txr;
@@ -1024,8 +1024,8 @@ idpf_update_rx_tail(struct idpf_rx_queue *rxq, uint16_t nb_hold,
}

uint16_t
-idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+idpf_dp_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
volatile union virtchnl2_rx_desc *rx_ring;
volatile union virtchnl2_rx_desc *rxdp;
@@ -1186,8 +1186,8 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)

/* TX function */
uint16_t
-idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
volatile struct idpf_flex_tx_desc *txd;
volatile struct idpf_flex_tx_desc *txr;
@@ -1350,8 +1350,8 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

/* TX prep functions */
uint16_t
-idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+idpf_dp_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
int ret;
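
The four scalar handlers renamed above all share the generic ethdev burst prototype, and idpf_dp_prep_pkts() is a tx_pkt_prepare callback reached through rte_eth_tx_prepare() once the hunks further down wire it up. A minimal sketch of the transmit flow these callbacks serve; the helper name, port/queue ids and burst size are illustrative, not part of the patch:

/* Sketch: prepare-then-transmit loop. With this patch applied,
 * rte_eth_tx_prepare() on an idpf port lands in idpf_dp_prep_pkts(). */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
send_burst(uint16_t port_id, struct rte_mbuf **pkts, uint16_t nb)
{
	/* Validate offload requests before descriptors are written;
	 * packets rejected here never reach the hardware. */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, 0, pkts, nb);

	return rte_eth_tx_burst(port_id, 0, pkts, nb_prep);
}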
@@ -248,31 +248,31 @@ int idpf_qc_single_rxq_mbufs_alloc(struct idpf_rx_queue *rxq);
__rte_internal
int idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq);
__rte_internal
-uint16_t idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+uint16_t idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
__rte_internal
-uint16_t idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
__rte_internal
-uint16_t idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+uint16_t idpf_dp_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
__rte_internal
-uint16_t idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
__rte_internal
-uint16_t idpf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t idpf_dp_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
__rte_internal
int idpf_qc_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);
__rte_internal
int idpf_qc_singleq_tx_vec_avx512_setup(struct idpf_tx_queue *txq);
__rte_internal
-uint16_t idpf_singleq_recv_pkts_avx512(void *rx_queue,
- struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+uint16_t idpf_dp_singleq_recv_pkts_avx512(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
__rte_internal
-uint16_t idpf_singleq_xmit_pkts_avx512(void *tx_queue,
- struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t idpf_dp_singleq_xmit_pkts_avx512(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);

#endif /* _IDPF_COMMON_RXTX_H_ */
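
Every prototype in this header carries __rte_internal, so the renamed symbols remain driver-only ABI: a translation unit can call them only with ALLOW_INTERNAL_API defined, which the DPDK build sets for in-tree drivers. A sketch of a consumer, assuming the common idpf headers are on the include path as they are for the net/idpf driver; the helper name and burst size are illustrative:

#define ALLOW_INTERNAL_API	/* gate for __rte_internal symbols */
#include <rte_mbuf.h>
#include <idpf_common_rxtx.h>

/* Sketch: pull one burst through the renamed scalar single-queue path. */
static uint16_t
rx_once(void *rxq, struct rte_mbuf **pkts)
{
	return idpf_dp_singleq_recv_pkts(rxq, pkts, 32);
}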
@@ -533,8 +533,8 @@ _idpf_singleq_recv_raw_pkts_avx512(struct idpf_rx_queue *rxq,
* - nb_pkts < IDPF_DESCS_PER_LOOP, just return no packet
*/
uint16_t
-idpf_singleq_recv_pkts_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+idpf_dp_singleq_recv_pkts_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
return _idpf_singleq_recv_raw_pkts_avx512(rx_queue, rx_pkts, nb_pkts);
}
@@ -819,8 +819,8 @@ idpf_xmit_pkts_vec_avx512_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
}

uint16_t
-idpf_singleq_xmit_pkts_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+idpf_dp_singleq_xmit_pkts_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
return idpf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts);
}
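
The AVX512 entry points are thin wrappers around the common vector implementation; renaming them keeps scalar and vector paths in one idpf_dp_* namespace. Whether they are ever installed is decided once, with an x86-only gate along these lines (this mirrors the checks idpf_set_rx_function() makes below; the helper name is illustrative):

#include <stdbool.h>
#include <rte_cpuflags.h>
#include <rte_vect.h>

/* Sketch: both the CPU feature flag and the EAL --force-max-simd-bitwidth
 * limit must allow 512-bit SIMD before an avx512 burst is wired in. */
static bool
avx512_path_allowed(void)
{
	return rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
	       rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512;
}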
@@ -4,6 +4,14 @@ INTERNAL {
idpf_adapter_deinit;
idpf_adapter_init;

+ idpf_dp_prep_pkts;
+ idpf_dp_singleq_recv_pkts;
+ idpf_dp_singleq_recv_pkts_avx512;
+ idpf_dp_singleq_xmit_pkts;
+ idpf_dp_singleq_xmit_pkts_avx512;
+ idpf_dp_splitq_recv_pkts;
+ idpf_dp_splitq_xmit_pkts;
+
idpf_qc_rx_thresh_check;
idpf_qc_rx_queue_release;
idpf_qc_rxq_mbufs_release;
@@ -31,13 +39,6 @@ INTERNAL {
idpf_vport_rss_config;

idpf_execute_vc_cmd;
- idpf_prep_pkts;
- idpf_singleq_recv_pkts;
- idpf_singleq_recv_pkts_avx512;
- idpf_singleq_xmit_pkts;
- idpf_singleq_xmit_pkts_avx512;
- idpf_splitq_recv_pkts;
- idpf_splitq_xmit_pkts;
idpf_vc_alloc_vectors;
idpf_vc_check_api_version;
idpf_vc_config_irq_map_unmap;
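
version.map is a GNU linker version script: any symbol left out of the INTERNAL block is hidden in the shared-library build, so dependent drivers would fail to link. The rename therefore has to move all seven exports in lockstep with the code, which is what the two hunks above do. Stripped of the idpf entries, these scripts follow the usual shape (the local: * catch-all may be implicit in some driver maps):

INTERNAL {
	global:

	some_internal_symbol;

	local: *;
};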
@@ -771,7 +771,7 @@ idpf_set_rx_function(struct rte_eth_dev *dev)

#ifdef RTE_ARCH_X86
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- dev->rx_pkt_burst = idpf_splitq_recv_pkts;
+ dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
} else {
if (vport->rx_vec_allowed) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -780,19 +780,19 @@ idpf_set_rx_function(struct rte_eth_dev *dev)
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
- dev->rx_pkt_burst = idpf_singleq_recv_pkts_avx512;
+ dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts_avx512;
return;
}
#endif /* CC_AVX512_SUPPORT */
		}

- dev->rx_pkt_burst = idpf_singleq_recv_pkts;
+ dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
}
#else
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
- dev->rx_pkt_burst = idpf_splitq_recv_pkts;
+ dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
else
- dev->rx_pkt_burst = idpf_singleq_recv_pkts;
+ dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
#endif /* RTE_ARCH_X86 */
}

@@ -824,8 +824,8 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
#endif /* RTE_ARCH_X86 */

if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- dev->tx_pkt_burst = idpf_splitq_xmit_pkts;
- dev->tx_pkt_prepare = idpf_prep_pkts;
+ dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts;
+ dev->tx_pkt_prepare = idpf_dp_prep_pkts;
} else {
#ifdef RTE_ARCH_X86
if (vport->tx_vec_allowed) {
@@ -837,14 +837,14 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
continue;
idpf_qc_singleq_tx_vec_avx512_setup(txq);
}
- dev->tx_pkt_burst = idpf_singleq_xmit_pkts_avx512;
- dev->tx_pkt_prepare = idpf_prep_pkts;
+ dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts_avx512;
+ dev->tx_pkt_prepare = idpf_dp_prep_pkts;
return;
}
#endif /* CC_AVX512_SUPPORT */
}
#endif /* RTE_ARCH_X86 */
- dev->tx_pkt_burst = idpf_singleq_xmit_pkts;
- dev->tx_pkt_prepare = idpf_prep_pkts;
+ dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
+ dev->tx_pkt_prepare = idpf_dp_prep_pkts;
}
}
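
With the handler selection done once at device start, the fast path stays branch-free: rte_eth_rx_burst() simply indirects through the burst callback the PMD installed, and applications never reference the idpf_dp_* names directly. A receive-side sketch with illustrative port/queue ids and burst size:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: the burst call reaches whichever idpf_dp_* handler
 * idpf_set_rx_function() installed for this port's queue model. */
static void
poll_once(uint16_t port_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);	/* a real app would process these */
}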