@@ -390,6 +390,8 @@ virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
return nb_pkts;
}
+static _RTE_ETH_TX_DEF(virtual_ethdev_tx_burst_success)
+
static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
@@ -425,6 +427,7 @@ virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
return 0;
}
+static _RTE_ETH_TX_DEF(virtual_ethdev_tx_burst_fail)
void
virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
@@ -447,9 +450,11 @@ virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success)
dev_private = vrtl_eth_dev->data->dev_private;
if (success)
- vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
+ rte_eth_set_tx_burst(port_id,
+ _RTE_ETH_FUNC(virtual_ethdev_tx_burst_success));
else
- vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;
+ rte_eth_set_tx_burst(port_id,
+ _RTE_ETH_FUNC(virtual_ethdev_tx_burst_fail));
dev_private->tx_burst_fail_count = 0;
}
@@ -605,7 +610,8 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
rte_eth_set_rx_burst(eth_dev->data->port_id,
_RTE_ETH_FUNC(virtual_ethdev_rx_burst_success));
- eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
+ rte_eth_set_tx_burst(eth_dev->data->port_id,
+ _RTE_ETH_FUNC(virtual_ethdev_tx_burst_success));
rte_eth_dev_probing_finish(eth_dev);
@@ -1438,7 +1438,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_recv_pkts));
- dev->tx_pkt_burst = i40e_xmit_pkts;
+ rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_xmit_pkts));
dev->tx_pkt_prepare = i40e_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
@@ -1578,7 +1578,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
rte_eth_set_rx_burst(eth_dev->data->port_id,
_RTE_ETH_FUNC(i40e_recv_pkts));
- eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
+ rte_eth_set_tx_burst(eth_dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_xmit_pkts));
/*
* For secondary processes, we don't initialise any further as primary
@@ -1067,7 +1067,7 @@ i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt)
return count;
}
-uint16_t
+static inline uint16_t
i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct i40e_tx_queue *txq;
@@ -1315,6 +1315,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_tx;
}
+_RTE_ETH_TX_DEF(i40e_xmit_pkts)
+
static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
@@ -1509,6 +1511,8 @@ i40e_xmit_pkts_simple(void *tx_queue,
return nb_tx;
}
+static _RTE_ETH_TX_DEF(i40e_xmit_pkts_simple)
+
static uint16_t
i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -1531,6 +1535,8 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+static _RTE_ETH_TX_DEF(i40e_xmit_pkts_vec)
+
/*********************************************************************
*
* TX simple prep functions
@@ -2608,7 +2614,7 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
void
i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
{
- struct rte_eth_dev *dev;
+ rte_eth_tx_burst_t tx_pkt_burst;
uint16_t i;
if (!txq || !txq->sw_ring) {
@@ -2616,14 +2622,14 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
return;
}
- dev = &rte_eth_devices[txq->port_id];
+ tx_pkt_burst = rte_eth_get_tx_burst(txq->port_id);
/**
* vPMD tx will not set sw_ring's mbuf to NULL after free,
* so need to free remains more carefully.
*/
#ifdef CC_AVX512_SUPPORT
- if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx512) {
+ if (tx_pkt_burst == _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx512)) {
struct i40e_vec_tx_entry *swr = (void *)txq->sw_ring;
i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
@@ -2641,8 +2647,8 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
return;
}
#endif
- if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx2 ||
- dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
+ if (tx_pkt_burst == _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx2) ||
+ tx_pkt_burst == _RTE_ETH_FUNC(i40e_xmit_pkts_vec)) {
i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
if (txq->tx_tail < i) {
for (; i < txq->nb_tx_desc; i++) {
@@ -3564,49 +3570,55 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
dev->data->port_id);
- dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx512;
+ rte_eth_set_tx_burst(dev->data->port_id,
+ _RTE_ETH_FUNC(
+ i40e_xmit_pkts_vec_avx512));
#endif
} else {
PMD_INIT_LOG(DEBUG, "Using %sVector Tx (port %d).",
ad->tx_use_avx2 ? "avx2 " : "",
dev->data->port_id);
- dev->tx_pkt_burst = ad->tx_use_avx2 ?
- i40e_xmit_pkts_vec_avx2 :
- i40e_xmit_pkts_vec;
+ rte_eth_set_tx_burst(dev->data->port_id,
+ ad->tx_use_avx2 ?
+ _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx2) :
+ _RTE_ETH_FUNC(i40e_xmit_pkts_vec));
}
#else /* RTE_ARCH_X86 */
PMD_INIT_LOG(DEBUG, "Using Vector Tx (port %d).",
dev->data->port_id);
- dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+ rte_eth_set_tx_burst(dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_xmit_pkts_vec));
#endif /* RTE_ARCH_X86 */
} else {
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
- dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+ rte_eth_set_tx_burst(dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_xmit_pkts_simple));
}
dev->tx_pkt_prepare = i40e_simple_prep_pkts;
} else {
PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
- dev->tx_pkt_burst = i40e_xmit_pkts;
+ rte_eth_set_tx_burst(dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_xmit_pkts));
dev->tx_pkt_prepare = i40e_prep_pkts;
}
}
static const struct {
- eth_tx_burst_t pkt_burst;
+ rte_eth_tx_burst_t pkt_burst;
const char *info;
} i40e_tx_burst_infos[] = {
- { i40e_xmit_pkts_simple, "Scalar Simple" },
- { i40e_xmit_pkts, "Scalar" },
+ { _RTE_ETH_FUNC(i40e_xmit_pkts_simple), "Scalar Simple" },
+ { _RTE_ETH_FUNC(i40e_xmit_pkts), "Scalar" },
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- { i40e_xmit_pkts_vec_avx512, "Vector AVX512" },
+ { _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx512), "Vector AVX512" },
#endif
- { i40e_xmit_pkts_vec_avx2, "Vector AVX2" },
- { i40e_xmit_pkts_vec, "Vector SSE" },
+ { _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx2), "Vector AVX2" },
+ { _RTE_ETH_FUNC(i40e_xmit_pkts_vec), "Vector SSE" },
#elif defined(RTE_ARCH_ARM64)
- { i40e_xmit_pkts_vec, "Vector Neon" },
+ { _RTE_ETH_FUNC(i40e_xmit_pkts_vec), "Vector Neon" },
#elif defined(RTE_ARCH_PPC_64)
- { i40e_xmit_pkts_vec, "Vector AltiVec" },
+ { _RTE_ETH_FUNC(i40e_xmit_pkts_vec), "Vector AltiVec" },
#endif
};
@@ -3614,7 +3626,7 @@ int
i40e_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
struct rte_eth_burst_mode *mode)
{
- eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+ rte_eth_tx_burst_t pkt_burst = rte_eth_get_tx_burst(dev->data->port_id);
int ret = -EINVAL;
unsigned int i;
@@ -203,9 +203,7 @@ void i40e_dev_tx_queue_release(void *txq);
_RTE_ETH_RX_PROTO(i40e_recv_pkts);
_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts);
-uint16_t i40e_xmit_pkts(void *tx_queue,
- struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(i40e_xmit_pkts);
uint16_t i40e_simple_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -236,8 +234,10 @@ int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
+
uint16_t i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+ uint16_t nb_pkts);
+
void i40e_set_rx_function(struct rte_eth_dev *dev);
void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
struct i40e_tx_queue *txq);
@@ -248,16 +248,14 @@ void i40e_set_default_pctype_table(struct rte_eth_dev *dev);
_RTE_ETH_RX_PROTO(i40e_recv_pkts_vec_avx2);
_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec_avx2);
-uint16_t i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(i40e_xmit_pkts_vec_avx2);
+
int i40e_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
_RTE_ETH_RX_PROTO(i40e_recv_pkts_vec_avx512);
_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec_avx512);
-uint16_t i40e_xmit_pkts_vec_avx512(void *tx_queue,
- struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(i40e_xmit_pkts_vec_avx512);
/* For each value it means, datasheet of hardware can tell more details
*
@@ -824,7 +824,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-uint16_t
+static inline uint16_t
i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -845,3 +845,5 @@ i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+
+_RTE_ETH_TX_DEF(i40e_xmit_pkts_vec_avx2)
@@ -1120,7 +1120,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-uint16_t
+static inline uint16_t
i40e_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -1141,3 +1141,5 @@ i40e_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+
+_RTE_ETH_TX_DEF(i40e_xmit_pkts_vec_avx512)
@@ -475,6 +475,8 @@ i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
return 0;
}
+static _RTE_ETH_TX_DEF(i40e_vf_representor_tx_burst)
+
int
i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
{
@@ -505,7 +507,8 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
*/
rte_eth_set_rx_burst(ethdev->data->port_id,
_RTE_ETH_FUNC(i40e_vf_representor_rx_burst));
- ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
+ rte_eth_set_tx_burst(ethdev->data->port_id,
+ _RTE_ETH_FUNC(i40e_vf_representor_tx_burst));
vf = &pf->vfs[representor->vf_id];
@@ -50,6 +50,8 @@ ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
return 0;
}
+static _RTE_ETH_TX_DEF(ice_dcf_xmit_pkts)
+
static int
ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
{
@@ -1043,7 +1045,8 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
rte_eth_set_rx_burst(eth_dev->data->port_id,
_RTE_ETH_FUNC(ice_dcf_recv_pkts));
- eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
+ rte_eth_set_tx_burst(eth_dev->data->port_id,
+ _RTE_ETH_FUNC(ice_dcf_xmit_pkts));
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
@@ -28,6 +28,8 @@ ice_dcf_vf_repr_tx_burst(__rte_unused void *txq,
return 0;
}
+static _RTE_ETH_TX_DEF(ice_dcf_vf_repr_tx_burst)
+
static int
ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev)
{
@@ -417,7 +419,8 @@ ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param)
rte_eth_set_rx_burst(vf_rep_eth_dev->data->port_id,
_RTE_ETH_FUNC(ice_dcf_vf_repr_rx_burst));
- vf_rep_eth_dev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst;
+ rte_eth_set_tx_burst(vf_rep_eth_dev->data->port_id,
+ _RTE_ETH_FUNC(ice_dcf_vf_repr_tx_burst));
vf_rep_eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
vf_rep_eth_dev->data->representor_id = repr->vf_id;
@@ -1997,7 +1997,7 @@ ice_dev_init(struct rte_eth_dev *dev)
dev->rx_descriptor_status = ice_rx_descriptor_status;
dev->tx_descriptor_status = ice_tx_descriptor_status;
rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_recv_pkts));
- dev->tx_pkt_burst = ice_xmit_pkts;
+ rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_xmit_pkts));
dev->tx_pkt_prepare = ice_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
@@ -2558,7 +2558,7 @@ ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
return count;
}
-uint16_t
+static inline uint16_t
ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct ice_tx_queue *txq;
@@ -2775,6 +2775,8 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_tx;
}
+_RTE_ETH_TX_DEF(ice_xmit_pkts)
+
static __rte_always_inline int
ice_tx_free_bufs(struct ice_tx_queue *txq)
{
@@ -3064,6 +3066,8 @@ ice_xmit_pkts_simple(void *tx_queue,
return nb_tx;
}
+static _RTE_ETH_TX_DEF(ice_xmit_pkts_simple)
+
void __rte_cold
ice_set_rx_function(struct rte_eth_dev *dev)
{
@@ -3433,14 +3437,15 @@ ice_set_tx_function(struct rte_eth_dev *dev)
PMD_DRV_LOG(NOTICE,
"Using AVX512 OFFLOAD Vector Tx (port %d).",
dev->data->port_id);
- dev->tx_pkt_burst =
- ice_xmit_pkts_vec_avx512_offload;
+ rte_eth_set_tx_burst(dev->data->port_id,
+ _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512_offload));
dev->tx_pkt_prepare = ice_prep_pkts;
} else {
PMD_DRV_LOG(NOTICE,
"Using AVX512 Vector Tx (port %d).",
dev->data->port_id);
- dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+ rte_eth_set_tx_burst(dev->data->port_id,
+ _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512));
}
#endif
} else {
@@ -3448,16 +3453,17 @@ ice_set_tx_function(struct rte_eth_dev *dev)
PMD_DRV_LOG(NOTICE,
"Using AVX2 OFFLOAD Vector Tx (port %d).",
dev->data->port_id);
- dev->tx_pkt_burst =
- ice_xmit_pkts_vec_avx2_offload;
+ rte_eth_set_tx_burst(dev->data->port_id,
+ _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx2_offload));
dev->tx_pkt_prepare = ice_prep_pkts;
} else {
PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
ad->tx_use_avx2 ? "avx2 " : "",
dev->data->port_id);
- dev->tx_pkt_burst = ad->tx_use_avx2 ?
- ice_xmit_pkts_vec_avx2 :
- ice_xmit_pkts_vec;
+ rte_eth_set_tx_burst(dev->data->port_id,
+ ad->tx_use_avx2 ?
+ _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx2) :
+ _RTE_ETH_FUNC(ice_xmit_pkts_vec));
}
}
@@ -3467,28 +3473,31 @@ ice_set_tx_function(struct rte_eth_dev *dev)
if (ad->tx_simple_allowed) {
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
- dev->tx_pkt_burst = ice_xmit_pkts_simple;
+ rte_eth_set_tx_burst(dev->data->port_id,
+ _RTE_ETH_FUNC(ice_xmit_pkts_simple));
dev->tx_pkt_prepare = NULL;
} else {
PMD_INIT_LOG(DEBUG, "Normal tx finally be used.");
- dev->tx_pkt_burst = ice_xmit_pkts;
+ rte_eth_set_tx_burst(dev->data->port_id,
+ _RTE_ETH_FUNC(ice_xmit_pkts));
dev->tx_pkt_prepare = ice_prep_pkts;
}
}
static const struct {
- eth_tx_burst_t pkt_burst;
+ rte_eth_tx_burst_t pkt_burst;
const char *info;
} ice_tx_burst_infos[] = {
- { ice_xmit_pkts_simple, "Scalar Simple" },
- { ice_xmit_pkts, "Scalar" },
+ { _RTE_ETH_FUNC(ice_xmit_pkts_simple), "Scalar Simple" },
+ { _RTE_ETH_FUNC(ice_xmit_pkts), "Scalar" },
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
- { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
+ { _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512), "Vector AVX512" },
+ { _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512_offload),
+ "Offload Vector AVX512" },
#endif
- { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
- { ice_xmit_pkts_vec, "Vector SSE" },
+ { _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx2), "Vector AVX2" },
+ { _RTE_ETH_FUNC(ice_xmit_pkts_vec), "Vector SSE" },
#endif
};
@@ -3496,7 +3505,7 @@ int
ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
struct rte_eth_burst_mode *mode)
{
- eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+ rte_eth_tx_burst_t pkt_burst = rte_eth_get_tx_burst(dev->data->port_id);
int ret = -EINVAL;
unsigned int i;
@@ -213,8 +213,7 @@ void ice_free_queues(struct rte_eth_dev *dev);
int ice_fdir_setup_tx_resources(struct ice_pf *pf);
int ice_fdir_setup_rx_resources(struct ice_pf *pf);
_RTE_ETH_RX_PROTO(ice_recv_pkts);
-uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts);
void ice_set_rx_function(struct rte_eth_dev *dev);
uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -245,29 +244,24 @@ int ice_txq_vec_setup(struct ice_tx_queue *txq);
_RTE_ETH_RX_PROTO(ice_recv_pkts_vec);
_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec);
-uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec);
_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx2);
_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx2_offload);
_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx2);
_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx2_offload);
-uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
-uint16_t ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx2);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx2_offload);
_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx512);
_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx512_offload);
_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx512);
_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx512_offload);
-uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
-uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
- struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx512);
+_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx512_offload);
+
int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
@@ -985,16 +985,20 @@ ice_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-uint16_t
+static inline uint16_t
ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, false);
}
-uint16_t
+_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx2)
+
+static inline uint16_t
ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, true);
}
+
+_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx2_offload)
@@ -1235,7 +1235,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-uint16_t
+static inline uint16_t
ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -1257,7 +1257,9 @@ ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-uint16_t
+_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx512)
+
+static inline uint16_t
ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -1279,3 +1281,5 @@ ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+
+_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx512_offload)
@@ -195,10 +195,11 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
#ifdef CC_AVX512_SUPPORT
- struct rte_eth_dev *dev = &rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id];
+ rte_eth_tx_burst_t tx_pkt_burst =
+ rte_eth_get_tx_burst(txq->vsi->adapter->pf.dev_data->port_id);
- if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
- dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
+ if (tx_pkt_burst == _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512) ||
+ tx_pkt_burst == _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512_offload)) {
struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
if (txq->tx_tail < i) {
@@ -758,7 +758,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-uint16_t
+static inline uint16_t
ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -779,6 +779,8 @@ ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+_RTE_ETH_TX_DEF(ice_xmit_pkts_vec)
+
int __rte_cold
ice_rxq_vec_setup(struct ice_rx_queue *rxq)
{
@@ -1633,6 +1633,100 @@ rte_eth_rx_burst_t rte_eth_get_rx_burst(uint16_t port_id);
__rte_experimental
int rte_eth_set_rx_burst(uint16_t port_id, rte_eth_rx_burst_t rxf);
+/**
+ * @internal
+ * Helper routine for eth driver tx_burst API.
+ * Should be called as first thing on entrance to the PMD's rte_eth_tx_burst
+ * implementation.
+ * Does necessary checks and post-processing - invokes TX callbacks if any,
+ * tracing, etc.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the transmit queues.
+ * @param tx_pkts
+ * The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
+ * which contain the output packets.
+ * @param nb_pkts
+ *   Pointer to the maximum number of packets to transmit; may be reduced
+ *
+ * @return
+ * Pointer to device TX queue structure on success or NULL otherwise.
+ */
+__rte_internal
+static inline void *
+_rte_eth_tx_prolog(uint16_t port_id, uint16_t queue_id,
+ struct rte_mbuf **tx_pkts, uint16_t *nb_pkts)
+{
+ uint16_t n;
+ struct rte_eth_dev *dev;
+
+ n = *nb_pkts;
+ dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_ETHDEV_DEBUG_TX
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
+
+ if (queue_id >= dev->data->nb_tx_queues) {
+ RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+ return NULL;
+ }
+#endif
+
+#ifdef RTE_ETHDEV_RXTX_CALLBACKS
+ struct rte_eth_rxtx_callback *cb;
+
+ /* __ATOMIC_RELEASE memory order was used when the
+ * call back was inserted into the list.
+ * Since there is a clear dependency between loading
+ * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * not required.
+ */
+ cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
+ __ATOMIC_RELAXED);
+
+ if (unlikely(cb != NULL)) {
+ do {
+ n = cb->fn.tx(port_id, queue_id, tx_pkts, n, cb->param);
+ cb = cb->next;
+ } while (cb != NULL);
+ }
+
+ *nb_pkts = n;
+#endif
+
+ rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, n);
+ return dev->data->tx_queues[queue_id];
+}
+
+/**
+ * @internal
+ * Helper macro to declare prototypes of new API wrappers for existing PMD tx_burst functions.
+ */
+#define _RTE_ETH_TX_PROTO(fn) \
+ uint16_t _RTE_ETH_FUNC(fn)(uint16_t port_id, uint16_t queue_id, \
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+
+/**
+ * @internal
+ * Helper macro to create new API wrappers for existing PMD tx_burst functions.
+ */
+#define _RTE_ETH_TX_DEF(fn) \
+_RTE_ETH_TX_PROTO(fn) \
+{ \
+ void *txq = _rte_eth_tx_prolog(port_id, queue_id, tx_pkts, &nb_pkts); \
+ if (txq == NULL) \
+ return 0; \
+ return fn(txq, tx_pkts, nb_pkts); \
+}
+
+__rte_experimental
+rte_eth_tx_burst_t rte_eth_get_tx_burst(uint16_t port_id);
+
+__rte_experimental
+int rte_eth_set_tx_burst(uint16_t port_id, rte_eth_tx_burst_t txf);
+
#ifdef __cplusplus
}
#endif
@@ -588,7 +588,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
eth_dev->device = NULL;
eth_dev->process_private = NULL;
eth_dev->intr_handle = NULL;
- eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
eth_dev->rx_queue_count = NULL;
eth_dev->rx_descriptor_done = NULL;
@@ -6358,3 +6357,25 @@ rte_eth_set_rx_burst(uint16_t port_id, rte_eth_rx_burst_t rxf)
rte_eth_burst_api[port_id].rx_pkt_burst = rxf;
return 0;
}
+
+__rte_experimental
+rte_eth_tx_burst_t
+rte_eth_get_tx_burst(uint16_t port_id)
+{
+ if (port_id >= RTE_DIM(rte_eth_burst_api)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ return rte_eth_burst_api[port_id].tx_pkt_burst;
+}
+
+__rte_experimental
+int
+rte_eth_set_tx_burst(uint16_t port_id, rte_eth_tx_burst_t txf)
+{
+ if (port_id >= RTE_DIM(rte_eth_burst_api))
+ return -EINVAL;
+
+ rte_eth_burst_api[port_id].tx_pkt_burst = txf;
+ return 0;
+}
@@ -5226,42 +5226,11 @@ static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-
-#ifdef RTE_ETHDEV_DEBUG_TX
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
- RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
-
- if (queue_id >= dev->data->nb_tx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+ if (port_id >= RTE_MAX_ETHPORTS)
return 0;
- }
-#endif
-
-#ifdef RTE_ETHDEV_RXTX_CALLBACKS
- struct rte_eth_rxtx_callback *cb;
- /* __ATOMIC_RELEASE memory order was used when the
- * call back was inserted into the list.
- * Since there is a clear dependency between loading
- * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
- * not required.
- */
- cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
- __ATOMIC_RELAXED);
-
- if (unlikely(cb != NULL)) {
- do {
- nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
- cb->param);
- cb = cb->next;
- } while (cb != NULL);
- }
-#endif
-
- rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts,
- nb_pkts);
- return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
+ return rte_eth_burst_api[port_id].tx_pkt_burst(port_id, queue_id,
+ tx_pkts, nb_pkts);
}
/**
@@ -115,7 +115,6 @@ struct rte_eth_rxtx_callback {
* process, while the actual configuration data for the device is shared.
*/
struct rte_eth_dev {
- eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. */
@@ -253,7 +253,9 @@ EXPERIMENTAL {
# added in 21.11
rte_eth_burst_api;
rte_eth_get_rx_burst;
+ rte_eth_get_tx_burst;
rte_eth_set_rx_burst;
+ rte_eth_set_tx_burst;
};
INTERNAL {