@@ -1435,8 +1435,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_queue_count = i40e_dev_rx_queue_count;
dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
- dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
- dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
+ rte_eth_set_rx_desc_st(dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_dev_rx_descriptor_status));
+ rte_eth_set_tx_desc_st(dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_dev_tx_descriptor_status));
rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_recv_pkts));
rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_xmit_pkts));
rte_eth_set_tx_prep(dev->data->port_id, _RTE_ETH_FUNC(i40e_prep_pkts));
@@ -1574,8 +1574,10 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &i40evf_eth_dev_ops;
eth_dev->rx_queue_count = i40e_dev_rx_queue_count;
eth_dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
- eth_dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
- eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
+ rte_eth_set_rx_desc_st(eth_dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_dev_rx_descriptor_status));
+ rte_eth_set_tx_desc_st(eth_dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_dev_tx_descriptor_status));
rte_eth_set_rx_burst(eth_dev->data->port_id,
_RTE_ETH_FUNC(i40e_recv_pkts));
rte_eth_set_tx_burst(eth_dev->data->port_id,
@@ -2189,7 +2189,7 @@ i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
return ret;
}
-int
+static int
i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
struct i40e_rx_queue *rxq = rx_queue;
@@ -2216,7 +2216,9 @@ i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
return RTE_ETH_RX_DESC_AVAIL;
}
-int
+_RTE_ETH_RX_DESC_DEF(i40e_dev_rx_descriptor_status)
+
+static int
i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
struct i40e_tx_queue *txq = tx_queue;
@@ -2247,6 +2249,8 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+_RTE_ETH_TX_DESC_DEF(i40e_dev_tx_descriptor_status)
+
static int
i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
struct i40e_tx_queue *txq)
@@ -223,8 +223,8 @@ void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
-int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
-int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+_RTE_ETH_RX_DESC_PROTO(i40e_dev_rx_descriptor_status);
+_RTE_ETH_TX_DESC_PROTO(i40e_dev_tx_descriptor_status);
_RTE_ETH_RX_PROTO(i40e_recv_pkts_vec);
_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec);
@@ -1994,8 +1994,10 @@ ice_dev_init(struct rte_eth_dev *dev)
dev->dev_ops = &ice_eth_dev_ops;
dev->rx_queue_count = ice_rx_queue_count;
- dev->rx_descriptor_status = ice_rx_descriptor_status;
- dev->tx_descriptor_status = ice_tx_descriptor_status;
+ rte_eth_set_rx_desc_st(dev->data->port_id,
+ _RTE_ETH_FUNC(ice_rx_descriptor_status));
+ rte_eth_set_tx_desc_st(dev->data->port_id,
+ _RTE_ETH_FUNC(ice_tx_descriptor_status));
rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_recv_pkts));
rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_xmit_pkts));
rte_eth_set_tx_prep(dev->data->port_id, _RTE_ETH_FUNC(ice_prep_pkts));
@@ -2021,7 +2021,7 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
-int
+static int
ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
volatile union ice_rx_flex_desc *rxdp;
@@ -2046,7 +2046,9 @@ ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
return RTE_ETH_RX_DESC_AVAIL;
}
-int
+_RTE_ETH_RX_DESC_DEF(ice_rx_descriptor_status)
+
+static int
ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
struct ice_tx_queue *txq = tx_queue;
@@ -2077,6 +2079,8 @@ ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+_RTE_ETH_TX_DESC_DEF(ice_tx_descriptor_status)
+
void
ice_free_queues(struct rte_eth_dev *dev)
{
@@ -228,8 +228,8 @@ int ice_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_burst_mode *mode);
int ice_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_burst_mode *mode);
-int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
-int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
+_RTE_ETH_RX_DESC_PROTO(ice_rx_descriptor_status);
+_RTE_ETH_TX_DESC_PROTO(ice_tx_descriptor_status);
void ice_set_default_ptype_table(struct rte_eth_dev *dev);
const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq,
@@ -1789,6 +1789,138 @@ rte_eth_tx_prep_t rte_eth_get_tx_prep(uint16_t port_id);
__rte_experimental
int rte_eth_set_tx_prep(uint16_t port_id, rte_eth_tx_prep_t txf);
+/**
+ * @internal
+ * Helper routine for eth driver rx_descriptor_status API.
+ * Should be called as first thing on entrance to the PMD's
+ * rx_descriptor_status implementation.
+ * Does necessary checks and retrieves pointer to device RX queue.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the receive queue.
+ * @param rxq
+ * The output pointer to the RX queue structure.
+ *
+ * @return
+ * Zero on success or negative error code otherwise.
+ */
+__rte_internal
+static inline int
+_rte_eth_rx_desc_prolog(uint16_t port_id, uint16_t queue_id, void **rxq)
+{
+ struct rte_eth_dev *dev;
+
+#ifdef RTE_ETHDEV_DEBUG_RX
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+#endif
+ dev = &rte_eth_devices[port_id];
+#ifdef RTE_ETHDEV_DEBUG_RX
+ if (queue_id >= dev->data->nb_rx_queues)
+ return -ENODEV;
+#endif
+ *rxq = dev->data->rx_queues[queue_id];
+ return 0;
+}
+
+/**
+ * @internal
+ * Helper macro to declare the prototype of the new API wrapper generated
+ * for an existing PMD rx_descriptor_status function.
+ */
+#define _RTE_ETH_RX_DESC_PROTO(fn) \
+ int _RTE_ETH_FUNC(fn)(uint16_t port_id, uint16_t queue_id, \
+ uint16_t offset)
+
+/**
+ * @internal
+ * Helper macro to create new API wrappers for existing PMD rx_descriptor_status
+ * functions.
+ */
+#define _RTE_ETH_RX_DESC_DEF(fn) \
+_RTE_ETH_RX_DESC_PROTO(fn) \
+{ \
+ int rc; \
+ void *rxq; \
+ rc = _rte_eth_rx_desc_prolog(port_id, queue_id, &rxq); \
+ if (rc != 0) \
+ return rc; \
+ return fn(rxq, offset); \
+}
+
+__rte_experimental
+rte_eth_rx_descriptor_status_t rte_eth_get_rx_desc_st(uint16_t port_id);
+
+__rte_experimental
+int rte_eth_set_rx_desc_st(uint16_t port_id, rte_eth_rx_descriptor_status_t rf);
+
+/**
+ * @internal
+ * Helper routine for eth driver tx_descriptor_status API.
+ * Should be called as first thing on entrance to the PMD's
+ * tx_descriptor_status implementation.
+ * Does necessary checks and retrieves pointer to device TX queue.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the transmit queue.
+ * @param txq
+ * The output pointer to the TX queue structure.
+ *
+ * @return
+ * Zero on success or negative error code otherwise.
+ */
+__rte_internal
+static inline int
+_rte_eth_tx_desc_prolog(uint16_t port_id, uint16_t queue_id, void **txq)
+{
+ struct rte_eth_dev *dev;
+
+#ifdef RTE_ETHDEV_DEBUG_TX
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+#endif
+ dev = &rte_eth_devices[port_id];
+#ifdef RTE_ETHDEV_DEBUG_TX
+ if (queue_id >= dev->data->nb_tx_queues)
+ return -ENODEV;
+#endif
+ *txq = dev->data->tx_queues[queue_id];
+ return 0;
+}
+
+/**
+ * @internal
+ * Helper macro to declare the prototype of the new API wrapper generated
+ * for an existing PMD tx_descriptor_status function.
+ */
+#define _RTE_ETH_TX_DESC_PROTO(fn) \
+ int _RTE_ETH_FUNC(fn)(uint16_t port_id, uint16_t queue_id, \
+ uint16_t offset)
+
+/**
+ * @internal
+ * Helper macro to define the new API wrapper for an existing PMD
+ * tx_descriptor_status function: runs the prolog checks, then invokes it.
+ */
+#define _RTE_ETH_TX_DESC_DEF(fn) \
+_RTE_ETH_TX_DESC_PROTO(fn) \
+{ \
+ int rc; \
+ void *txq; \
+ rc = _rte_eth_tx_desc_prolog(port_id, queue_id, &txq); \
+ if (rc != 0) \
+ return rc; \
+ return fn(txq, offset); \
+}
+
+__rte_experimental
+rte_eth_tx_descriptor_status_t rte_eth_get_tx_desc_st(uint16_t port_id);
+
+__rte_experimental
+int rte_eth_set_tx_desc_st(uint16_t port_id, rte_eth_tx_descriptor_status_t tf);
+
#ifdef __cplusplus
}
#endif
@@ -590,8 +590,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
eth_dev->intr_handle = NULL;
eth_dev->rx_queue_count = NULL;
eth_dev->rx_descriptor_done = NULL;
- eth_dev->rx_descriptor_status = NULL;
- eth_dev->tx_descriptor_status = NULL;
eth_dev->dev_ops = NULL;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
@@ -6400,3 +6398,46 @@ rte_eth_set_tx_prep(uint16_t port_id, rte_eth_tx_prep_t tpf)
rte_eth_burst_api[port_id].tx_pkt_prepare = tpf;
return 0;
}
+
+__rte_experimental
+rte_eth_rx_descriptor_status_t
+rte_eth_get_rx_desc_st(uint16_t port_id)
+{
+ if (port_id >= RTE_DIM(rte_eth_burst_api)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ return rte_eth_burst_api[port_id].rx_descriptor_status;
+}
+
+__rte_experimental
+int
+rte_eth_set_rx_desc_st(uint16_t port_id, rte_eth_rx_descriptor_status_t rf)
+{
+ if (port_id >= RTE_DIM(rte_eth_burst_api))
+ return -EINVAL;
+
+ rte_eth_burst_api[port_id].rx_descriptor_status = rf;
+ return 0;
+}
+__rte_experimental
+rte_eth_tx_descriptor_status_t
+rte_eth_get_tx_desc_st(uint16_t port_id)
+{
+ if (port_id >= RTE_DIM(rte_eth_burst_api)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ return rte_eth_burst_api[port_id].tx_descriptor_status;
+}
+
+__rte_experimental
+int
+rte_eth_set_tx_desc_st(uint16_t port_id, rte_eth_tx_descriptor_status_t tf)
+{
+ if (port_id >= RTE_DIM(rte_eth_burst_api))
+ return -EINVAL;
+
+ rte_eth_burst_api[port_id].tx_descriptor_status = tf;
+ return 0;
+}
@@ -5082,21 +5082,15 @@ static inline int
rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
uint16_t offset)
{
- struct rte_eth_dev *dev;
- void *rxq;
+ rte_eth_rx_descriptor_status_t rds;
-#ifdef RTE_ETHDEV_DEBUG_RX
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-#endif
- dev = &rte_eth_devices[port_id];
-#ifdef RTE_ETHDEV_DEBUG_RX
- if (queue_id >= dev->data->nb_rx_queues)
- return -ENODEV;
-#endif
- RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP);
- rxq = dev->data->rx_queues[queue_id];
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return -EINVAL;
+
+ rds = rte_eth_burst_api[port_id].rx_descriptor_status;
+ RTE_FUNC_PTR_OR_ERR_RET(rds, -ENOTSUP);
- return (*dev->rx_descriptor_status)(rxq, offset);
+ return (rds)(port_id, queue_id, offset);
}
#define RTE_ETH_TX_DESC_FULL 0 /**< Desc filled for hw, waiting xmit. */
@@ -5139,21 +5133,15 @@ rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
uint16_t queue_id, uint16_t offset)
{
- struct rte_eth_dev *dev;
- void *txq;
+ rte_eth_tx_descriptor_status_t tds;
-#ifdef RTE_ETHDEV_DEBUG_TX
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-#endif
- dev = &rte_eth_devices[port_id];
-#ifdef RTE_ETHDEV_DEBUG_TX
- if (queue_id >= dev->data->nb_tx_queues)
- return -ENODEV;
-#endif
- RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP);
- txq = dev->data->tx_queues[queue_id];
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return -EINVAL;
+
+ tds = rte_eth_burst_api[port_id].tx_descriptor_status;
+ RTE_FUNC_PTR_OR_ERR_RET(tds, -ENOTSUP);
- return (*dev->tx_descriptor_status)(txq, offset);
+ return (tds)(port_id, queue_id, offset);
}
/**
@@ -117,9 +117,6 @@ struct rte_eth_rxtx_callback {
struct rte_eth_dev {
eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. */
eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */
- eth_rx_descriptor_status_t rx_descriptor_status; /**< Check the status of a Rx descriptor. */
- eth_tx_descriptor_status_t tx_descriptor_status; /**< Check the status of a Tx descriptor. */
-
/**
* Next two fields are per-device data but *data is shared between
* primary and secondary processes and *process_private is per-process
@@ -253,9 +253,13 @@ EXPERIMENTAL {
# added in 21.11
rte_eth_burst_api;
rte_eth_get_rx_burst;
+ rte_eth_get_rx_desc_st;
rte_eth_get_tx_burst;
+ rte_eth_get_tx_desc_st;
rte_eth_get_tx_prep;
rte_eth_set_rx_burst;
+ rte_eth_set_rx_desc_st;
rte_eth_set_tx_burst;
+ rte_eth_set_tx_desc_st;
rte_eth_set_tx_prep;
};