@@ -307,6 +307,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);
int igb_pf_host_configure(struct rte_eth_dev *eth_dev);
+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
/*
* RX/TX EM function prototypes
*/
@@ -343,6 +349,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
void igb_pf_host_uninit(struct rte_eth_dev *dev);
#endif /* _E1000_ETHDEV_H_ */
@@ -166,6 +166,8 @@ static const struct eth_dev_ops eth_em_ops = {
.mac_addr_add = eth_em_rar_set,
.mac_addr_remove = eth_em_rar_clear,
.set_mc_addr_list = eth_em_set_mc_addr_list,
+ .rxq_info_get = em_rxq_info_get,
+ .txq_info_get = em_txq_info_get,
};
/**
@@ -1881,3 +1881,41 @@ eth_em_tx_init(struct rte_eth_dev *dev)
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
+
+void
+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct em_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+
+ qinfo->nb_desc = rxq->nb_rx_desc;
+ qinfo->max_desc = EM_MAX_RING_DESC;
+ qinfo->min_desc = EM_MIN_RING_DESC;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+}
+
+void
+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct em_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+ qinfo->max_desc = EM_MAX_RING_DESC;
+ qinfo->min_desc = EM_MIN_RING_DESC;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+}
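
Not part of the patch: a minimal caller sketch for the em hunk above, assuming the generic rte_eth_rx_queue_info_get() wrapper introduced alongside these callbacks dispatches to em_rxq_info_get() for em ports. The helper name, the port id width and the printed fields are illustrative only; call it after the port and its queues have been configured.

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical helper: dump what em_rxq_info_get() reports for one Rx queue. */
static void
show_em_rxq(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	/* The ethdev wrapper is expected to zero qinfo and then invoke the
	 * PMD's rxq_info_get callback (em_rxq_info_get() for em ports). */
	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;

	printf("port %u rxq %u: %u desc, mempool %s, rx_free_thresh %u, "
	       "scattered_rx %u\n",
	       port_id, queue_id, qinfo.nb_desc, qinfo.mp->name,
	       qinfo.conf.rx_free_thresh, qinfo.scattered_rx);
}
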
@@ -307,6 +307,8 @@ static const struct eth_dev_ops eth_igb_ops = {
.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
.filter_ctrl = eth_igb_filter_ctrl,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
+ .rxq_info_get = igb_rxq_info_get,
+ .txq_info_get = igb_txq_info_get,
.timesync_enable = igb_timesync_enable,
.timesync_disable = igb_timesync_disable,
.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
@@ -337,6 +339,8 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
+ .rxq_info_get = igb_rxq_info_get,
+ .txq_info_get = igb_txq_info_get,
.mac_addr_set = igbvf_default_mac_addr_set,
.get_reg_length = igbvf_get_reg_length,
.get_reg = igbvf_get_regs,
@@ -2516,3 +2516,39 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
}
}
+
+void
+igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct igb_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+
+ qinfo->nb_desc = rxq->nb_rx_desc;
+ qinfo->max_desc = IGB_MAX_RING_DESC;
+ qinfo->min_desc = IGB_MIN_RING_DESC;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+}
+
+void
+igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct igb_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+ qinfo->max_desc = IGB_MAX_RING_DESC;
+ qinfo->min_desc = IGB_MIN_RING_DESC;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+}
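
Likewise, not part of the patch: a hedged Tx-side sketch, assuming rte_eth_tx_queue_info_get() dispatches to igb_txq_info_get()/em_txq_info_get(). The helper name is hypothetical. Note that the igb callback above fills only nb_desc, the ring-size limits and the pthresh/hthresh/wthresh values, so conf.tx_free_thresh and conf.tx_rs_thresh should only be relied on for em ports, which do report them.

#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical helper: dump what the txq_info_get callbacks report. */
static void
show_txq(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;

	printf("port %u txq %u: %u desc, pthresh %u, hthresh %u, wthresh %u, "
	       "tx_free_thresh %u, tx_rs_thresh %u\n",
	       port_id, queue_id, qinfo.nb_desc,
	       qinfo.conf.tx_thresh.pthresh,
	       qinfo.conf.tx_thresh.hthresh,
	       qinfo.conf.tx_thresh.wthresh,
	       qinfo.conf.tx_free_thresh,
	       qinfo.conf.tx_rs_thresh);
}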