On 5/6/2020 8:52 PM, Stephen Hemminger wrote:
> These functions are useful for applications and debugging.
> The netvsc PMD also transparently handles the rx/tx descriptor
> functions for underlying VF device.
>
> Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
<...>
> +int hn_dev_rx_queue_status(void *arg, uint16_t offset)
> +{
> + const struct hn_rx_queue *rxq = arg;
> +
> + hn_process_events(rxq->hv, rxq->queue_id, 0);
> + if (offset >= rxq->rx_ring->capacity)
> + return -EINVAL;
> + else if (offset < rte_ring_count(rxq->rx_ring))
Is this logic correct? Can we compare 'offset' with the used-entry count of a
circular buffer — i.e. does a descriptor offset map directly to a ring position here?
> + return RTE_ETH_RX_DESC_DONE;
> + else
> + return RTE_ETH_RX_DESC_AVAIL;
> +}
Can't there also be descriptors with 'RTE_ETH_RX_DESC_UNAVAIL' status? Offsets beyond
the number of usable descriptors should report that instead of being lumped into the
other cases.
@@ -875,8 +875,11 @@ static const struct eth_dev_ops hn_eth_dev_ops = {
.tx_queue_setup = hn_dev_tx_queue_setup,
.tx_queue_release = hn_dev_tx_queue_release,
.tx_done_cleanup = hn_dev_tx_done_cleanup,
+ .tx_descriptor_status = hn_dev_tx_descriptor_status,
.rx_queue_setup = hn_dev_rx_queue_setup,
.rx_queue_release = hn_dev_rx_queue_release,
+ .rx_queue_count = hn_dev_rx_queue_count,
+ .rx_descriptor_status = hn_dev_rx_queue_status,
.link_update = hn_dev_link_update,
.stats_get = hn_dev_stats_get,
.stats_reset = hn_dev_stats_reset,
@@ -369,6 +369,20 @@ hn_dev_tx_queue_release(void *arg)
rte_free(txq);
}
+int hn_dev_tx_descriptor_status(void *arg, uint16_t offset)
+{
+ const struct hn_tx_queue *txq = arg;
+ uint16_t used;
+
+ hn_process_events(txq->hv, txq->queue_id, 0);
+
+ used = rte_mempool_in_use_count(txq->txdesc_pool);
+ if (offset < used)
+ return RTE_ETH_TX_DESC_FULL;
+ else
+ return RTE_ETH_TX_DESC_DONE;
+}
+
static void
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
@@ -966,6 +980,31 @@ hn_dev_rx_queue_release(void *arg)
hn_rx_queue_free(rxq, true);
}
+/*
+ * Get the number of used descriptor in a rx queue
+ * For this device that means how many packets are pending in the ring.
+ */
+uint32_t
+hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct hn_rx_queue *rxq = dev->data->rx_queues[queue_id];
+
+ return rte_ring_count(rxq->rx_ring);
+}
+
+int hn_dev_rx_queue_status(void *arg, uint16_t offset)
+{
+ const struct hn_rx_queue *rxq = arg;
+
+ hn_process_events(rxq->hv, rxq->queue_id, 0);
+ if (offset >= rxq->rx_ring->capacity)
+ return -EINVAL;
+ else if (offset < rte_ring_count(rxq->rx_ring))
+ return RTE_ETH_RX_DESC_DONE;
+ else
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
int
hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
{
@@ -169,6 +169,7 @@ void hn_dev_tx_queue_release(void *arg);
void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
struct rte_eth_txq_info *qinfo);
int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
+int hn_dev_tx_descriptor_status(void *arg, uint16_t offset);
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
uint16_t queue_id,
@@ -181,6 +182,8 @@ int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void hn_dev_rx_queue_release(void *arg);
+uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
+int hn_dev_rx_queue_status(void *rxq, uint16_t offset);
void hn_dev_free_queues(struct rte_eth_dev *dev);
/* Check if VF is attached */
@@ -231,6 +234,8 @@ int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
+int hn_vf_tx_queue_status(struct hn_data *hv, uint16_t queue_id, uint16_t offset);
+
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id,