[v4,56/58] net/txgbe: add Rx and Tx descriptor status

Message ID 20201019085415.82207-57-jiawenwu@trustnetic.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series: net: txgbe PMD

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Jiawen Wu Oct. 19, 2020, 8:54 a.m. UTC
  Support checking the status of Rx and Tx descriptors.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 doc/guides/nics/features/txgbe.ini |   2 +
 drivers/net/txgbe/txgbe_ethdev.c   |   4 +
 drivers/net/txgbe/txgbe_ethdev.h   |   8 ++
 drivers/net/txgbe/txgbe_rxtx.c     | 180 +++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_rxtx.h     |   1 +
 5 files changed, 195 insertions(+)
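
For context, the three callbacks wired up here back the generic ethdev calls rte_eth_rx_queue_count(), rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status(). A minimal sketch of how an application might exercise them, assuming a port that is already configured and started (the queue and offset values below are placeholders):

#include <stdio.h>
#include <rte_ethdev.h>

static void
probe_queue_state(uint16_t port_id, uint16_t queue_id)
{
	int rc;

	/* How many Rx descriptors has the hardware completed so far? */
	rc = rte_eth_rx_queue_count(port_id, queue_id);
	if (rc >= 0)
		printf("Rx queue %u: %d done descriptors\n", queue_id, rc);

	/* Status of the Rx descriptor 32 slots past the current tail. */
	switch (rte_eth_rx_descriptor_status(port_id, queue_id, 32)) {
	case RTE_ETH_RX_DESC_AVAIL:	/* free for hardware to fill */
		break;
	case RTE_ETH_RX_DESC_DONE:	/* filled, awaiting software */
		break;
	case RTE_ETH_RX_DESC_UNAVAIL:	/* held by the driver */
		break;
	}

	/* Has the Tx descriptor 32 slots past the tail been sent? */
	if (rte_eth_tx_descriptor_status(port_id, queue_id, 32) ==
			RTE_ETH_TX_DESC_DONE)
		printf("Tx descriptor already transmitted\n");
}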
  

Comments

Ferruh Yigit Oct. 26, 2020, 2:54 p.m. UTC | #1
On 10/19/2020 9:54 AM, Jiawen Wu wrote:
> Support checking the status of Rx and Tx descriptors.
> 
> Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>

<...>

> @@ -480,6 +480,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
>   	PMD_INIT_FUNC_TRACE();
>   
>   	eth_dev->dev_ops = &txgbe_eth_dev_ops;
> +	eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
> +	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
> +	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
>   	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
>   	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
>   	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
> @@ -4250,6 +4253,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
>   	.timesync_adjust_time       = txgbe_timesync_adjust_time,
>   	.timesync_read_time         = txgbe_timesync_read_time,
>   	.timesync_write_time        = txgbe_timesync_write_time,
> +	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,

The '.tx_done_cleanup' callback is also added in this patch; can you please split 
it out into a separate patch?
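
For context, '.tx_done_cleanup' backs the generic rte_eth_tx_done_cleanup() API, a feature distinct from descriptor status: it lets an application reclaim the mbufs of already-transmitted packets on demand. A minimal sketch of a caller, assuming a configured and started port (the free count of 64 is a placeholder):

#include <rte_ethdev.h>

/* Free the mbufs of up to 64 transmitted packets on the queue;
 * passing 0 as the last argument means "free as many as possible".
 * Returns the number of packets cleaned, or a negative errno
 * (e.g. -ENOTSUP when the PMD does not provide the callback).
 */
static int
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	return rte_eth_tx_done_cleanup(port_id, queue_id, 64);
}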

<...>

> +int
> +txgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
> +{
> +	volatile struct txgbe_rx_desc *rxdp;
> +	struct txgbe_rx_queue *rxq = rx_queue;
> +	uint32_t desc;
> +
> +	if (unlikely(offset >= rxq->nb_rx_desc))
> +		return 0;
> +	desc = rxq->rx_tail + offset;
> +	if (desc >= rxq->nb_rx_desc)
> +		desc -= rxq->nb_rx_desc;
> +
> +	rxdp = &rxq->rx_ring[desc];
> +	return !!(rxdp->qw1.lo.status &
> +			rte_cpu_to_le_32(TXGBE_RXD_STAT_DD));
> +}

The 'descriptor_done' dev_ops is removed in this version of the patch, but some 
leftovers remain, such as the function above and the related function declarations; 
can you please clean them all up?


Dropping this patch from the patchset because of the issues above. Can you 
please send a new version as an incremental patch?
  

Patch

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 1684bcc7e..7c457fede 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -37,6 +37,8 @@  Inner L3 checksum    = P
 Inner L4 checksum    = P
 Packet type parsing  = Y
 Timesync             = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
 Basic stats          = Y
 Extended stats       = Y
 Stats per queue      = Y
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 77292f07f..87824b140 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -480,6 +480,9 @@  eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &txgbe_eth_dev_ops;
+	eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
+	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
+	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
 	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
 	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
 	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
@@ -4250,6 +4253,7 @@  static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.timesync_adjust_time       = txgbe_timesync_adjust_time,
 	.timesync_read_time         = txgbe_timesync_read_time,
 	.timesync_write_time        = txgbe_timesync_write_time,
+	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
 };
 
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 2ee0290b7..b708e6d43 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -219,6 +219,14 @@  int  txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc, unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf);
 
+uint32_t txgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
+		uint16_t rx_queue_id);
+
+int txgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
 int txgbe_dev_rx_init(struct rte_eth_dev *dev);
 
 void txgbe_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 4dee4ac16..c78a8b6b0 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -1951,6 +1951,95 @@  txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
 	}
 }
 
+static int
+txgbe_tx_done_cleanup_full(struct txgbe_tx_queue *txq, uint32_t free_cnt)
+{
+	struct txgbe_tx_entry *swr_ring = txq->sw_ring;
+	uint16_t i, tx_last, tx_id;
+	uint16_t nb_tx_free_last;
+	uint16_t nb_tx_to_clean;
+	uint32_t pkt_cnt;
+
+	/* Start freeing mbufs from the descriptor following tx_tail */
+	tx_last = txq->tx_tail;
+	tx_id  = swr_ring[tx_last].next_id;
+
+	if (txq->nb_tx_free == 0 && txgbe_xmit_cleanup(txq))
+		return 0;
+
+	nb_tx_to_clean = txq->nb_tx_free;
+	nb_tx_free_last = txq->nb_tx_free;
+	if (!free_cnt)
+		free_cnt = txq->nb_tx_desc;
+
+	/* Loop through swr_ring to count the number of
+	 * freeable mbufs and packets.
+	 */
+	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+		for (i = 0; i < nb_tx_to_clean &&
+			pkt_cnt < free_cnt &&
+			tx_id != tx_last; i++) {
+			if (swr_ring[tx_id].mbuf != NULL) {
+				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+				swr_ring[tx_id].mbuf = NULL;
+
+				/*
+				 * last segment in the packet,
+				 * increment packet count
+				 */
+				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+			}
+
+			tx_id = swr_ring[tx_id].next_id;
+		}
+
+		if (pkt_cnt < free_cnt) {
+			if (txgbe_xmit_cleanup(txq))
+				break;
+
+			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+			nb_tx_free_last = txq->nb_tx_free;
+		}
+	}
+
+	return (int)pkt_cnt;
+}
+
+static int
+txgbe_tx_done_cleanup_simple(struct txgbe_tx_queue *txq,
+			uint32_t free_cnt)
+{
+	int i, n, cnt;
+
+	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+		free_cnt = txq->nb_tx_desc;
+
+	cnt = free_cnt - free_cnt % txq->tx_free_thresh;
+
+	for (i = 0; i < cnt; i += n) {
+		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
+			break;
+
+		n = txgbe_tx_free_bufs(txq);
+
+		if (n == 0)
+			break;
+	}
+
+	return i;
+}
+
+int
+txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+	struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+	if (txq->offloads == 0 &&
+		txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)
+		return txgbe_tx_done_cleanup_simple(txq, free_cnt);
+
+	return txgbe_tx_done_cleanup_full(txq, free_cnt);
+}
+
 static void __rte_cold
 txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
 {
@@ -2536,6 +2625,97 @@  txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 }
 
+uint32_t
+txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define TXGBE_RXQ_SCAN_INTERVAL 4
+	volatile struct txgbe_rx_desc *rxdp;
+	struct txgbe_rx_queue *rxq;
+	uint32_t desc = 0;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	rxdp = &rxq->rx_ring[rxq->rx_tail];
+
+	while ((desc < rxq->nb_rx_desc) &&
+		(rxdp->qw1.lo.status &
+			rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))) {
+		desc += TXGBE_RXQ_SCAN_INTERVAL;
+		rxdp += TXGBE_RXQ_SCAN_INTERVAL;
+		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+			rxdp = &(rxq->rx_ring[rxq->rx_tail +
+				desc - rxq->nb_rx_desc]);
+	}
+
+	return desc;
+}
+
+int
+txgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+	volatile struct txgbe_rx_desc *rxdp;
+	struct txgbe_rx_queue *rxq = rx_queue;
+	uint32_t desc;
+
+	if (unlikely(offset >= rxq->nb_rx_desc))
+		return 0;
+	desc = rxq->rx_tail + offset;
+	if (desc >= rxq->nb_rx_desc)
+		desc -= rxq->nb_rx_desc;
+
+	rxdp = &rxq->rx_ring[desc];
+	return !!(rxdp->qw1.lo.status &
+			rte_cpu_to_le_32(TXGBE_RXD_STAT_DD));
+}
+
+int
+txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	struct txgbe_rx_queue *rxq = rx_queue;
+	volatile uint32_t *status;
+	uint32_t nb_hold, desc;
+
+	if (unlikely(offset >= rxq->nb_rx_desc))
+		return -EINVAL;
+
+	nb_hold = rxq->nb_rx_hold;
+	if (offset >= rxq->nb_rx_desc - nb_hold)
+		return RTE_ETH_RX_DESC_UNAVAIL;
+
+	desc = rxq->rx_tail + offset;
+	if (desc >= rxq->nb_rx_desc)
+		desc -= rxq->nb_rx_desc;
+
+	status = &rxq->rx_ring[desc].qw1.lo.status;
+	if (*status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))
+		return RTE_ETH_RX_DESC_DONE;
+
+	return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+	struct txgbe_tx_queue *txq = tx_queue;
+	volatile uint32_t *status;
+	uint32_t desc;
+
+	if (unlikely(offset >= txq->nb_tx_desc))
+		return -EINVAL;
+
+	desc = txq->tx_tail + offset;
+	if (desc >= txq->nb_tx_desc) {
+		desc -= txq->nb_tx_desc;
+		if (desc >= txq->nb_tx_desc)
+			desc -= txq->nb_tx_desc;
+	}
+
+	status = &txq->tx_ring[desc].dw3;
+	if (*status & rte_cpu_to_le_32(TXGBE_TXD_DD))
+		return RTE_ETH_TX_DESC_DONE;
+
+	return RTE_ETH_TX_DESC_FULL;
+}
+
 void __rte_cold
 txgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 8c36698c3..120c2547f 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -407,6 +407,7 @@  struct txgbe_txq_ops {
 void txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq);
 
 void txgbe_set_rx_function(struct rte_eth_dev *dev);
+int txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
 
 uint64_t txgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);