[dpdk-dev,v3,16/20] thunderx/nicvf: add tx queue start and stop support

Message ID 1465317632-11471-17-git-send-email-jerin.jacob@caviumnetworks.com (mailing list archive)
State Superseded, archived
Delegated to: Bruce Richardson
Headers

Commit Message

Jerin Jacob June 7, 2016, 4:40 p.m. UTC
  Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
Signed-off-by: Kamil Rytarowski <Kamil.Rytarowski@caviumnetworks.com>
Signed-off-by: Zyta Szpak <zyta.szpak@semihalf.com>
Signed-off-by: Slawomir Rosek <slawomir.rosek@semihalf.com>
Signed-off-by: Radoslaw Biernacki <rad@semihalf.com>
---
 drivers/net/thunderx/nicvf_ethdev.c | 68 +++++++++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)
  

Comments

Ferruh Yigit June 8, 2016, 5:46 p.m. UTC | #1
On 6/7/2016 5:40 PM, Jerin Jacob wrote:
> Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
> Signed-off-by: Kamil Rytarowski <Kamil.Rytarowski@caviumnetworks.com>
> Signed-off-by: Zyta Szpak <zyta.szpak@semihalf.com>
> Signed-off-by: Slawomir Rosek <slawomir.rosek@semihalf.com>
> Signed-off-by: Radoslaw Biernacki <rad@semihalf.com>

...

> +static inline int
> +nicvf_start_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
> +{
> +	struct nicvf_txq *txq;
> +	int ret;
> +
> +	if (dev->data->tx_queue_state[qidx] == 
> +	    RTE_ETH_QUEUE_STATE_STARTED)
Is line wrap required?

...
>  
>  static inline int
>  nicvf_configure_cpi(struct rte_eth_dev *dev)
> @@ -912,6 +960,24 @@ nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
>  }
>  
>  static int
> +nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
> +{
> +	if (qidx >= nicvf_pmd_priv(dev)->eth_dev->data->nb_tx_queues)
> +		return -EINVAL;
This check is already done by librte_ether

> +
> +	return nicvf_start_tx_queue(dev, qidx);
> +}
> +
> +static int
> +nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
> +{
> +	if (qidx >= nicvf_pmd_priv(dev)->eth_dev->data->nb_tx_queues)
> +		return -EINVAL;
Same here

> +
> +	return nicvf_stop_tx_queue(dev, qidx);
> +}
> +
...
  

Patch

diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index ba32803..baa2e7a 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -90,6 +90,8 @@  static int nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 				       struct rte_eth_rss_conf *rss_conf);
 static int nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx);
 static int nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx);
+static int nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx);
+static int nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx);
 static int nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 				    uint16_t nb_desc, unsigned int socket_id,
 				    const struct rte_eth_rxconf *rx_conf,
@@ -596,6 +598,52 @@  nicvf_tx_queue_reset(struct nicvf_txq *txq)
 	txq->xmit_bufs = 0;
 }
 
+/*
+ * Start TX queue @qidx: configure the HW send queue (SQ) and mark the
+ * queue started in dev->data->tx_queue_state.
+ *
+ * Idempotent: returns 0 immediately if the queue is already started.
+ * Returns 0 on success, or the non-zero error from
+ * nicvf_qset_sq_config(); on that error path the SQ is reclaimed so
+ * HW state is not left half-configured.
+ */
+static inline int
+nicvf_start_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	struct nicvf_txq *txq;
+	int ret;
+
+	if (dev->data->tx_queue_state[qidx] ==
+	    RTE_ETH_QUEUE_STATE_STARTED)
+		return 0;
+
+	txq = dev->data->tx_queues[qidx];
+	/* Clear any stale cached pool pointer from a previous run;
+	 * presumably re-learned on first transmit -- TODO confirm. */
+	txq->pool = NULL;
+	ret = nicvf_qset_sq_config(nicvf_pmd_priv(dev), qidx, txq);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure sq %d %d", qidx, ret);
+		goto config_sq_error;
+	}
+
+	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+	return ret;
+
+config_sq_error:
+	nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
+	return ret;
+}
+
+/*
+ * Stop TX queue @qidx: reclaim the HW send queue, free any mbufs still
+ * held by the queue, reset the SW ring bookkeeping, and mark the queue
+ * stopped in dev->data->tx_queue_state.
+ *
+ * Idempotent: returns 0 immediately if the queue is already stopped.
+ * If nicvf_qset_sq_reclaim() fails, the error is logged and returned,
+ * but teardown still proceeds and the queue is marked stopped.
+ */
+static inline int
+nicvf_stop_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	struct nicvf_txq *txq;
+	int ret;
+
+	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+		return 0;
+
+	ret = nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
+	if (ret)
+		PMD_INIT_LOG(ERR, "Failed to reclaim sq %d %d", qidx, ret);
+
+	txq = dev->data->tx_queues[qidx];
+	nicvf_tx_queue_release_mbufs(txq);
+	nicvf_tx_queue_reset(txq);
+
+	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return ret;
+}
 
 static inline int
 nicvf_configure_cpi(struct rte_eth_dev *dev)
@@ -912,6 +960,24 @@  nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
 }
 
 static int
+nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	/* eth_dev_ops .tx_queue_start callback.  NOTE(review): this qidx
+	 * range check duplicates validation already performed by the
+	 * librte_ether layer before the callback is invoked. */
+	if (qidx >= nicvf_pmd_priv(dev)->eth_dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	return nicvf_start_tx_queue(dev, qidx);
+}
+
+static int
+nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	/* eth_dev_ops .tx_queue_stop callback.  NOTE(review): this qidx
+	 * range check duplicates validation already performed by the
+	 * librte_ether layer before the callback is invoked. */
+	if (qidx >= nicvf_pmd_priv(dev)->eth_dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	return nicvf_stop_tx_queue(dev, qidx);
+}
+
+static int
 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			 uint16_t nb_desc, unsigned int socket_id,
 			 const struct rte_eth_rxconf *rx_conf,
@@ -1140,6 +1206,8 @@  static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
 	.rx_queue_start           = nicvf_dev_rx_queue_start,
 	.rx_queue_stop            = nicvf_dev_rx_queue_stop,
+	.tx_queue_start           = nicvf_dev_tx_queue_start,
+	.tx_queue_stop            = nicvf_dev_tx_queue_stop,
 	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
 	.rx_queue_release         = nicvf_dev_rx_queue_release,
 	.rx_queue_count           = nicvf_dev_rx_queue_count,