@@ -12,6 +12,7 @@ Link status = Y
Link status event = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
+Queue start/stop = Y
RSS hash = Y
Inner RSS = Y
Packet type parsing = Y
@@ -12,6 +12,7 @@ Link status = Y
Link status event = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
+Queue start/stop = Y
RSS hash = Y
Inner RSS = Y
Packet type parsing = Y
@@ -11,6 +11,7 @@ Link status = Y
Link status event = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
+Queue start/stop = Y
RSS hash = Y
Inner RSS = Y
Packet type parsing = Y
@@ -138,6 +138,21 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
}
static int
+cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx) /* CN10K wrapper over common stop */
+{
+ struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+ int rc;
+
+ rc = cnxk_nix_tx_queue_stop(eth_dev, qidx); /* common stop: disables SQ aura FC (see cnxk_ethdev.c) */
+ if (rc)
+ return rc;
+
+ /* Clear fc cache pkts to trigger worker stop */
+ txq->fc_cache_pkts = 0;
+ return 0;
+}
+
+static int
cn10k_nix_configure(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
@@ -169,6 +184,7 @@ nix_eth_dev_ops_override(void)
cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
+ cnxk_eth_dev_ops.tx_queue_stop = cn10k_nix_tx_queue_stop;
cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
}
@@ -136,6 +136,21 @@ cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
}
static int
+cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx) /* CN9K wrapper over common stop */
+{
+ struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+ int rc;
+
+ rc = cnxk_nix_tx_queue_stop(eth_dev, qidx); /* common stop: disables SQ aura FC (see cnxk_ethdev.c) */
+ if (rc)
+ return rc;
+
+ /* Clear fc cache pkts to trigger worker stop */
+ txq->fc_cache_pkts = 0;
+ return 0;
+}
+
+static int
cn9k_nix_configure(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
@@ -178,6 +193,7 @@ nix_eth_dev_ops_override(void)
cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
+ cnxk_eth_dev_ops.tx_queue_stop = cn9k_nix_tx_queue_stop;
cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
}
@@ -866,12 +866,104 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
return rc;
}
+static int
+cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid) /* start Tx queue: enable SQ aura FC */
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_sq *sq = &dev->sqs[qid];
+ int rc = -EINVAL; /* NOTE(review): dead initializer — always overwritten before use */
+
+ if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0; /* idempotent: already started */
+
+ rc = roc_nix_tm_sq_aura_fc(sq, true);
+ if (rc) {
+ plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
+ goto done;
+ }
+
+ data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED; /* mark started only on success */
+done:
+ return rc;
+}
+
+int
+cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid) /* non-static: wrapped by cn9k/cn10k */
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_sq *sq = &dev->sqs[qid];
+ int rc;
+
+ if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0; /* idempotent: already stopped */
+
+ rc = roc_nix_tm_sq_aura_fc(sq, false); /* disable flow control on the SQ's buffer aura */
+ if (rc) {
+ plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
+ rc);
+ goto done;
+ }
+
+ data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED; /* mark stopped only on success */
+done:
+ return rc;
+}
+
+static int
+cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid) /* start Rx queue: enable the RQ */
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_rq *rq = &dev->rqs[qid];
+ int rc;
+
+ if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0; /* idempotent: already started */
+
+ rc = roc_nix_rq_ena_dis(rq, true);
+ if (rc) {
+ plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
+ goto done;
+ }
+
+ data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED; /* mark started only on success */
+done:
+ return rc;
+}
+
+static int
+cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid) /* stop Rx queue: disable the RQ */
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_rq *rq = &dev->rqs[qid];
+ int rc;
+
+ if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0; /* idempotent: already stopped */
+
+ rc = roc_nix_rq_ena_dis(rq, false);
+ if (rc) {
+ plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
+ goto done;
+ }
+
+ data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED; /* mark stopped only on success */
+done:
+ return rc;
+}
+
/* CNXK platform independent eth dev ops */
struct eth_dev_ops cnxk_eth_dev_ops = {
.dev_infos_get = cnxk_nix_info_get,
.link_update = cnxk_nix_link_update,
.tx_queue_release = cnxk_nix_tx_queue_release,
.rx_queue_release = cnxk_nix_rx_queue_release,
+ .tx_queue_start = cnxk_nix_tx_queue_start,
+ .rx_queue_start = cnxk_nix_rx_queue_start,
+ .rx_queue_stop = cnxk_nix_rx_queue_stop,
.dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
};
@@ -214,6 +214,7 @@ int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
uint16_t nb_desc, uint16_t fp_rx_q_sz,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
+int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);