[v3,24/25] net/spnic: support Tx/Rx queue start/stop

Message ID 1bc4636e940682b02279711ae1714b22c958086a.1640332922.git.songyl@ramaxel.com (mailing list archive)
State Superseded, archived
Headers
Series Net/SPNIC: support SPNIC into DPDK 22.03 |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Yanling Song Dec. 24, 2021, 8:32 a.m. UTC
This commit supports starting or stopping a specified Rx/Tx queue.
For Rx queues:
    When starting an Rx queue, mbufs are allocated and the RQ WQEs are
filled with the mbuf info, then the queue id is added to the RSS
indirection table. If the first Rx queue is started, the valid bit in
the function table is set so that packets can be received by the host.
    When stopping an Rx queue, the PMD driver polls the Rx queue
until it is empty and releases the mbufs, then removes the queue id
from the RSS indirection table. If the last Rx queue is stopped,
the valid bit in the function table is cleared.

For Tx queues:
    When stopping a Tx queue, the PMD driver waits until all Tx
packets are sent and then releases all mbufs.

Signed-off-by: Yanling Song <songyl@ramaxel.com>
---
 drivers/net/spnic/base/spnic_nic_cfg.c |  33 ++++
 drivers/net/spnic/base/spnic_nic_cfg.h |  13 ++
 drivers/net/spnic/spnic_ethdev.c       |  82 +++++++++
 drivers/net/spnic/spnic_rx.c           | 222 +++++++++++++++++++++++++
 drivers/net/spnic/spnic_rx.h           |   4 +
 5 files changed, 354 insertions(+)
  

Patch

diff --git a/drivers/net/spnic/base/spnic_nic_cfg.c b/drivers/net/spnic/base/spnic_nic_cfg.c
index e740138b8e..862400de71 100644
--- a/drivers/net/spnic/base/spnic_nic_cfg.c
+++ b/drivers/net/spnic/base/spnic_nic_cfg.c
@@ -1289,6 +1289,39 @@  int spnic_vf_get_default_cos(void *hwdev, u8 *cos_id)
 	return 0;
 }
 
+/**
+ * Ask the uCode to flush one Rx queue via the command queue.
+ *
+ * Builds a SPNIC_UCODE_CMD_SET_RQ_FLUSH command carrying the local RQ id
+ * and issues it synchronously; a non-zero out_param from firmware is
+ * treated as a failure.
+ *
+ * @param[in] hwdev
+ *   Device pointer to hwdev
+ * @param[in] q_id
+ *   Rx queue id to flush
+ * @retval zero : Success
+ * @retval -ENOMEM : Command buffer allocation failed
+ * @retval -EFAULT : Command send or firmware completion failed
+ */
+int spnic_set_rq_flush(void *hwdev, u16 q_id)
+{
+	struct spnic_cmd_set_rq_flush *rq_flush_msg = NULL;
+	struct spnic_cmd_buf *cmd_buf = NULL;
+	/* Pre-set to EIO so an out_param firmware never wrote reads as failure */
+	u64 out_param = EIO;
+	int err;
+
+	cmd_buf = spnic_alloc_cmd_buf(hwdev);
+	if (!cmd_buf) {
+		PMD_DRV_LOG(ERR, "Failed to allocate cmd buf\n");
+		return -ENOMEM;
+	}
+
+	cmd_buf->size = sizeof(*rq_flush_msg);
+
+	rq_flush_msg = cmd_buf->buf;
+	rq_flush_msg->local_rq_id = q_id;
+	/*
+	 * NOTE(review): local_rq_id appears to overlay "value" in a union;
+	 * the whole 32-bit word is byte-swapped for the uCode — confirm
+	 * against the spnic_cmd_set_rq_flush layout.
+	 */
+	rq_flush_msg->value = cpu_to_be32(rq_flush_msg->value);
+
+	err = spnic_cmdq_direct_resp(hwdev, SPNIC_MOD_L2NIC,
+				     SPNIC_UCODE_CMD_SET_RQ_FLUSH, cmd_buf,
+				     &out_param, 0);
+	if (err || out_param != 0) {
+		PMD_DRV_LOG(ERR, "Failed to set rq flush, err:%d, out_param: %" PRIu64 "",
+			    err, out_param);
+		err = -EFAULT;
+	}
+
+	spnic_free_cmd_buf(cmd_buf);
+
+	return err;
+}
+
 static int _mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in,
 				 u16 in_size, void *buf_out, u16 *out_size)
 {
diff --git a/drivers/net/spnic/base/spnic_nic_cfg.h b/drivers/net/spnic/base/spnic_nic_cfg.h
index 560c9e4e7d..7c39a32d7c 100644
--- a/drivers/net/spnic/base/spnic_nic_cfg.h
+++ b/drivers/net/spnic/base/spnic_nic_cfg.h
@@ -1069,6 +1069,19 @@  int spnic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl);
  */
 int spnic_vf_get_default_cos(void *hwdev, u8 *cos_id);
 
+/**
+ * Flush rx queue resource
+ *
+ * @param[in] hwdev
+ *   Device pointer to hwdev
+ * @param[in] q_id
+ *   rx queue id
+ *
+ * @retval zero : Success
+ * @retval non-zero : Failure
+ */
+int spnic_set_rq_flush(void *hwdev, u16 q_id);
+
 /**
  * Get service feature HW supported
  *
diff --git a/drivers/net/spnic/spnic_ethdev.c b/drivers/net/spnic/spnic_ethdev.c
index 430e8cb340..d98637c971 100644
--- a/drivers/net/spnic/spnic_ethdev.c
+++ b/drivers/net/spnic/spnic_ethdev.c
@@ -974,6 +974,80 @@  static void spnic_deinit_mac_addr(struct rte_eth_dev *eth_dev)
 	spnic_delete_mc_addr_list(nic_dev);
 }
 
+/**
+ * Start a specified Rx queue.
+ *
+ * Rearms mbufs, re-adds the queue to the RSS indirection table (via
+ * spnic_start_rq) and marks the queue state as started. Out-of-range
+ * queue ids are silently ignored (ethdev validates them first).
+ *
+ * Both parameters are used, so the __rte_unused attribute the original
+ * carried was incorrect and has been dropped.
+ */
+static int spnic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rq_id)
+{
+	struct spnic_rxq *rxq = NULL;
+	int rc;
+
+	if (rq_id < dev->data->nb_rx_queues) {
+		rxq = dev->data->rx_queues[rq_id];
+
+		rc = spnic_start_rq(dev, rxq);
+		if (rc) {
+			PMD_DRV_LOG(ERR, "Start rx queue failed, eth_dev:%s, queue_idx:%d",
+					dev->data->name, rq_id);
+			return rc;
+		}
+
+		dev->data->rx_queue_state[rq_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	}
+
+	return 0;
+}
+
+/**
+ * Stop a specified Rx queue.
+ *
+ * Removes the queue from the RSS indirection table, drains and frees its
+ * mbufs (via spnic_stop_rq) and marks the queue state as stopped.
+ * Out-of-range queue ids are silently ignored (ethdev validates them).
+ *
+ * Both parameters are used, so the __rte_unused attribute the original
+ * carried was incorrect and has been dropped.
+ */
+static int spnic_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rq_id)
+{
+	struct spnic_rxq *rxq = NULL;
+	int rc;
+
+	if (rq_id < dev->data->nb_rx_queues) {
+		rxq = dev->data->rx_queues[rq_id];
+
+		rc = spnic_stop_rq(dev, rxq);
+		if (rc) {
+			PMD_DRV_LOG(ERR, "Stop rx queue failed, eth_dev:%s, queue_idx:%d",
+					dev->data->name, rq_id);
+			return rc;
+		}
+
+		dev->data->rx_queue_state[rq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	}
+
+	return 0;
+}
+
+/**
+ * Start a specified Tx queue.
+ *
+ * Hardware needs no per-SQ enable here; only the ethdev queue state is
+ * updated. A bounds check on sq_id is added for consistency with the
+ * Rx start/stop handlers, and the incorrect __rte_unused attributes
+ * (both parameters are used) have been dropped.
+ */
+static int spnic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t sq_id)
+{
+	if (sq_id < dev->data->nb_tx_queues) {
+		PMD_DRV_LOG(INFO, "Start tx queue, eth_dev:%s, queue_idx:%d",
+			   dev->data->name, sq_id);
+		dev->data->tx_queue_state[sq_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	}
+
+	return 0;
+}
+
+/**
+ * Stop a specified Tx queue.
+ *
+ * Waits for in-flight Tx packets to complete and frees their mbufs (via
+ * spnic_stop_sq), then marks the queue state as stopped. Out-of-range
+ * queue ids are silently ignored (ethdev validates them first).
+ *
+ * Both parameters are used, so the __rte_unused attribute the original
+ * carried was incorrect and has been dropped.
+ */
+static int spnic_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t sq_id)
+{
+	struct spnic_txq *txq = NULL;
+	int rc;
+
+	if (sq_id < dev->data->nb_tx_queues) {
+		txq = dev->data->tx_queues[sq_id];
+		rc = spnic_stop_sq(txq);
+		if (rc) {
+			PMD_DRV_LOG(ERR, "Stop tx queue failed, eth_dev:%s, queue_idx:%d",
+				   dev->data->name, sq_id);
+			return rc;
+		}
+
+		dev->data->tx_queue_state[sq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	}
+
+	return 0;
+}
+
 int spnic_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
 				    uint16_t queue_id)
 {
@@ -2698,6 +2772,10 @@  static const struct eth_dev_ops spnic_pmd_ops = {
 	.tx_queue_setup                = spnic_tx_queue_setup,
 	.rx_queue_release              = spnic_rx_queue_release,
 	.tx_queue_release              = spnic_tx_queue_release,
+	.rx_queue_start                = spnic_dev_rx_queue_start,
+	.rx_queue_stop                 = spnic_dev_rx_queue_stop,
+	.tx_queue_start                = spnic_dev_tx_queue_start,
+	.tx_queue_stop                 = spnic_dev_tx_queue_stop,
 	.rx_queue_intr_enable          = spnic_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable         = spnic_dev_rx_queue_intr_disable,
 	.dev_start                     = spnic_dev_start,
@@ -2737,6 +2815,10 @@  static const struct eth_dev_ops spnic_pmd_vf_ops = {
 	.tx_queue_setup                = spnic_tx_queue_setup,
 	.rx_queue_intr_enable          = spnic_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable         = spnic_dev_rx_queue_intr_disable,
+	.rx_queue_start                = spnic_dev_rx_queue_start,
+	.rx_queue_stop                 = spnic_dev_rx_queue_stop,
+	.tx_queue_start                = spnic_dev_tx_queue_start,
+	.tx_queue_stop                 = spnic_dev_tx_queue_stop,
 	.dev_start                     = spnic_dev_start,
 	.link_update                   = spnic_link_update,
 	.rx_queue_release              = spnic_rx_queue_release,
diff --git a/drivers/net/spnic/spnic_rx.c b/drivers/net/spnic/spnic_rx.c
index 92cb387bed..8b1c13aa40 100644
--- a/drivers/net/spnic/spnic_rx.c
+++ b/drivers/net/spnic/spnic_rx.c
@@ -467,6 +467,228 @@  void spnic_remove_rq_from_rx_queue_list(struct spnic_nic_dev *nic_dev,
 	nic_dev->num_rss = rss_queue_count;
 }
 
+/**
+ * Walk the RQ CQE ring from the local consumer index and free every mbuf
+ * whose WQE the hardware has completed (DONE or FLUSH set), including
+ * continuation WQEs of multi-buffer jumbo frames. Stops at the first
+ * CQE that is neither done nor flushed, then advances the local CI by
+ * the number of entries released.
+ */
+static void spnic_rx_queue_release_mbufs(struct spnic_rxq *rxq)
+{
+	u16 sw_ci, ci_mask, free_wqebbs;
+	u16 rx_buf_len;
+	u32 status, vlan_len, pkt_len;
+	/* Bytes of the current jumbo frame still spread over following WQEs */
+	u32 pkt_left_len = 0;
+	u32 nr_released = 0;
+	struct spnic_rx_info *rx_info;
+	volatile struct spnic_rq_cqe *rx_cqe;
+
+	sw_ci = spnic_get_rq_local_ci(rxq);
+	rx_info = &rxq->rx_info[sw_ci];
+	rx_cqe = &rxq->rx_cqe[sw_ci];
+	/* +1: free-wqebb count excludes the reserved entry of the ring */
+	free_wqebbs = (u16)(spnic_get_rq_free_wqebb(rxq) + 1);
+	status = rx_cqe->status;
+	ci_mask = rxq->q_mask;
+
+	/* Loop until every WQE in the ring is free or an unfinished CQE is hit */
+	while (free_wqebbs < rxq->q_depth) {
+		rx_buf_len = rxq->buf_len;
+		if (pkt_left_len != 0) {
+			/* Continuation WQE of a jumbo frame: consume one buffer's worth */
+			pkt_left_len = (pkt_left_len <= rx_buf_len) ? 0 :
+				       (pkt_left_len - rx_buf_len);
+		} else if (SPNIC_GET_RX_FLUSH(status)) {
+			/* WQE released by the flush command: single entry, no payload */
+			pkt_left_len = 0;
+		} else if (SPNIC_GET_RX_DONE(status)) {
+			/* Completed packet: single WQE, or the first WQE of a jumbo frame */
+			vlan_len = rx_cqe->vlan_len;
+			pkt_len = SPNIC_GET_RX_PKT_LEN(vlan_len);
+			pkt_left_len = (pkt_len <= rx_buf_len) ? 0 :
+				       (pkt_len - rx_buf_len);
+		} else {
+			/* Neither done nor flushed: hardware still owns it, stop here */
+			break;
+		}
+
+		rte_pktmbuf_free(rx_info->mbuf);
+
+		rx_info->mbuf = NULL;
+		rx_cqe->status = 0;
+		nr_released++;
+		free_wqebbs++;
+
+		/* Advance to the next CQE (ring wrap via the queue mask) */
+		sw_ci++;
+		sw_ci &= ci_mask;
+		rx_info = &rxq->rx_info[sw_ci];
+		rx_cqe = &rxq->rx_cqe[sw_ci];
+		status = rx_cqe->status;
+	}
+
+	spnic_update_rq_local_ci(rxq, nr_released);
+}
+
+/**
+ * Poll an RQ until every WQE has been returned by hardware and released,
+ * or until SPNIC_FLUSH_QUEUE_TIMEOUT expires.
+ *
+ * Repeatedly releases completed mbufs so the free-WQEBB count can reach
+ * the full queue depth.
+ *
+ * @retval zero : Queue drained completely
+ * @retval -EFAULT : Timed out with WQEs still outstanding
+ */
+int spnic_poll_rq_empty(struct spnic_rxq *rxq)
+{
+	unsigned long timeout;
+	int free_wqebb;
+	int err = -EFAULT;
+
+	timeout = msecs_to_jiffies(SPNIC_FLUSH_QUEUE_TIMEOUT) + jiffies;
+	do {
+		/* +1: free-wqebb count excludes the reserved ring entry */
+		free_wqebb = spnic_get_rq_free_wqebb(rxq) + 1;
+		if (free_wqebb == rxq->q_depth) {
+			err = 0;
+			break;
+		}
+		spnic_rx_queue_release_mbufs(rxq);
+		rte_delay_us(1);
+	} while (time_before(jiffies, timeout));
+
+	return err;
+}
+
+/**
+ * Scan the whole CQE ring of an RQ and report its completion status;
+ * used for diagnostics when draining the queue times out.
+ *
+ * @param[out] cqe_done_cnt
+ *   Number of CQEs with DONE or FLUSH set
+ * @param[out] cqe_hole_cnt
+ *   Number of gaps, i.e. runs of incomplete CQEs following completed ones
+ * @param[out] head_ci
+ *   Current local consumer index
+ * @param[out] head_done
+ *   DONE bit of the CQE at the consumer index
+ */
+void spnic_dump_cqe_status(struct spnic_rxq *rxq, u32 *cqe_done_cnt,
+			    u32 *cqe_hole_cnt, u32 *head_ci,
+			    u32 *head_done)
+{
+	u16 sw_ci;
+	u16 avail_pkts = 0;
+	u16 hit_done = 0;
+	u16 cqe_hole = 0;
+	u32 status;
+	volatile struct spnic_rq_cqe *rx_cqe;
+
+	sw_ci = spnic_get_rq_local_ci(rxq);
+	rx_cqe = &rxq->rx_cqe[sw_ci];
+	status = rx_cqe->status;
+	*head_done = SPNIC_GET_RX_DONE(status);
+	*head_ci = sw_ci;
+
+	for (sw_ci = 0; sw_ci < rxq->q_depth; sw_ci++) {
+		rx_cqe = &rxq->rx_cqe[sw_ci];
+
+		/*
+		 * A CQE is completed when either DONE or FLUSH is set
+		 * (matching the release logic). The original condition
+		 * "!done || !flush" skipped done-but-not-flushed CQEs
+		 * and miscounted them as holes; it must be "&&".
+		 */
+		status = rx_cqe->status;
+		if (!SPNIC_GET_RX_DONE(status) &&
+		    !SPNIC_GET_RX_FLUSH(status)) {
+			if (hit_done) {
+				cqe_hole++;
+				hit_done = 0;
+			}
+
+			continue;
+		}
+
+		avail_pkts++;
+		hit_done = 1;
+	}
+
+	*cqe_done_cnt = avail_pkts;
+	*cqe_hole_cnt = cqe_hole;
+}
+
+/**
+ * Stop one Rx queue: take it out of the RSS indirection table, disable
+ * the vport when it was the last active RQ, ask the uCode to flush the
+ * queue and drain all remaining mbufs.
+ *
+ * On any failure the queue is restored: re-added to the RX queue list,
+ * the indirection table is refilled and the queue interrupt re-enabled.
+ *
+ * @retval zero : Success
+ * @retval non-zero : Failure, queue left in its previous (started) state
+ */
+int spnic_stop_rq(struct rte_eth_dev *eth_dev, struct spnic_rxq *rxq)
+{
+	struct spnic_nic_dev *nic_dev = rxq->nic_dev;
+	u32 cqe_done_cnt = 0;
+	u32 cqe_hole_cnt = 0;
+	u32 head_ci, head_done;
+	int err;
+
+	/* disable rxq intr */
+	spnic_dev_rx_queue_intr_disable(eth_dev, rxq->q_id);
+
+	/* lock dev queue switch  */
+	rte_spinlock_lock(&nic_dev->queue_list_lock);
+
+	spnic_remove_rq_from_rx_queue_list(nic_dev, rxq->q_id);
+
+	/* Rebuild the indirection table without this queue */
+	if (nic_dev->rss_state == SPNIC_RSS_ENABLE) {
+		err = spnic_refill_indir_rqid(rxq);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Clear rq in indirect table failed, eth_dev:%s, queue_idx:%d\n",
+				    nic_dev->dev_name, rxq->q_id);
+			spnic_add_rq_to_rx_queue_list(nic_dev, rxq->q_id);
+			goto set_indir_failed;
+		}
+	}
+
+	/* Last active RQ gone: stop receiving to the host entirely */
+	if (nic_dev->num_rss == 0) {
+		err = spnic_set_vport_enable(nic_dev->hwdev, false);
+		if (err) {
+			PMD_DRV_LOG(ERR, "%s Disable vport failed, rc:%d",
+				    nic_dev->dev_name, err);
+			goto set_vport_failed;
+		}
+	}
+
+	/* unlock dev queue list switch  */
+	rte_spinlock_unlock(&nic_dev->queue_list_lock);
+
+	/* Send flush rq cmd to uCode */
+	err = spnic_set_rq_flush(nic_dev->hwdev, rxq->q_id);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Flush rq failed, eth_dev:%s, queue_idx:%d\n",
+			    nic_dev->dev_name, rxq->q_id);
+		goto rq_flush_failed;
+	}
+
+	/* Wait for hardware to return every WQE, releasing mbufs as they come */
+	err = spnic_poll_rq_empty(rxq);
+	if (err) {
+		spnic_dump_cqe_status(rxq, &cqe_done_cnt, &cqe_hole_cnt,
+				       &head_ci, &head_done);
+		PMD_DRV_LOG(ERR, "Poll rq empty timeout, eth_dev:%s, queue_idx:%d, "
+			    "mbuf_left:%d, cqe_done:%d, cqe_hole:%d, cqe[%d].done=%d\n",
+			    nic_dev->dev_name, rxq->q_id,
+			    rxq->q_depth - spnic_get_rq_free_wqebb(rxq),
+			    cqe_done_cnt, cqe_hole_cnt, head_ci, head_done);
+		goto poll_rq_failed;
+	}
+
+	return 0;
+
+	/*
+	 * Unwind: the later a step failed, the more must be undone. Flush and
+	 * poll failures re-take the lock before restoring the queue list.
+	 */
+poll_rq_failed:
+rq_flush_failed:
+	rte_spinlock_lock(&nic_dev->queue_list_lock);
+set_vport_failed:
+	spnic_add_rq_to_rx_queue_list(nic_dev, rxq->q_id);
+	if (nic_dev->rss_state == SPNIC_RSS_ENABLE)
+		(void)spnic_refill_indir_rqid(rxq);
+set_indir_failed:
+	rte_spinlock_unlock(&nic_dev->queue_list_lock);
+	spnic_dev_rx_queue_intr_enable(eth_dev, rxq->q_id);
+	return err;
+}
+
+/**
+ * Start one Rx queue: rearm its mbufs, add it to the RX queue list and
+ * the RSS indirection table, and enable the vport when it is the first
+ * active RQ. Re-enables the queue interrupt on exit.
+ *
+ * Fixes the "indrect" typo in the error log message.
+ *
+ * @retval zero : Success
+ * @retval non-zero : Indirection-table refill or vport enable failed
+ */
+int spnic_start_rq(struct rte_eth_dev *eth_dev, struct spnic_rxq *rxq)
+{
+	struct spnic_nic_dev *nic_dev = rxq->nic_dev;
+	int err = 0;
+
+	/* lock dev queue switch  */
+	rte_spinlock_lock(&nic_dev->queue_list_lock);
+
+	spnic_add_rq_to_rx_queue_list(nic_dev, rxq->q_id);
+
+	spnic_rearm_rxq_mbuf(rxq);
+
+	if (nic_dev->rss_state == SPNIC_RSS_ENABLE) {
+		err = spnic_refill_indir_rqid(rxq);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Refill rq to indirect table failed, eth_dev:%s, queue_idx:%d err:%d\n",
+				    nic_dev->dev_name, rxq->q_id, err);
+			spnic_remove_rq_from_rx_queue_list(nic_dev, rxq->q_id);
+		}
+	}
+
+	/* First active RQ: allow the NIC to deliver packets to the host */
+	if (rxq->nic_dev->num_rss == 1) {
+		err = spnic_set_vport_enable(nic_dev->hwdev, true);
+		if (err)
+			PMD_DRV_LOG(ERR, "%s enable vport failed, err:%d",
+				    nic_dev->dev_name, err);
+	}
+
+	/* unlock dev queue list switch  */
+	rte_spinlock_unlock(&nic_dev->queue_list_lock);
+
+	spnic_dev_rx_queue_intr_enable(eth_dev, rxq->q_id);
+
+	return err;
+}
 
 static inline uint64_t spnic_rx_vlan(uint32_t offload_type, uint32_t vlan_len,
 				      uint16_t *vlan_tci)
diff --git a/drivers/net/spnic/spnic_rx.h b/drivers/net/spnic/spnic_rx.h
index 79cb092db0..e0a7294230 100644
--- a/drivers/net/spnic/spnic_rx.h
+++ b/drivers/net/spnic/spnic_rx.h
@@ -273,6 +273,10 @@  void spnic_dump_cqe_status(struct spnic_rxq *rxq, u32 *cqe_done_cnt,
 			    u32 *cqe_hole_cnt, u32 *head_ci,
 			    u32 *head_done);
 
+int spnic_stop_rq(struct rte_eth_dev *eth_dev, struct spnic_rxq *rxq);
+
+int spnic_start_rq(struct rte_eth_dev *eth_dev, struct spnic_rxq *rxq);
+
 int spnic_start_all_rqs(struct rte_eth_dev *eth_dev);
 
 u16 spnic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts);