[08/10] net/cpfl: enable write back based on ITR expire

Message ID 20230421065048.106899-9-beilei.xing@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series: add hairpin queue support

Checks

Context | Check | Description
ci/checkpatch | success | coding style OK

Commit Message

Xing, Beilei April 21, 2023, 6:50 a.m. UTC
  From: Beilei Xing <beilei.xing@intel.com>

This patch enables write back on ITR expire
(WB_ON_ITR) for hairpin queues.

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
 drivers/common/idpf/idpf_common_device.h |  4 ++
 drivers/common/idpf/version.map          |  1 +
 drivers/net/cpfl/cpfl_ethdev.c           | 13 +++-
 4 files changed, 92 insertions(+), 1 deletion(-)
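
For context, the new common helper idpf_vport_irq_map_config_by_qids() generalizes idpf_vport_irq_map_config() by taking explicit HW queue IDs, which is needed because hairpin queues are allocated from a separate (P2P) queue ID chunk. A minimal caller sketch, with illustrative queue counts and IDs that are not taken from this patch (p2p_rx_start_qid is an assumed name):

	/* Sketch only: map two data Rx queues and one hairpin Rx queue
	 * to the vport's vector using absolute HW queue IDs.
	 */
	uint32_t qids[3];
	int ret;

	qids[0] = vport->chunks_info.rx_start_qid;      /* data queue 0 */
	qids[1] = vport->chunks_info.rx_start_qid + 1;  /* data queue 1 */
	qids[2] = p2p_rx_start_qid;                     /* hairpin queue 0 */

	ret = idpf_vport_irq_map_config_by_qids(vport, qids, 3);
	if (ret != 0)
		return ret;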
  

Patch

diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index 3b58bdd41e..86a4a54f9b 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -559,6 +559,81 @@  idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
 	return ret;
 }
 
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_queue_vector *qv_map;
+	struct idpf_hw *hw = &adapter->hw;
+	uint32_t dynctl_val, itrn_val;
+	uint32_t dynctl_reg_start;
+	uint32_t itrn_reg_start;
+	uint16_t i;
+	int ret;
+
+	qv_map = rte_zmalloc("qv_map",
+			     nb_rx_queues *
+			     sizeof(struct virtchnl2_queue_vector), 0);
+	if (qv_map == NULL) {
+		DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+			nb_rx_queues);
+		ret = -ENOMEM;
+		goto qv_map_alloc_err;
+	}
+
+	/* Rx interrupt disabled, map interrupt only for writeback */
+
+	/* The capability flags adapter->caps.other_caps should be
+	 * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+	 * condition should be updated when the FW can return the
+	 * correct flag bits.
+	 */
+	dynctl_reg_start =
+		vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+	itrn_reg_start =
+		vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+	dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+	DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+	itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+	DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+	/* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+	 * register. WB_ON_ITR and INTENA are mutually exclusive
+	 * bits. Setting WB_ON_ITR bits means TX and RX Descs
+	 * are written back based on ITR expiration irrespective
+	 * of INTENA setting.
+	 */
+	/* TBD: need to tune INTERVAL value for better performance. */
+	itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+	dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+		     PF_GLINT_DYN_CTL_ITR_INDX_S |
+		     PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+		     itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+	IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+	for (i = 0; i < nb_rx_queues; i++) {
+		/* map all queues to the same vector */
+		qv_map[i].queue_id = qids[i];
+		qv_map[i].vector_id =
+			vport->recv_vectors->vchunks.vchunks->start_vector_id;
+	}
+	vport->qv_map = qv_map;
+
+	ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+	if (ret != 0) {
+		DRV_LOG(ERR, "config interrupt mapping failed");
+		goto config_irq_map_err;
+	}
+
+	return 0;
+
+config_irq_map_err:
+	rte_free(vport->qv_map);
+	vport->qv_map = NULL;
+
+qv_map_alloc_err:
+	return ret;
+}
+
 int
 idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
 {
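
As the in-code comment above notes, WB_ON_ITR is programmed unconditionally for now because the FW does not yet report the capability bit correctly. A sketch of the intended guard once it does, assuming the usual flag test against adapter->caps.other_caps (the exact test is an assumption):

	/* Assumed future capability check: only force write-backs when
	 * the FW advertises WB_ON_ITR support.
	 */
	if (adapter->caps.other_caps & VIRTCHNL2_CAP_WB_ON_ITR) {
		/* program PF_GLINT_DYN_CTL with WB_ON_ITR set, as above */
	}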
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 7cf2355bc9..1aa9d9516f 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -212,5 +212,9 @@  int idpf_vport_info_init(struct idpf_vport *vport,
 			 struct virtchnl2_create_vport *vport_info);
 __rte_internal
 void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+				      uint32_t *qids,
+				      uint16_t nb_rx_queues);
 
 #endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 0e87dba2ae..e3a7ef0daa 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -74,6 +74,7 @@  INTERNAL {
 	idpf_vport_info_init;
 	idpf_vport_init;
 	idpf_vport_irq_map_config;
+	idpf_vport_irq_map_config_by_qids;
 	idpf_vport_irq_unmap_config;
 	idpf_vport_rss_config;
 	idpf_vport_stats_update;
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index f154c83f27..008686bfd4 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -730,11 +730,22 @@  cpfl_dev_configure(struct rte_eth_dev *dev)
 static int
 cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
 {
+	uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
 	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
 	struct idpf_vport *vport = &cpfl_vport->base;
 	uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+	struct cpfl_rx_queue *cpfl_rxq;
+	int i;
 
-	return idpf_vport_irq_map_config(vport, nb_rx_queues);
+	for (i = 0; i < nb_rx_queues; i++) {
+		cpfl_rxq = dev->data->rx_queues[i];
+		if (cpfl_rxq->hairpin_info.hairpin_q)
+			qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid,
+						  (i - cpfl_vport->nb_data_rxq));
+		else
+			qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+	}
+	return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
 }
 
 /* Update hairpin_info for dev's tx hairpin queue */
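
In cpfl_config_rx_queues_irqs() above, data Rx queues take their HW queue IDs from the vport's regular chunk (offset i), while hairpin Rx queues take theirs from the P2P chunk (offset i - nb_data_rxq, since hairpin queues follow the data queues in the Rx queue array). cpfl_hw_qid_get() is not part of this patch; assuming it simply offsets from a chunk's start queue ID, it behaves like:

	/* Assumed behavior of cpfl_hw_qid_get(): the absolute HW queue ID
	 * is the chunk's start ID plus a relative offset. Sketch only.
	 */
	static uint32_t
	cpfl_hw_qid_get(uint32_t start_qid, uint16_t offset)
	{
		return start_qid + offset;
	}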