[12/13] net/idpf: support write back based on ITR expire

Message ID 20220803113104.1184059-13-junfeng.guo@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Andrew Rybchenko
Series: add support for idpf PMD in DPDK

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Junfeng Guo Aug. 3, 2022, 11:31 a.m. UTC
  Force write-backs by setting the WB_ON_ITR bit in the DYN_CTL register,
so that completed Tx and Rx descriptors are written back when the ITR
expires, irrespective of the INTENA setting, and packets can be received
while interrupts stay disabled.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c | 117 +++++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_ethdev.h |   8 +++
 drivers/net/idpf/idpf_vchnl.c  | 108 ++++++++++++++++++++++++++++++
 3 files changed, 233 insertions(+)
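
As a quick illustration of what this patch programs into hardware, here is a
minimal sketch (not part of the diff) that composes the DYN_CTL value enabling
write-back on ITR expiry. It assumes the PF_GLINT_DYN_CTL_* and
VIRTCHNL2_ITR_IDX_0 macros used in the patch below, and the hypothetical
helper name is the editor's; IDPF_DFLT_INTERVAL is the driver default used
when the ITRN register reads zero.

	/* Sketch only: build the DYN_CTL value written by the patch below.
	 * Selects ITR index 0, sets WB_ON_ITR (INTENA stays cleared; the two
	 * bits are mutually exclusive) and programs the ITR interval.
	 */
	static inline uint32_t
	idpf_dynctl_wb_on_itr_val(uint32_t itrn_val)
	{
		uint32_t interval = itrn_val ? itrn_val : IDPF_DFLT_INTERVAL;

		return (VIRTCHNL2_ITR_IDX_0 << PF_GLINT_DYN_CTL_ITR_INDX_S) |
		       PF_GLINT_DYN_CTL_WB_ON_ITR_M |
		       (interval << PF_GLINT_DYN_CTL_INTERVAL_S);
	}

Written this way, the if/else over itrn_val in idpf_config_rx_queues_irqs()
would collapse into a single IECM_WRITE_REG(hw, dynctl_reg_start, ...) call.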
  

Patch

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index b934488d0b..1e9564a5a9 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -512,6 +512,87 @@  idpf_dev_configure(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static int
+idpf_config_rx_queues_irqs(struct rte_eth_dev *dev)
+{
+	struct idpf_vport *vport =
+		(struct idpf_vport *)dev->data->dev_private;
+	struct virtchnl2_queue_vector *qv_map;
+	struct iecm_hw *hw = &adapter->hw;
+	uint32_t dynctl_reg_start;
+	uint32_t itrn_reg_start;
+	uint32_t dynctl_val, itrn_val;
+	uint16_t i;
+
+	qv_map = rte_zmalloc("qv_map",
+			dev->data->nb_rx_queues *
+			sizeof(struct virtchnl2_queue_vector), 0);
+	if (!qv_map) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector maps",
+			    dev->data->nb_rx_queues);
+		goto qv_map_alloc_err;
+	}
+
+	/* Rx interrupt disabled, map the interrupt only for write-back */
+
+	/* The capability flags in adapter->caps->other_caps should be checked
+	 * against the VIRTCHNL2_CAP_WB_ON_ITR bit here. Update this condition
+	 * once the FW returns the correct flag bits.
+	 */
+	if (adapter->caps->other_caps) {
+		dynctl_reg_start = vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+		itrn_reg_start = vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+		dynctl_val = IECM_READ_REG(hw, dynctl_reg_start);
+		PMD_DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+		itrn_val = IECM_READ_REG(hw, itrn_reg_start);
+		PMD_DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+		/* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+		 * register. WB_ON_ITR and INTENA are mutually exclusive
+		 * bits. Setting WB_ON_ITR bits means TX and RX Descs
+		 * are written back based on ITR expiration irrespective
+		 * of INTENA setting.
+		 */
+		/* TBD: need to tune INTERVAL value for better performance. */
+		if (itrn_val)
+			IECM_WRITE_REG(hw,
+				       dynctl_reg_start,
+				       VIRTCHNL2_ITR_IDX_0 <<
+				       PF_GLINT_DYN_CTL_ITR_INDX_S |
+				       PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+				       itrn_val <<
+				       PF_GLINT_DYN_CTL_INTERVAL_S);
+		else
+			IECM_WRITE_REG(hw,
+				       dynctl_reg_start,
+				       VIRTCHNL2_ITR_IDX_0 <<
+				       PF_GLINT_DYN_CTL_ITR_INDX_S |
+				       PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+				       IDPF_DFLT_INTERVAL <<
+				       PF_GLINT_DYN_CTL_INTERVAL_S);
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		/* map all queues to the same vector */
+		qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
+		qv_map[i].vector_id =
+			vport->recv_vectors->vchunks.vchunks->start_vector_id;
+	}
+	vport->qv_map = qv_map;
+
+	if (idpf_config_irq_map_unmap(vport, true)) {
+		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+		goto config_irq_map_err;
+	}
+
+	return 0;
+
+config_irq_map_err:
+	rte_free(vport->qv_map);
+	vport->qv_map = NULL;
+
+qv_map_alloc_err:
+	return -1;
+}
+
 static int
 idpf_start_queues(struct rte_eth_dev *dev)
 {
@@ -550,6 +631,9 @@  idpf_dev_start(struct rte_eth_dev *dev)
 {
 	struct idpf_vport *vport =
 		(struct idpf_vport *)dev->data->dev_private;
+	uint16_t num_allocated_vectors =
+		adapter->caps->num_allocated_vectors;
+	uint16_t req_vecs_num;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -562,6 +646,23 @@  idpf_dev_start(struct rte_eth_dev *dev)
 
 	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
 
+	req_vecs_num = IDPF_DFLT_Q_VEC_NUM;
+	if (req_vecs_num + used_vecs_num > num_allocated_vectors) {
+		PMD_DRV_LOG(ERR, "The accumulated number of requested vectors should not exceed %d",
+			    num_allocated_vectors);
+		goto err_mtu;
+	}
+	if (idpf_alloc_vectors(vport, req_vecs_num)) {
+		PMD_DRV_LOG(ERR, "Failed to allocate interrupt vectors");
+		goto err_mtu;
+	}
+	used_vecs_num += req_vecs_num;
+
+	if (idpf_config_rx_queues_irqs(dev)) {
+		PMD_DRV_LOG(ERR, "Failed to configure irqs");
+		goto err_mtu;
+	}
+
 	if (idpf_start_queues(dev)) {
 		PMD_DRV_LOG(ERR, "Failed to start queues");
 		goto err_mtu;
@@ -603,6 +704,12 @@  idpf_dev_stop(struct rte_eth_dev *dev)
 
 	idpf_stop_queues(dev);
 
+	if (idpf_config_irq_map_unmap(vport, false))
+		PMD_DRV_LOG(ERR, "config interrupt unmapping failed");
+
+	if (idpf_dealloc_vectors(vport))
+		PMD_DRV_LOG(ERR, "deallocate interrupt vectors failed");
+
 	vport->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -631,6 +738,16 @@  idpf_dev_close(struct rte_eth_dev *dev)
 		vport->rss_key = NULL;
 	}
 
+	if (vport->recv_vectors) {
+		rte_free(vport->recv_vectors);
+		vport->recv_vectors = NULL;
+	}
+
+	if (vport->qv_map) {
+		rte_free(vport->qv_map);
+		vport->qv_map = NULL;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index c0cbf4c3c6..32520c03bb 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -119,6 +119,11 @@  struct idpf_vport {
 	uint8_t *rss_key;
 	uint64_t rss_hf;
 
+	/* MSIX info */
+	struct virtchnl2_queue_vector *qv_map; /* queue vector mapping */
+	uint16_t max_vectors;
+	struct virtchnl2_alloc_vectors *recv_vectors;
+
 	/* Chunk info */
 	struct idpf_chunks_info chunks_info;
 
@@ -229,6 +234,9 @@  int idpf_ena_dis_queues(struct idpf_vport *vport, bool enable);
 int idpf_ena_dis_vport(struct idpf_vport *vport, bool enable);
 int idpf_query_stats(struct idpf_vport *vport,
 			struct virtchnl2_vport_stats **pstats);
+int idpf_config_irq_map_unmap(struct idpf_vport *vport, bool map);
+int idpf_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors);
+int idpf_dealloc_vectors(struct idpf_vport *vport);
 
 #endif /* _IDPF_ETHDEV_H_ */
 
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 563f8f649e..bfb3b08465 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -220,6 +220,10 @@  idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
 	case VIRTCHNL2_OP_ENABLE_VPORT:
 	case VIRTCHNL2_OP_DISABLE_VPORT:
 	case VIRTCHNL2_OP_GET_STATS:
+	case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
+	case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
+	case VIRTCHNL2_OP_ALLOC_VECTORS:
+	case VIRTCHNL2_OP_DEALLOC_VECTORS:
 		/* for init virtchnl ops, need to poll the response */
 		do {
 			result = idpf_read_msg_from_ipf(adapter,
@@ -900,6 +904,110 @@  idpf_config_txq(struct idpf_vport *vport, uint16_t txq_id)
 	return err;
 }
 
+int
+idpf_config_irq_map_unmap(struct idpf_vport *vport, bool map)
+{
+	struct virtchnl2_queue_vector_maps *map_info;
+	struct virtchnl2_queue_vector *vecmap;
+	uint16_t nb_rxq = vport->dev_data->nb_rx_queues;
+	struct idpf_cmd_info args;
+	int len, i, err = 0;
+
+	len = sizeof(struct virtchnl2_queue_vector_maps) +
+		(nb_rxq - 1) * sizeof(struct virtchnl2_queue_vector);
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->vport_id = vport->vport_id;
+	map_info->num_qv_maps = nb_rxq;
+	for (i = 0; i < nb_rxq; i++) {
+		vecmap = &map_info->qv_maps[i];
+		vecmap->queue_id = vport->qv_map[i].queue_id;
+		vecmap->vector_id = vport->qv_map[i].vector_id;
+		vecmap->itr_idx = VIRTCHNL2_ITR_IDX_0;
+		vecmap->queue_type = VIRTCHNL2_QUEUE_TYPE_RX;
+	}
+
+	args.ops = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
+		VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
+	args.in_args = (u8 *)map_info;
+	args.in_args_size = len;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR",
+			    map ? "MAP" : "UNMAP");
+
+	rte_free(map_info);
+	return err;
+}
+
+int
+idpf_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors)
+{
+	struct virtchnl2_alloc_vectors *alloc_vec;
+	struct idpf_cmd_info args;
+	int err, len;
+
+	len = sizeof(struct virtchnl2_alloc_vectors) +
+		(num_vectors - 1) * sizeof(struct virtchnl2_vector_chunk);
+	alloc_vec = rte_zmalloc("alloc_vec", len, 0);
+	if (!alloc_vec)
+		return -ENOMEM;
+
+	alloc_vec->num_vectors = num_vectors;
+
+	args.ops = VIRTCHNL2_OP_ALLOC_VECTORS;
+	args.in_args = (u8 *)alloc_vec;
+	args.in_args_size = sizeof(struct virtchnl2_alloc_vectors);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS");
+
+	if (!vport->recv_vectors) {
+		vport->recv_vectors = rte_zmalloc("recv_vectors", len, 0);
+		if (!vport->recv_vectors) {
+			rte_free(alloc_vec);
+			return -ENOMEM;
+		}
+	}
+
+	rte_memcpy(vport->recv_vectors, args.out_buffer, len);
+	rte_free(alloc_vec);
+	return err;
+}
+
+int
+idpf_dealloc_vectors(struct idpf_vport *vport)
+{
+	struct virtchnl2_alloc_vectors *alloc_vec;
+	struct virtchnl2_vector_chunks *vcs;
+	struct idpf_cmd_info args;
+	int err, len;
+
+	alloc_vec = vport->recv_vectors;
+	vcs = &alloc_vec->vchunks;
+
+	len = sizeof(struct virtchnl2_vector_chunks) +
+		(vcs->num_vchunks - 1) * sizeof(struct virtchnl2_vector_chunk);
+
+	args.ops = VIRTCHNL2_OP_DEALLOC_VECTORS;
+	args.in_args = (u8 *)vcs;
+	args.in_args_size = len;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS");
+
+	return err;
+}
+
 static int
 idpf_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
 		       uint32_t type, bool on)
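
For readers tracing the new control flow, here is a condensed sketch (not part
of the diff) of the interrupt-vector lifecycle this patch introduces, with
error handling elided; idpf_irq_lifecycle_demo() is a hypothetical wrapper
around the functions added above:

	/* Sketch only: order of operations across dev_start/dev_stop/dev_close */
	static int
	idpf_irq_lifecycle_demo(struct rte_eth_dev *dev)
	{
		struct idpf_vport *vport = dev->data->dev_private;

		/* dev_start: reserve vectors, then map Rx queues for write-back */
		idpf_alloc_vectors(vport, IDPF_DFLT_Q_VEC_NUM);
		idpf_config_rx_queues_irqs(dev); /* calls idpf_config_irq_map_unmap(vport, true) */

		/* dev_stop: tear down in reverse order */
		idpf_config_irq_map_unmap(vport, false);
		idpf_dealloc_vectors(vport);

		/* dev_close: release the bookkeeping kept in the vport */
		rte_free(vport->recv_vectors);
		vport->recv_vectors = NULL;
		rte_free(vport->qv_map);
		vport->qv_map = NULL;
		return 0;
	}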