[RFC] net/i40e: enable multi-queue Rx interrupt for VF

Message ID 20190902194535.2699-1-lunyuanx.cui@intel.com (mailing list archive)
State Superseded, archived
Delegated to: xiaolong ye
Series [RFC] net/i40e: enable multi-queue Rx interrupt for VF

Checks

Context               Check    Description
ci/checkpatch         warning  coding style issues
ci/Intel-compilation  success  Compilation OK

Commit Message

Cui, LunyuanX Sept. 2, 2019, 7:45 p.m. UTC
This patch enables the VF to support multi-queue Rx interrupts.

In the current implementation only one Rx queue can support interrupts,
because all queues are mapped to the same vector id in vfio_enable_msix().
As a result, the VF cannot support multi-queue Rx interrupts in
interrupt mode.

In this patch, if the packet I/O interrupt on the datapath is enabled
(rte_intr_dp_is_en(intr_handle) is true), a different interrupt vector
is mapped to each Rx queue, and the mapping is sent to the PF one queue
at a time. After the PF programs the mapping into the register,
interrupts can be received on every Rx queue.
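
Not part of this patch: a minimal application-side sketch of how the
per-queue Rx interrupts enabled here would typically be consumed, using
the generic ethdev interrupt APIs. It assumes the port was configured
with dev_conf.intr_conf.rxq = 1 and already started; the port/queue ids
and the helper name are illustrative.

    /* Wait for traffic on one Rx queue via its MSI-X interrupt. */
    #include <rte_ethdev.h>
    #include <rte_interrupts.h>

    static int
    wait_on_rxq(uint16_t port_id, uint16_t queue_id)
    {
    	struct rte_epoll_event event;
    	int n;

    	/* Register the queue's interrupt with the per-thread epoll fd */
    	if (rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
    				      RTE_EPOLL_PER_THREAD,
    				      RTE_INTR_EVENT_ADD, NULL) < 0)
    		return -1;

    	/* Arm the interrupt, sleep until a packet arrives, then disarm */
    	rte_eth_dev_rx_intr_enable(port_id, queue_id);
    	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
    	rte_eth_dev_rx_intr_disable(port_id, queue_id);

    	return n;
    }

With this patch, such a wait works on every Rx queue of the VF instead
of only the first one.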

In addition, because of i40e performance constraints in ioctl(), the
maximum supported number of interrupt vectors and the maximum supported
queue number are both 4. So if the queue number is more than 4, the
vector id of the remaining queues should be set to 4. Without this
operation, the i40e driver will fail to start. Even though this limits
the number of interrupt vectors, the i40e driver can still be started
when the queue number is more than 4, but not all Rx queues can support
interrupts.
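
To make the capping concrete, here is a stand-alone sketch (not part of
the patch) of how the vector ids end up distributed across Rx queues.
The helper name is hypothetical; I40E_RX_VEC_START is the first
data-path vector (1 in the driver), and the guard that keeps the budget
from decrementing past the last vector is an assumption added here for
clarity.

    #include <stdint.h>

    #define I40E_RX_VEC_START			1	/* first Rx MSI-X vector */
    #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF	4	/* vector budget from this patch */

    static void
    assign_rx_vectors(uint16_t nb_rx_queues, uint16_t nb_efd, uint32_t *vec)
    {
    	uint16_t budget = nb_efd < RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF ?
    			nb_efd : RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
    	uint32_t vector_id = I40E_RX_VEC_START;
    	uint16_t i;

    	for (i = 0; i < nb_rx_queues; i++) {
    		vec[i] = vector_id;	/* one OP_CONFIG_IRQ_MAP message per queue */
    		if (budget > 1) {	/* stay on the last vector once exhausted */
    			vector_id++;
    			budget--;
    		}
    	}
    }

For example, with 6 Rx queues and nb_efd >= 4 this yields
vec[] = {1, 2, 3, 4, 4, 4}, i.e. the queues beyond the 4-vector budget
share the last vector.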

Signed-off-by: lunyuan.cui <lunyuanx.cui@intel.com>
---
 drivers/net/i40e/i40e_ethdev_vf.c | 80 ++++++++++++++++++++++---------
 1 file changed, 57 insertions(+), 23 deletions(-)
  

Patch

diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 308fb9835..9d1af3804 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -645,6 +645,8 @@  i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
 	return ret;
 }
 
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF	4
+
 static int
 i40evf_config_irq_map(struct rte_eth_dev *dev)
 {
@@ -655,38 +657,70 @@  i40evf_config_irq_map(struct rte_eth_dev *dev)
 	struct virtchnl_irq_map_info *map_info;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint16_t nb_msix = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
 	uint32_t vector_id;
 	int i, err;
 
 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
-	    rte_intr_allow_others(intr_handle))
+	    rte_intr_allow_others(intr_handle)) {
+		nb_msix = RTE_MIN(intr_handle->nb_efd, nb_msix);
 		vector_id = I40E_RX_VEC_START;
-	else
+	} else
 		vector_id = I40E_MISC_VEC_ID;
 
-	map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
-	map_info->num_vectors = 1;
-	map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
-	map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
-	/* Alway use default dynamic MSIX interrupt */
-	map_info->vecmap[0].vector_id = vector_id;
-	/* Don't map any tx queue */
-	map_info->vecmap[0].txq_map = 0;
-	map_info->vecmap[0].rxq_map = 0;
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		map_info->vecmap[0].rxq_map |= 1 << i;
-		if (rte_intr_dp_is_en(intr_handle))
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			memset(cmd_buffer, 0, sizeof(cmd_buffer));
+			map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
+			map_info->num_vectors = 1;
+			map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
+			map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
+			/* Always use default dynamic MSIX interrupt */
+			map_info->vecmap[0].vector_id = vector_id;
+			/* Don't map any tx queue */
+			map_info->vecmap[0].txq_map = 0;
+			map_info->vecmap[0].rxq_map = 0;
+			map_info->vecmap[0].rxq_map |= 1 << i;
+
 			intr_handle->intr_vec[i] = vector_id;
-	}
 
-	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
-	args.in_args = (u8 *)cmd_buffer;
-	args.in_args_size = sizeof(cmd_buffer);
-	args.out_buffer = vf->aq_resp;
-	args.out_size = I40E_AQ_BUF_SZ;
-	err = i40evf_execute_vf_cmd(dev, &args);
-	if (err)
-		PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
+			args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+			args.in_args = (u8 *)cmd_buffer;
+			args.in_args_size = sizeof(cmd_buffer);
+			args.out_buffer = vf->aq_resp;
+			args.out_size = I40E_AQ_BUF_SZ;
+			err = i40evf_execute_vf_cmd(dev, &args);
+			if (err) {
+				PMD_DRV_LOG(ERR, "fail to execute command "
+					"OP_CONFIG_IRQ_MAP");
+				return err;
+			}
+			if ((vector_id != I40E_MISC_VEC_ID) && (nb_msix > 1))
+				vector_id++;
+			nb_msix--;
+		}
+	} else {
+		map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
+		map_info->num_vectors = 1;
+		map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
+		map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
+		/* Always use default dynamic MSIX interrupt */
+		map_info->vecmap[0].vector_id = vector_id;
+		/* Don't map any tx queue */
+		map_info->vecmap[0].txq_map = 0;
+		map_info->vecmap[0].rxq_map = 0;
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			map_info->vecmap[0].rxq_map |= 1 << i;
+
+		args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+		args.in_args = (u8 *)cmd_buffer;
+		args.in_args_size = sizeof(cmd_buffer);
+		args.out_buffer = vf->aq_resp;
+		args.out_size = I40E_AQ_BUF_SZ;
+		err = i40evf_execute_vf_cmd(dev, &args);
+		if (err)
+			PMD_DRV_LOG(ERR,
+				"fail to execute command OP_ENABLE_QUEUES");
+	}
 
 	return err;
 }