From patchwork Wed Aug 3 11:31:03 2022
X-Patchwork-Submitter: Junfeng Guo
X-Patchwork-Id: 114578
X-Patchwork-Delegate: andrew.rybchenko@oktetlabs.ru
From: Junfeng Guo
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, junfeng.guo@intel.com
Subject: [PATCH 12/13] net/idpf: support write back based on ITR expire
Date: Wed, 3 Aug 2022 19:31:03 +0800
Message-Id: <20220803113104.1184059-13-junfeng.guo@intel.com>
In-Reply-To: <20220803113104.1184059-1-junfeng.guo@intel.com>
References: <20220803113104.1184059-1-junfeng.guo@intel.com>

Force write-backs by setting the WB_ON_ITR bit in the DYN_CTL register,
so that packets can be received one at a time.
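The DYN_CTL write in idpf_config_rx_queues_irqs() below combines an ITR
index, the WB_ON_ITR mask and an interval field into a single register
value. A minimal sketch of that composition (illustrative only; it reuses
the PF_GLINT_DYN_CTL_* shift/mask macros referenced by the patch, and the
helper name is hypothetical, not part of the patch):

	/* Illustrative only: compose the DYN_CTL value written by this
	 * patch. WB_ON_ITR and INTENA are mutually exclusive, so only
	 * WB_ON_ITR is set; descriptors are then written back whenever
	 * the selected ITR expires, without raising an interrupt.
	 */
	static inline uint32_t
	idpf_dynctl_wb_on_itr(uint32_t itr_idx, uint32_t interval)
	{
		return (itr_idx << PF_GLINT_DYN_CTL_ITR_INDX_S) |
		       PF_GLINT_DYN_CTL_WB_ON_ITR_M |
		       (interval << PF_GLINT_DYN_CTL_INTERVAL_S);
	}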
Signed-off-by: Beilei Xing
Signed-off-by: Junfeng Guo
---
 drivers/net/idpf/idpf_ethdev.c | 117 +++++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_ethdev.h |   8 +++
 drivers/net/idpf/idpf_vchnl.c  | 108 ++++++++++++++++++++++++++++++
 3 files changed, 233 insertions(+)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index b934488d0b..1e9564a5a9 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -512,6 +512,87 @@ idpf_dev_configure(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static int
+idpf_config_rx_queues_irqs(struct rte_eth_dev *dev)
+{
+	struct idpf_vport *vport =
+		(struct idpf_vport *)dev->data->dev_private;
+	struct virtchnl2_queue_vector *qv_map;
+	struct iecm_hw *hw = &adapter->hw;
+	uint32_t dynctl_reg_start;
+	uint32_t itrn_reg_start;
+	uint32_t dynctl_val, itrn_val;
+	uint16_t i;
+
+	qv_map = rte_zmalloc("qv_map",
+			dev->data->nb_rx_queues *
+			sizeof(struct virtchnl2_queue_vector), 0);
+	if (!qv_map) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+			    dev->data->nb_rx_queues);
+		goto qv_map_alloc_err;
+	}
+
+	/* Rx interrupt disabled, map interrupt only for write-back */
+
+	/* The capability flags adapter->caps->other_caps here should be
+	 * compared with bit VIRTCHNL2_CAP_WB_ON_ITR. The if condition should
+	 * be updated when the FW can return correct flag bits.
+	 */
+	if (adapter->caps->other_caps) {
+		dynctl_reg_start = vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+		itrn_reg_start = vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+		dynctl_val = IECM_READ_REG(hw, dynctl_reg_start);
+		PMD_DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+		itrn_val = IECM_READ_REG(hw, itrn_reg_start);
+		PMD_DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+		/* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+		 * register. WB_ON_ITR and INTENA are mutually exclusive
+		 * bits. Setting WB_ON_ITR bits means TX and RX descriptors
+		 * are written back based on ITR expiration irrespective
+		 * of INTENA setting.
+		 */
+		/* TBD: need to tune INTERVAL value for better performance.
+		 */
+		if (itrn_val)
+			IECM_WRITE_REG(hw,
+				       dynctl_reg_start,
+				       VIRTCHNL2_ITR_IDX_0 <<
+				       PF_GLINT_DYN_CTL_ITR_INDX_S |
+				       PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+				       itrn_val <<
+				       PF_GLINT_DYN_CTL_INTERVAL_S);
+		else
+			IECM_WRITE_REG(hw,
+				       dynctl_reg_start,
+				       VIRTCHNL2_ITR_IDX_0 <<
+				       PF_GLINT_DYN_CTL_ITR_INDX_S |
+				       PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+				       IDPF_DFLT_INTERVAL <<
+				       PF_GLINT_DYN_CTL_INTERVAL_S);
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		/* map all queues to the same vector */
+		qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
+		qv_map[i].vector_id =
+			vport->recv_vectors->vchunks.vchunks->start_vector_id;
+	}
+	vport->qv_map = qv_map;
+
+	if (idpf_config_irq_map_unmap(vport, true)) {
+		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+		goto config_irq_map_err;
+	}
+
+	return 0;
+
+config_irq_map_err:
+	rte_free(vport->qv_map);
+	vport->qv_map = NULL;
+
+qv_map_alloc_err:
+	return -1;
+}
+
 static int
 idpf_start_queues(struct rte_eth_dev *dev)
 {
@@ -550,6 +631,9 @@ idpf_dev_start(struct rte_eth_dev *dev)
 {
 	struct idpf_vport *vport =
 		(struct idpf_vport *)dev->data->dev_private;
+	uint16_t num_allocated_vectors =
+		adapter->caps->num_allocated_vectors;
+	uint16_t req_vecs_num;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -562,6 +646,23 @@ idpf_dev_start(struct rte_eth_dev *dev)
 
 	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
 
+	req_vecs_num = IDPF_DFLT_Q_VEC_NUM;
+	if (req_vecs_num + used_vecs_num > num_allocated_vectors) {
+		PMD_DRV_LOG(ERR, "The accumulated request vectors' number should be less than %d",
+			    num_allocated_vectors);
+		goto err_mtu;
+	}
+	if (idpf_alloc_vectors(vport, req_vecs_num)) {
+		PMD_DRV_LOG(ERR, "Failed to allocate interrupt vectors");
+		goto err_mtu;
+	}
+	used_vecs_num += req_vecs_num;
+
+	if (idpf_config_rx_queues_irqs(dev)) {
+		PMD_DRV_LOG(ERR, "Failed to configure irqs");
+		goto err_mtu;
+	}
+
 	if (idpf_start_queues(dev)) {
 		PMD_DRV_LOG(ERR, "Failed to start queues");
 		goto err_mtu;
@@ -603,6 +704,12 @@ idpf_dev_stop(struct rte_eth_dev *dev)
 
 	idpf_stop_queues(dev);
 
+	if (idpf_config_irq_map_unmap(vport, false))
+		PMD_DRV_LOG(ERR, "config interrupt unmapping failed");
+
+	if (idpf_dealloc_vectors(vport))
+		PMD_DRV_LOG(ERR, "deallocate interrupt vectors failed");
+
 	vport->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -631,6 +738,16 @@ idpf_dev_close(struct rte_eth_dev *dev)
 		vport->rss_key = NULL;
 	}
 
+	if (vport->recv_vectors) {
+		rte_free(vport->recv_vectors);
+		vport->recv_vectors = NULL;
+	}
+
+	if (vport->qv_map) {
+		rte_free(vport->qv_map);
+		vport->qv_map = NULL;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index c0cbf4c3c6..32520c03bb 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -119,6 +119,11 @@ struct idpf_vport {
 	uint8_t *rss_key;
 	uint64_t rss_hf;
 
+	/* MSIX info */
+	struct virtchnl2_queue_vector *qv_map; /* queue vector mapping */
+	uint16_t max_vectors;
+	struct virtchnl2_alloc_vectors *recv_vectors;
+
 	/* Chunk info */
 	struct idpf_chunks_info chunks_info;
 
@@ -229,6 +234,9 @@
 int idpf_ena_dis_queues(struct idpf_vport *vport, bool enable);
 int idpf_ena_dis_vport(struct idpf_vport *vport, bool enable);
 int idpf_query_stats(struct idpf_vport *vport,
		     struct virtchnl2_vport_stats **pstats);
+int idpf_config_irq_map_unmap(struct idpf_vport *vport, bool map);
+int idpf_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors);
+int idpf_dealloc_vectors(struct idpf_vport *vport);
 
 #endif /* _IDPF_ETHDEV_H_ */
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 563f8f649e..bfb3b08465 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -220,6 +220,10 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
 	case VIRTCHNL2_OP_ENABLE_VPORT:
 	case VIRTCHNL2_OP_DISABLE_VPORT:
 	case VIRTCHNL2_OP_GET_STATS:
+	case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
+	case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
+	case VIRTCHNL2_OP_ALLOC_VECTORS:
+	case VIRTCHNL2_OP_DEALLOC_VECTORS:
 		/* for init virtchnl ops, need to poll the response */
 		do {
 			result = idpf_read_msg_from_ipf(adapter,
@@ -900,6 +904,110 @@ idpf_config_txq(struct idpf_vport *vport, uint16_t txq_id)
 	return err;
 }
 
+int
+idpf_config_irq_map_unmap(struct idpf_vport *vport, bool map)
+{
+	struct virtchnl2_queue_vector_maps *map_info;
+	struct virtchnl2_queue_vector *vecmap;
+	uint16_t nb_rxq = vport->dev_data->nb_rx_queues;
+	struct idpf_cmd_info args;
+	int len, i, err = 0;
+
+	len = sizeof(struct virtchnl2_queue_vector_maps) +
+		(nb_rxq - 1) * sizeof(struct virtchnl2_queue_vector);
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->vport_id = vport->vport_id;
+	map_info->num_qv_maps = nb_rxq;
+	for (i = 0; i < nb_rxq; i++) {
+		vecmap = &map_info->qv_maps[i];
+		vecmap->queue_id = vport->qv_map[i].queue_id;
+		vecmap->vector_id = vport->qv_map[i].vector_id;
+		vecmap->itr_idx = VIRTCHNL2_ITR_IDX_0;
+		vecmap->queue_type = VIRTCHNL2_QUEUE_TYPE_RX;
+	}
+
+	args.ops = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
+		VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
+	args.in_args = (u8 *)map_info;
+	args.in_args_size = len;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR",
"MAP" : "UNMAP"); + + rte_free(map_info); + return err; +} + +int +idpf_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors) +{ + struct virtchnl2_alloc_vectors *alloc_vec; + struct idpf_cmd_info args; + int err, len; + + len = sizeof(struct virtchnl2_alloc_vectors) + + (num_vectors - 1) * sizeof(struct virtchnl2_vector_chunk); + alloc_vec = rte_zmalloc("alloc_vec", len, 0); + if (!alloc_vec) + return -ENOMEM; + + alloc_vec->num_vectors = num_vectors; + + args.ops = VIRTCHNL2_OP_ALLOC_VECTORS; + args.in_args = (u8 *)alloc_vec; + args.in_args_size = sizeof(struct virtchnl2_alloc_vectors); + args.out_buffer = adapter->mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + err = idpf_execute_vc_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS"); + + if (!vport->recv_vectors) { + vport->recv_vectors = rte_zmalloc("recv_vectors", len, 0); + if (!vport->recv_vectors) { + rte_free(alloc_vec); + return -ENOMEM; + } + } + + rte_memcpy(vport->recv_vectors, args.out_buffer, len); + rte_free(alloc_vec); + return err; +} + +int +idpf_dealloc_vectors(struct idpf_vport *vport) +{ + struct virtchnl2_alloc_vectors *alloc_vec; + struct virtchnl2_vector_chunks *vcs; + struct idpf_cmd_info args; + int err, len; + + alloc_vec = vport->recv_vectors; + vcs = &alloc_vec->vchunks; + + len = sizeof(struct virtchnl2_vector_chunks) + + (vcs->num_vchunks - 1) * sizeof(struct virtchnl2_vector_chunk); + + args.ops = VIRTCHNL2_OP_DEALLOC_VECTORS; + args.in_args = (u8 *)vcs; + args.in_args_size = len; + args.out_buffer = adapter->mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + err = idpf_execute_vc_cmd(adapter, &args); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS"); + + return err; +} + static int idpf_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid, uint32_t type, bool on)