From patchwork Fri Apr 21 06:50:42 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 126348 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 836CC429A9; Fri, 21 Apr 2023 09:14:22 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 27B0242D2C; Fri, 21 Apr 2023 09:14:00 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 3E76742D29 for ; Fri, 21 Apr 2023 09:13:58 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1682061238; x=1713597238; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=H24FaLJF++6WE73hgUI4dHlg31lRf4gwNyeJOhTcvrM=; b=ZK5nKtfgYdxmK2T2gHMbGS7kvGwHUduD1xh+GRTsImKvkbrhgr1RzOn9 d+b8xpw1zKu7cfC9G4Weg76po9iLKyp2jzPXsVd0LBMDw8b0IjKMXwf/H XDC0bYKPGVYCiv9+v6M06YBzJ+Qvp5b0bx9ZM944HIfakPm6SfPOw+5dc hvwb1xcJ5RTxqTiuul0rx7Lyig/VAxBXEHBO402NCop3ro4n99qPH3tn0 W2QO11UV7VoJIr6CMjGes0qL7wf/HlKNr7D4UfhalQVeKBajY4RHkzbyA qu3kjwtGBjDQdXw8PAQ3997jAzBtnWC4Ncmdg1UcqRFXIlw+4TgRmU8D3 w==; X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="326260057" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="326260057" Received: from orsmga008.jf.intel.com ([10.7.209.65]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 21 Apr 2023 00:13:57 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="722669112" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="722669112" Received: from dpdk-beileix-3.sh.intel.com ([10.67.110.253]) by orsmga008.jf.intel.com with ESMTP; 21 Apr 2023 00:13:56 -0700 From: 
beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing Subject: [PATCH 04/10] net/cpfl: add hairpin queue group during vport init Date: Fri, 21 Apr 2023 06:50:42 +0000 Message-Id: <20230421065048.106899-5-beilei.xing@intel.com> X-Mailer: git-send-email 2.26.2 In-Reply-To: <20230421065048.106899-1-beilei.xing@intel.com> References: <20230421065048.106899-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Beilei Xing This patch adds a hairpin queue group during vport initialization. Signed-off-by: Mingxia Liu Signed-off-by: Beilei Xing --- drivers/net/cpfl/cpfl_ethdev.c | 125 +++++++++++++++++++++++++++++++++ drivers/net/cpfl/cpfl_ethdev.h | 17 +++++ drivers/net/cpfl/cpfl_rxtx.h | 4 ++ 3 files changed, 146 insertions(+) diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index 114fc18f5f..ad5ddebd3a 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -856,6 +856,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev) return 0; } +static int +cpfl_p2p_queue_grps_del(struct idpf_vport *vport) +{ + struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0}; + int ret = 0; + + qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID; + qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P; + ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids); + if (ret) + PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups"); + return ret; +} + static int cpfl_dev_close(struct rte_eth_dev *dev) { @@ -864,6 +878,9 @@ cpfl_dev_close(struct rte_eth_dev *dev) struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter); cpfl_dev_stop(dev); + + cpfl_p2p_queue_grps_del(vport); + idpf_vport_deinit(vport); adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id); @@ -1350,6 +1367,96 @@ 
cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter) return vport_idx; } +static int +cpfl_p2p_q_grps_add(struct idpf_vport *vport, + struct virtchnl2_add_queue_groups *p2p_queue_grps_info, + uint8_t *p2p_q_vc_out_info) +{ + int ret; + + p2p_queue_grps_info->vport_id = vport->vport_id; + p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS; + p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES; + p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ; + p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES; + p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ; + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID; + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P; + p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0; + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0; + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0; + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0; + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0; + + ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Failed to add p2p queue groups."); + return ret; + } + + return ret; +} + +static int +cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport, + struct virtchnl2_add_queue_groups *p2p_q_vc_out_info) +{ + struct p2p_queue_chunks_info *p2p_q_chunks_info = &cpfl_vport->p2p_q_chunks_info; + struct virtchnl2_queue_reg_chunks *vc_chunks_out; + int i, type; + + if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type != + VIRTCHNL2_QUEUE_GROUP_P2P) { + PMD_DRV_LOG(ERR, "Add queue group response mismatch."); + return -EINVAL; + } + + vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks; + + for (i = 0; i < vc_chunks_out->num_chunks; i++) { + type = 
vc_chunks_out->chunks[i].type; + switch (type) { + case VIRTCHNL2_QUEUE_TYPE_TX: + p2p_q_chunks_info->tx_start_qid = + vc_chunks_out->chunks[i].start_queue_id; + p2p_q_chunks_info->tx_qtail_start = + vc_chunks_out->chunks[i].qtail_reg_start; + p2p_q_chunks_info->tx_qtail_spacing = + vc_chunks_out->chunks[i].qtail_reg_spacing; + break; + case VIRTCHNL2_QUEUE_TYPE_RX: + p2p_q_chunks_info->rx_start_qid = + vc_chunks_out->chunks[i].start_queue_id; + p2p_q_chunks_info->rx_qtail_start = + vc_chunks_out->chunks[i].qtail_reg_start; + p2p_q_chunks_info->rx_qtail_spacing = + vc_chunks_out->chunks[i].qtail_reg_spacing; + break; + case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: + p2p_q_chunks_info->tx_compl_start_qid = + vc_chunks_out->chunks[i].start_queue_id; + p2p_q_chunks_info->tx_compl_qtail_start = + vc_chunks_out->chunks[i].qtail_reg_start; + p2p_q_chunks_info->tx_compl_qtail_spacing = + vc_chunks_out->chunks[i].qtail_reg_spacing; + break; + case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: + p2p_q_chunks_info->rx_buf_start_qid = + vc_chunks_out->chunks[i].start_queue_id; + p2p_q_chunks_info->rx_buf_qtail_start = + vc_chunks_out->chunks[i].qtail_reg_start; + p2p_q_chunks_info->rx_buf_qtail_spacing = + vc_chunks_out->chunks[i].qtail_reg_spacing; + break; + default: + PMD_DRV_LOG(ERR, "Unsupported queue type"); + break; + } + } + + return 0; +} + static int cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) { @@ -1359,6 +1466,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) struct cpfl_adapter_ext *adapter = param->adapter; /* for sending create vport virtchnl msg prepare */ struct virtchnl2_create_vport create_vport_info; + struct virtchnl2_add_queue_groups p2p_queue_grps_info; + uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0}; int ret = 0; dev->dev_ops = &cpfl_eth_dev_ops; @@ -1380,6 +1489,19 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) goto err; } + memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info)); + ret = 
cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to add p2p queue group."); + goto err_q_grps_add; + } + ret = cpfl_p2p_queue_info_init(cpfl_vport, + (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to init p2p queue info."); + goto err_p2p_qinfo_init; + } + adapter->vports[param->idx] = cpfl_vport; adapter->cur_vports |= RTE_BIT32(param->devarg_id); adapter->cur_vport_nb++; @@ -1397,6 +1519,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) return 0; err_mac_addrs: +err_p2p_qinfo_init: + cpfl_p2p_queue_grps_del(vport); +err_q_grps_add: adapter->vports[param->idx] = NULL; /* reset */ idpf_vport_deinit(vport); adapter->cur_vports &= ~RTE_BIT32(param->devarg_id); diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index 81fe9ac4c3..5e2e7a1bfb 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -69,8 +69,25 @@ struct cpfl_devargs { uint16_t req_vport_nb; }; +struct p2p_queue_chunks_info { + uint32_t tx_start_qid; + uint32_t rx_start_qid; + uint32_t tx_compl_start_qid; + uint32_t rx_buf_start_qid; + + uint64_t tx_qtail_start; + uint32_t tx_qtail_spacing; + uint64_t rx_qtail_start; + uint32_t rx_qtail_spacing; + uint64_t tx_compl_qtail_start; + uint32_t tx_compl_qtail_spacing; + uint64_t rx_buf_qtail_start; + uint32_t rx_buf_qtail_spacing; +}; + struct cpfl_vport { struct idpf_vport base; + struct p2p_queue_chunks_info p2p_q_chunks_info; }; struct cpfl_adapter_ext { diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index b2b3537d10..3a87a1f4b3 100644 --- a/drivers/net/cpfl/cpfl_rxtx.h +++ b/drivers/net/cpfl/cpfl_rxtx.h @@ -17,6 +17,10 @@ #define CPFL_MAX_HAIRPINQ_TX_2_RX 1 #define CPFL_MAX_HAIRPINQ_NB_DESC 1024 #define CPFL_MAX_P2P_NB_QUEUES 16 +#define CPFL_P2P_NB_RX_BUFQ 1 +#define CPFL_P2P_NB_TX_COMPLQ 1 +#define CPFL_P2P_NB_QUEUE_GRPS 1 +#define 
CPFL_P2P_QUEUE_GRP_ID 1 /* Base address of the HW descriptor ring should be 128B aligned. */ #define CPFL_RING_BASE_ALIGN 128