From patchwork Mon Jun 5 09:06:30 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xing, Beilei" X-Patchwork-Id: 128106 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 0637E42C34; Mon, 5 Jun 2023 11:32:27 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 7362F42D3A; Mon, 5 Jun 2023 11:32:07 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by mails.dpdk.org (Postfix) with ESMTP id 2C680427F2 for ; Mon, 5 Jun 2023 11:32:04 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1685957524; x=1717493524; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=kznGVveTrSn3ervA6eVTU4RO6cs5COwN9VLkt6jwwi4=; b=UeLjdAdz1+er8iyERGWSsUhISVgvVUVAdpBWFceJEGdGfxqmOUExHwsC SZQTEobJCTxdHypp20v2o0sG14HBJFUfoyWaUkoCVeDYC7TStdkdqqNfz wII2dcxVP/WYMX95d84kgX7HYMkhGl2EYG6IBfzCbqN7K+iOupnTWsPvF tisvIYOjVSSDhq2u5nLYWlKwtVUlaEVVh9fqAPz0SJxuiq0hZNvzW2MiZ Mv/wAi8B95KPhUqclzOPEmkVJe2IIyZKgjbFja0V7XRS/4xIp0JL+wQMI JJycofISR3Habpyz1mAapbNc9PWoB10l3V9W1DfXEx8dCx92NRJx3hQfD Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10731"; a="355181050" X-IronPort-AV: E=Sophos;i="6.00,217,1681196400"; d="scan'208";a="355181050" Received: from orsmga001.jf.intel.com ([10.7.209.18]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 05 Jun 2023 02:32:01 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10731"; a="741652023" X-IronPort-AV: E=Sophos;i="6.00,217,1681196400"; d="scan'208";a="741652023" Received: from dpdk-beileix-3.sh.intel.com ([10.67.110.253]) by orsmga001.jf.intel.com with ESMTP; 05 Jun 2023 02:31:58 -0700 From: 
beilei.xing@intel.com To: jingjing.wu@intel.com Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing Subject: [PATCH v9 03/14] net/cpfl: add hairpin queue group during vport init Date: Mon, 5 Jun 2023 09:06:30 +0000 Message-Id: <20230605090641.36525-4-beilei.xing@intel.com> X-Mailer: git-send-email 2.26.2 In-Reply-To: <20230605090641.36525-1-beilei.xing@intel.com> References: <20230605061724.88130-1-beilei.xing@intel.com> <20230605090641.36525-1-beilei.xing@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Beilei Xing This patch adds hairpin queue group during vport init. Signed-off-by: Mingxia Liu Signed-off-by: Beilei Xing --- drivers/net/cpfl/cpfl_ethdev.c | 134 +++++++++++++++++++++++++++++++++ drivers/net/cpfl/cpfl_ethdev.h | 18 +++++ drivers/net/cpfl/cpfl_rxtx.h | 7 ++ 3 files changed, 159 insertions(+) diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index e587155db6..7f34cd288c 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -840,6 +840,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev) return 0; } +static int +cpfl_p2p_queue_grps_del(struct idpf_vport *vport) +{ + struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0}; + int ret = 0; + + qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID; + qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P; + ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids); + if (ret) + PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups"); + return ret; +} + static int cpfl_dev_close(struct rte_eth_dev *dev) { @@ -848,7 +862,12 @@ cpfl_dev_close(struct rte_eth_dev *dev) struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter); cpfl_dev_stop(dev); + + if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) + 
cpfl_p2p_queue_grps_del(vport); + idpf_vport_deinit(vport); + rte_free(cpfl_vport->p2p_q_chunks_info); adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id); adapter->cur_vport_nb--; @@ -1284,6 +1303,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter) return vport_idx; } +static int +cpfl_p2p_q_grps_add(struct idpf_vport *vport, + struct virtchnl2_add_queue_groups *p2p_queue_grps_info, + uint8_t *p2p_q_vc_out_info) +{ + int ret; + + p2p_queue_grps_info->vport_id = vport->vport_id; + p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS; + p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES; + p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ; + p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES; + p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ; + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID; + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P; + p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0; + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0; + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0; + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0; + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0; + + ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Failed to add p2p queue groups."); + return ret; + } + + return ret; +} + +static int +cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport, + struct virtchnl2_add_queue_groups *p2p_q_vc_out_info) +{ + struct p2p_queue_chunks_info *p2p_q_chunks_info = cpfl_vport->p2p_q_chunks_info; + struct virtchnl2_queue_reg_chunks *vc_chunks_out; + int i, type; + + if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type != + VIRTCHNL2_QUEUE_GROUP_P2P) { + PMD_DRV_LOG(ERR, "Add 
queue group response mismatch."); + return -EINVAL; + } + + vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks; + + for (i = 0; i < vc_chunks_out->num_chunks; i++) { + type = vc_chunks_out->chunks[i].type; + switch (type) { + case VIRTCHNL2_QUEUE_TYPE_TX: + p2p_q_chunks_info->tx_start_qid = + vc_chunks_out->chunks[i].start_queue_id; + p2p_q_chunks_info->tx_qtail_start = + vc_chunks_out->chunks[i].qtail_reg_start; + p2p_q_chunks_info->tx_qtail_spacing = + vc_chunks_out->chunks[i].qtail_reg_spacing; + break; + case VIRTCHNL2_QUEUE_TYPE_RX: + p2p_q_chunks_info->rx_start_qid = + vc_chunks_out->chunks[i].start_queue_id; + p2p_q_chunks_info->rx_qtail_start = + vc_chunks_out->chunks[i].qtail_reg_start; + p2p_q_chunks_info->rx_qtail_spacing = + vc_chunks_out->chunks[i].qtail_reg_spacing; + break; + case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: + p2p_q_chunks_info->tx_compl_start_qid = + vc_chunks_out->chunks[i].start_queue_id; + p2p_q_chunks_info->tx_compl_qtail_start = + vc_chunks_out->chunks[i].qtail_reg_start; + p2p_q_chunks_info->tx_compl_qtail_spacing = + vc_chunks_out->chunks[i].qtail_reg_spacing; + break; + case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: + p2p_q_chunks_info->rx_buf_start_qid = + vc_chunks_out->chunks[i].start_queue_id; + p2p_q_chunks_info->rx_buf_qtail_start = + vc_chunks_out->chunks[i].qtail_reg_start; + p2p_q_chunks_info->rx_buf_qtail_spacing = + vc_chunks_out->chunks[i].qtail_reg_spacing; + break; + default: + PMD_DRV_LOG(ERR, "Unsupported queue type"); + break; + } + } + + return 0; +} + static int cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) { @@ -1293,6 +1402,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) struct cpfl_adapter_ext *adapter = param->adapter; /* for sending create vport virtchnl msg prepare */ struct virtchnl2_create_vport create_vport_info; + struct virtchnl2_add_queue_groups p2p_queue_grps_info; + uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0}; int ret = 0; dev->dev_ops = 
&cpfl_eth_dev_ops; @@ -1327,6 +1438,29 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr, &dev->data->mac_addrs[0]); + if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) { + memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info)); + ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info); + if (ret != 0) { + PMD_INIT_LOG(WARNING, "Failed to add p2p queue group."); + return 0; + } + cpfl_vport->p2p_q_chunks_info = rte_zmalloc(NULL, + sizeof(struct p2p_queue_chunks_info), 0); + if (cpfl_vport->p2p_q_chunks_info == NULL) { + PMD_INIT_LOG(WARNING, "Failed to allocate p2p queue info."); + cpfl_p2p_queue_grps_del(vport); + return 0; + } + ret = cpfl_p2p_queue_info_init(cpfl_vport, + (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info); + if (ret != 0) { + PMD_INIT_LOG(WARNING, "Failed to init p2p queue info."); + rte_free(cpfl_vport->p2p_q_chunks_info); + cpfl_p2p_queue_grps_del(vport); + } + } + return 0; err_mac_addrs: diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index 81fe9ac4c3..666d46a44a 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -56,6 +56,7 @@ /* Device IDs */ #define IDPF_DEV_ID_CPF 0x1453 +#define VIRTCHNL2_QUEUE_GROUP_P2P 0x100 struct cpfl_vport_param { struct cpfl_adapter_ext *adapter; @@ -69,8 +70,25 @@ struct cpfl_devargs { uint16_t req_vport_nb; }; +struct p2p_queue_chunks_info { + uint32_t tx_start_qid; + uint32_t rx_start_qid; + uint32_t tx_compl_start_qid; + uint32_t rx_buf_start_qid; + + uint64_t tx_qtail_start; + uint32_t tx_qtail_spacing; + uint64_t rx_qtail_start; + uint32_t rx_qtail_spacing; + uint64_t tx_compl_qtail_start; + uint32_t tx_compl_qtail_spacing; + uint64_t rx_buf_qtail_start; + uint32_t rx_buf_qtail_spacing; +}; + struct cpfl_vport { struct idpf_vport base; + struct p2p_queue_chunks_info *p2p_q_chunks_info; }; struct cpfl_adapter_ext { diff 
--git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index bfb9ad97bd..1fe65778f0 100644 --- a/drivers/net/cpfl/cpfl_rxtx.h +++ b/drivers/net/cpfl/cpfl_rxtx.h @@ -13,6 +13,13 @@ #define CPFL_MIN_RING_DESC 32 #define CPFL_MAX_RING_DESC 4096 #define CPFL_DMA_MEM_ALIGN 4096 + +#define CPFL_MAX_P2P_NB_QUEUES 16 +#define CPFL_P2P_NB_RX_BUFQ 1 +#define CPFL_P2P_NB_TX_COMPLQ 1 +#define CPFL_P2P_NB_QUEUE_GRPS 1 +#define CPFL_P2P_QUEUE_GRP_ID 1 + /* Base address of the HW descriptor ring should be 128B aligned. */ #define CPFL_RING_BASE_ALIGN 128