From patchwork Tue Jan 17 08:06:16 2023
X-Patchwork-Submitter: "Xing, Beilei"
X-Patchwork-Id: 122169
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, qi.z.zhang@intel.com, Beilei Xing
Subject: [PATCH v4 09/15] common/idpf: add vport info initialization
Date: Tue, 17 Jan 2023 08:06:16 +0000
Message-Id: <20230117080622.105657-10-beilei.xing@intel.com>
In-Reply-To: <20230117080622.105657-1-beilei.xing@intel.com>
References: <20230117080622.105657-1-beilei.xing@intel.com>
X-Mailer: git-send-email 2.26.2

From: Beilei Xing

Move the queue model fields (txq_model/rxq_model) from the idpf_adapter_ext
structure to the idpf_adapter structure. Refine some parameter and function
names, and move the idpf_create_vport_info_init function to the common
module.
Signed-off-by: Beilei Xing
---
 drivers/common/idpf/idpf_common_device.c | 35 +++++++++++++++++
 drivers/common/idpf/idpf_common_device.h | 11 ++++++
 drivers/common/idpf/version.map          |  1 +
 drivers/net/idpf/idpf_ethdev.c           | 48 +++---------------------
 drivers/net/idpf/idpf_ethdev.h           |  8 ----
 5 files changed, 53 insertions(+), 50 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index 9647d4a62a..411873c902 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -613,4 +613,39 @@ idpf_config_irq_unmap(struct idpf_vport *vport, uint16_t nb_rx_queues)
 	return 0;
 }
 
+int
+idpf_create_vport_info_init(struct idpf_vport *vport,
+			    struct virtchnl2_create_vport *vport_info)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+
+	vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	if (adapter->txq_model == 0) {
+		vport_info->txq_model =
+			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
+		vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
+		vport_info->num_tx_complq =
+			IDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP;
+	} else {
+		vport_info->txq_model =
+			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+		vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
+		vport_info->num_tx_complq = 0;
+	}
+	if (adapter->rxq_model == 0) {
+		vport_info->rxq_model =
+			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
+		vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
+		vport_info->num_rx_bufq =
+			IDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP;
+	} else {
+		vport_info->rxq_model =
+			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+		vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
+		vport_info->num_rx_bufq = 0;
+	}
+
+	return 0;
+}
+
 RTE_LOG_REGISTER_SUFFIX(idpf_common_logtype, common, NOTICE);
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 997f01f3aa..0c73d40e53 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -16,6 +16,11 @@
 #define IDPF_CTLQ_LEN		64
 #define IDPF_DFLT_MBX_BUF_SIZE	4096
 
+#define IDPF_DEFAULT_RXQ_NUM	16
+#define IDPF_RX_BUFQ_PER_GRP	2
+#define IDPF_DEFAULT_TXQ_NUM	16
+#define IDPF_TX_COMPLQ_PER_GRP	1
+
 #define IDPF_MAX_PKT_TYPE	1024
 
 #define IDPF_DFLT_INTERVAL	16
@@ -33,6 +38,9 @@ struct idpf_adapter {
 	uint8_t *mbx_resp; /* buffer to store the mailbox response from cp */
 
 	uint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;
+
+	uint32_t txq_model; /* 0 - split queue model, non-0 - single queue model */
+	uint32_t rxq_model; /* 0 - split queue model, non-0 - single queue model */
 };
 
 struct idpf_chunks_info {
@@ -168,5 +176,8 @@ __rte_internal
 int idpf_config_irq_map(struct idpf_vport *vport, uint16_t nb_rx_queues);
 __rte_internal
 int idpf_config_irq_unmap(struct idpf_vport *vport, uint16_t nb_rx_queues);
+__rte_internal
+int idpf_create_vport_info_init(struct idpf_vport *vport,
+				struct virtchnl2_create_vport *vport_info);
 
 #endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index da3b0feefc..b153647ee1 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -6,6 +6,7 @@ INTERNAL {
 	idpf_config_irq_map;
 	idpf_config_irq_unmap;
 	idpf_config_rss;
+	idpf_create_vport_info_init;
 	idpf_ctlq_clean_sq;
 	idpf_ctlq_deinit;
 	idpf_ctlq_init;
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 84046f955a..734e97ffc2 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -142,42 +142,6 @@ idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
 	return ptypes;
 }
 
-static int
-idpf_init_vport_req_info(struct rte_eth_dev *dev,
-			 struct virtchnl2_create_vport *vport_info)
-{
-	struct idpf_vport *vport = dev->data->dev_private;
-	struct idpf_adapter_ext *adapter = IDPF_ADAPTER_TO_EXT(vport->adapter);
-
-	vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
-	if (adapter->txq_model == 0) {
-		vport_info->txq_model =
-			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
-		vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
-		vport_info->num_tx_complq =
-			IDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP;
-	} else {
-		vport_info->txq_model =
-			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
-		vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
-		vport_info->num_tx_complq = 0;
-	}
-	if (adapter->rxq_model == 0) {
-		vport_info->rxq_model =
-			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
-		vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
-		vport_info->num_rx_bufq =
-			IDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP;
-	} else {
-		vport_info->rxq_model =
-			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
-		vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
-		vport_info->num_rx_bufq = 0;
-	}
-
-	return 0;
-}
-
 static int
 idpf_init_rss(struct idpf_vport *vport)
 {
@@ -566,12 +530,12 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adap
 		goto bail;
 
 	ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
-				 &adapter->txq_model);
+				 &adapter->base.txq_model);
 	if (ret != 0)
 		goto bail;
 
 	ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
-				 &adapter->rxq_model);
+				 &adapter->base.rxq_model);
 	if (ret != 0)
 		goto bail;
 
@@ -672,7 +636,7 @@ idpf_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	struct idpf_vport_param *param = init_params;
 	struct idpf_adapter_ext *adapter = param->adapter;
 	/* for sending create vport virtchnl msg prepare */
-	struct virtchnl2_create_vport vport_req_info;
+	struct virtchnl2_create_vport create_vport_info;
 	int ret = 0;
 
 	dev->dev_ops = &idpf_eth_dev_ops;
@@ -680,14 +644,14 @@ idpf_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	vport->sw_idx = param->idx;
 	vport->devarg_id = param->devarg_id;
 
-	memset(&vport_req_info, 0, sizeof(vport_req_info));
-	ret = idpf_init_vport_req_info(dev, &vport_req_info);
+	memset(&create_vport_info, 0, sizeof(create_vport_info));
+	ret = idpf_create_vport_info_init(vport, &create_vport_info);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init vport req_info.");
 		goto err;
 	}
 
-	ret = idpf_vport_init(vport, &vport_req_info, dev->data);
+	ret = idpf_vport_init(vport, &create_vport_info, dev->data);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init vports.");
 		goto err;
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index d30807ca41..c2a7abb05c 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -22,14 +22,9 @@
 
 #define IDPF_MAX_VPORT_NUM	8
 
-#define IDPF_DEFAULT_RXQ_NUM	16
-#define IDPF_DEFAULT_TXQ_NUM	16
-
 #define IDPF_INVALID_VPORT_IDX	0xffff
 
 #define IDPF_TXQ_PER_GRP	1
-#define IDPF_TX_COMPLQ_PER_GRP	1
 #define IDPF_RXQ_PER_GRP	1
-#define IDPF_RX_BUFQ_PER_GRP	2
 
 #define IDPF_DFLT_Q_VEC_NUM	1
@@ -78,9 +73,6 @@ struct idpf_adapter_ext {
 
 	char name[IDPF_ADAPTER_NAME_LEN];
 
-	uint32_t txq_model; /* 0 - split queue model, non-0 - single queue model */
-	uint32_t rxq_model; /* 0 - split queue model, non-0 - single queue model */
-
 	struct idpf_vport **vports;
 	uint16_t max_vport_nb;
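
Reviewer note (not part of the patch): below is a minimal sketch of how the
relocated helper is consumed on the PMD side after this change, mirroring the
idpf_dev_vport_init() hunk above. The wrapper name example_vport_setup() and
the include lines are illustrative assumptions; the calls to
idpf_create_vport_info_init() and idpf_vport_init() and their arguments are
taken from the diff.

/* Illustrative sketch only -- not part of the submitted change. */
#include <string.h>

#include <rte_ethdev.h>
#include <idpf_common_device.h>	/* assumed include for the common idpf API */

static int
example_vport_setup(struct idpf_vport *vport, struct rte_eth_dev *dev)
{
	struct virtchnl2_create_vport create_vport_info;
	int ret;

	/* Build the CREATE_VPORT request from the queue model fields that
	 * now live in the common struct idpf_adapter (txq_model/rxq_model).
	 */
	memset(&create_vport_info, 0, sizeof(create_vport_info));
	ret = idpf_create_vport_info_init(vport, &create_vport_info);
	if (ret != 0)
		return ret;

	/* Hand the request to the common vport initialization. */
	return idpf_vport_init(vport, &create_vport_info, dev->data);
}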