From patchwork Thu Apr 13 09:44:59 2023
X-Patchwork-Submitter: Wenjing Qiao <wenjing.qiao@intel.com>
X-Patchwork-Id: 126017
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Wenjing Qiao <wenjing.qiao@intel.com>
To: jingjing.wu@intel.com, beilei.xing@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, Wenjing Qiao, Nizan Zorea
Subject: [PATCH 15/18] common/idpf: add/delete queue groups commands
Date: Thu, 13 Apr 2023 05:44:59 -0400
Message-Id: <20230413094502.1714755-16-wenjing.qiao@intel.com>
In-Reply-To: <20230413094502.1714755-1-wenjing.qiao@intel.com>
References: <20230413094502.1714755-1-wenjing.qiao@intel.com>

Add types for the two new virtchnl commands: add and delete queue
groups.

Signed-off-by: Nizan Zorea
Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/common/idpf/base/virtchnl2.h | 189 +++++++++++++++++++++++++++
 1 file changed, 189 insertions(+)

diff --git a/drivers/common/idpf/base/virtchnl2.h b/drivers/common/idpf/base/virtchnl2.h
index 415e90358e..9e70e5b10e 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -95,6 +95,8 @@
 #define VIRTCHNL2_OP_ADD_MAC_ADDR		535
 #define VIRTCHNL2_OP_DEL_MAC_ADDR		536
 #define VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE	537
+#define VIRTCHNL2_OP_ADD_QUEUE_GROUPS		538
+#define VIRTCHNL2_OP_DEL_QUEUE_GROUPS		539
 
 #define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX	0xFFFF
 
@@ -345,6 +347,14 @@
 #define VIRTCHNL2_UNICAST_PROMISC	BIT(0)
 #define VIRTCHNL2_MULTICAST_PROMISC	BIT(1)
 
+/* VIRTCHNL2_QUEUE_GROUP_TYPE
+ * Type of queue groups
+ * 0 through 0xFF is for general use
+ */
+#define VIRTCHNL2_QUEUE_GROUP_DATA	1
+#define VIRTCHNL2_QUEUE_GROUP_MBX	2
+#define VIRTCHNL2_QUEUE_GROUP_CONFIG	3
+
 /* VIRTCHNL2_PROTO_HDR_TYPE
  * Protocol header type within a packet segment. A segment consists of one or
  * more protocol headers that make up a logical group of protocol headers. Each
@@ -794,6 +804,133 @@ struct virtchnl2_add_queues {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_add_queues);
 
+/* Queue Groups Extension */
+
+struct virtchnl2_rx_queue_group_info {
+	/* IN/OUT, user can ask to update the rss_lut size originally allocated
+	 * by the CreateVport command. The new size will be returned if the
+	 * allocation succeeded, otherwise the original rss_size from
+	 * CreateVport will be returned.
+	 */
+	__le16 rss_lut_size;
+	/* Future extension purpose */
+	u8 pad[6];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rx_queue_group_info);
+
+struct virtchnl2_tx_queue_group_info { /* IN */
+	/* TX TC the queue group will be connected to */
+	u8 tx_tc;
+	/* Each group can have its own priority (0-7); a group with a unique
+	 * priority is treated as strict priority. A set of queue groups
+	 * configured with the same priority is assumed to form a WFQ
+	 * arbitration group, and each such group is expected to be assigned
+	 * a weight.
+	 */
+	u8 priority;
+	/* Determines if the queue group is expected to be Strict Priority
+	 * according to its priority
+	 */
+	u8 is_sp;
+	u8 pad;
+
+	/* Peak Info Rate Weight in case the Queue Group is part of a WFQ
+	 * arbitration set.
+	 * The weights of the groups are independent of each other.
+	 * Possible values: 1-200
+	 */
+	__le16 pir_weight;
+	/* Future extension purpose for CIR only */
+	u8 cir_pad[2];
+	/* Future extension purpose */
+	u8 pad2[8];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_tx_queue_group_info);
+
+struct virtchnl2_queue_group_id {
+	/* Queue group ID - dependent on its type:
+	 * Data: an ID relative to the vport.
+	 * Config & Mailbox: an ID relative to the function.
+	 * This ID is used in future calls, i.e. delete.
+	 * Requested by the host and assigned by the Control plane.
+	 */
+	__le16 queue_group_id;
+	/* Functional type: see VIRTCHNL2_QUEUE_GROUP_TYPE definitions */
+	__le16 queue_group_type;
+	u8 pad[4];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_group_id);
+
+struct virtchnl2_queue_group_info {
+	/* IN */
+	struct virtchnl2_queue_group_id qg_id;
+	/* IN, Number of queues of each type in the group. */
+	__le16 num_tx_q;
+	__le16 num_tx_complq;
+	__le16 num_rx_q;
+	__le16 num_rx_bufq;
+
+	struct virtchnl2_tx_queue_group_info tx_q_grp_info;
+	struct virtchnl2_rx_queue_group_info rx_q_grp_info;
+	/* Future extension purpose */
+	u8 pad[40];
+	struct virtchnl2_queue_reg_chunks chunks; /* OUT */
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(120, virtchnl2_queue_group_info);
+
+struct virtchnl2_queue_groups {
+	__le16 num_queue_groups;
+	u8 pad[6];
+	struct virtchnl2_queue_group_info groups[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_queue_groups);
+
+/* VIRTCHNL2_OP_ADD_QUEUE_GROUPS
+ * PF sends this message to request additional transmit/receive queue groups
+ * beyond the ones that were assigned via the CREATE_VPORT request.
+ * The virtchnl2_add_queue_groups structure is used to specify the number of
+ * queues of each type. CP responds with the same structure, with the actual
+ * number of groups and queues assigned, followed by num_queue_groups and
+ * num_chunks of virtchnl2_queue_groups and virtchnl2_queue_chunk structures.
+ */
+struct virtchnl2_add_queue_groups {
+	/* IN, vport_id to add the queue group to, same as allocated by the
+	 * CreateVport command. NA for mailbox and other types not assigned
+	 * to a vport.
+	 */
+	__le32 vport_id;
+	u8 pad[4];
+	/* IN/OUT */
+	struct virtchnl2_queue_groups qg_info;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_add_queue_groups);
+
+/* VIRTCHNL2_OP_DEL_QUEUE_GROUPS
+ * PF sends this message to delete queue groups.
+ * PF sends the virtchnl2_delete_queue_groups struct to specify the queue
+ * groups to be deleted. CP performs the requested action, returns status,
+ * and updates num_queue_groups with the number of successfully deleted
+ * queue groups.
+ */
+struct virtchnl2_delete_queue_groups {
+	/* IN, vport_id to delete the queue group from, same as allocated
+	 * by the CreateVport command.
+	 */
+	__le32 vport_id;
+	/* IN/OUT, defines the number of groups provided below */
+	__le16 num_queue_groups;
+	u8 pad[2];
+
+	/* IN, IDs & types of the Queue Groups to delete */
+	struct virtchnl2_queue_group_id qg_ids[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_delete_queue_groups);
+
 /* Structure to specify a chunk of contiguous interrupt vectors */
 struct virtchnl2_vector_chunk {
 	__le16 start_vector_id;
@@ -1243,6 +1380,10 @@ static inline const char *virtchnl2_op_str(__le32 v_opcode)
 		return "VIRTCHNL2_OP_CREATE_ADI";
 	case VIRTCHNL2_OP_DESTROY_ADI:
 		return "VIRTCHNL2_OP_DESTROY_ADI";
+	case VIRTCHNL2_OP_ADD_QUEUE_GROUPS:
+		return "VIRTCHNL2_OP_ADD_QUEUE_GROUPS";
+	case VIRTCHNL2_OP_DEL_QUEUE_GROUPS:
+		return "VIRTCHNL2_OP_DEL_QUEUE_GROUPS";
 	default:
 		return "Unsupported (update virtchnl2.h)";
 	}
@@ -1373,6 +1514,54 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
 				sizeof(struct virtchnl2_queue_chunk);
 		}
 		break;
+	case VIRTCHNL2_OP_ADD_QUEUE_GROUPS:
+		valid_len = sizeof(struct virtchnl2_add_queue_groups);
+		if (msglen != valid_len) {
+			__le32 i = 0, offset = 0;
+			struct virtchnl2_add_queue_groups *add_queue_grp =
+				(struct virtchnl2_add_queue_groups *)msg;
+			struct virtchnl2_queue_groups *groups = &(add_queue_grp->qg_info);
+			struct virtchnl2_queue_group_info *grp_info;
+			__le32 chunk_size = sizeof(struct virtchnl2_queue_reg_chunk);
+			__le32 group_size = sizeof(struct virtchnl2_queue_group_info);
+			__le32 total_chunks_size;
+
+			if (groups->num_queue_groups == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (groups->num_queue_groups - 1) *
+				     sizeof(struct virtchnl2_queue_group_info);
+			offset = (u8 *)(&groups->groups[0]) - (u8 *)groups;
+
+			for (i = 0; i < groups->num_queue_groups; i++) {
+				grp_info = (struct virtchnl2_queue_group_info *)
+					   ((u8 *)groups + offset);
+				if (grp_info->chunks.num_chunks == 0) {
+					offset += group_size;
+					continue;
+				}
+				total_chunks_size = (grp_info->chunks.num_chunks - 1) * chunk_size;
+				offset += group_size + total_chunks_size;
+				valid_len += total_chunks_size;
+			}
+		}
+		break;
+	case VIRTCHNL2_OP_DEL_QUEUE_GROUPS:
+		valid_len = sizeof(struct virtchnl2_delete_queue_groups);
+		if (msglen != valid_len) {
+			struct virtchnl2_delete_queue_groups *del_queue_grp =
+				(struct virtchnl2_delete_queue_groups *)msg;
+
+			if (del_queue_grp->num_queue_groups == 0) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += (del_queue_grp->num_queue_groups - 1) *
+				     sizeof(struct virtchnl2_queue_group_id);
+		}
+		break;
 	case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
 	case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
 		valid_len = sizeof(struct virtchnl2_queue_vector_maps);
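
For context beyond the patch itself: both new opcodes carry
variable-size messages, and virtchnl2_vc_validate_vf_msg() above sizes
them by counting the trailing array elements that follow the fixed
struct (each struct definition already accounts for one array element,
so only num_queue_groups - 1 extra elements are added). Below is a
minimal sender-side sketch for VIRTCHNL2_OP_DEL_QUEUE_GROUPS. The
helper build_del_queue_groups_msg() is hypothetical and not part of
this patch; it also assumes a little-endian host, so the
__le16/__le32 fields are assigned without byte swapping.

#include <stdlib.h>
#include <string.h>

/* Hypothetical helper, not part of this patch: allocate and fill a
 * virtchnl2_delete_queue_groups message for 'n' queue group IDs.
 * Sizing mirrors the VIRTCHNL2_OP_DEL_QUEUE_GROUPS rule in
 * virtchnl2_vc_validate_vf_msg(): sizeof(*msg) covers qg_ids[0],
 * so only (n - 1) additional elements are appended.
 */
static struct virtchnl2_delete_queue_groups *
build_del_queue_groups_msg(u32 vport_id,
			   const struct virtchnl2_queue_group_id *ids,
			   u16 n, u32 *msglen)
{
	struct virtchnl2_delete_queue_groups *msg;
	u32 len;

	if (n == 0)
		return NULL;	/* CP rejects num_queue_groups == 0 */

	len = sizeof(*msg) +
	      (n - 1) * sizeof(struct virtchnl2_queue_group_id);
	msg = calloc(1, len);
	if (msg == NULL)
		return NULL;

	msg->vport_id = vport_id;	/* same vport as from CreateVport */
	msg->num_queue_groups = n;
	memcpy(msg->qg_ids, ids, n * sizeof(msg->qg_ids[0]));

	*msglen = len;
	return msg;
}

The returned buffer and *msglen would then be handed to whatever
mailbox send routine the driver uses. An ADD_QUEUE_GROUPS message is
sized the same way, except that each virtchnl2_queue_group_info may in
turn be followed by (num_chunks - 1) virtchnl2_queue_reg_chunk entries,
which is exactly the layout the validation loop above walks.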