From patchwork Sat Apr 24 06:03:36 2021
X-Patchwork-Submitter: Qi Zhang
X-Patchwork-Id: 92098
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Qi Zhang
To: beilei.xing@intel.com
Cc: haiyue.wang@intel.com, dev@dpdk.org, Qi Zhang, Sridhar Samudrala, Joshua Hay
Date: Sat, 24 Apr 2021 14:03:36 +0800
Message-Id: <20210424060337.2824837-4-qi.z.zhang@intel.com>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20210424060337.2824837-1-qi.z.zhang@intel.com>
References: <20210424060337.2824837-1-qi.z.zhang@intel.com>
Subject: [dpdk-dev] [PATCH 3/4] common/iavf: refine comment in virtchnl

General cleanup of comments in virtchnl.

Signed-off-by: Sridhar Samudrala
Signed-off-by: Joshua Hay
Signed-off-by: Qi Zhang
---
 drivers/common/iavf/virtchnl.h | 69 ++++++++++++++++++++++++----------
 1 file changed, 49 insertions(+), 20 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index d794f11c01..0772c28527 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -6,8 +6,9 @@
 #define _VIRTCHNL_H_
 
 /* Description:
- * This header file describes the VF-PF communication protocol used
- * by the drivers for all devices starting from our 40G product line
+ * This header file describes the Virtual Function (VF) - Physical Function
+ * (PF) communication protocol used by the drivers for all devices starting
+ * from our 40G product line
  *
  * Admin queue buffer usage:
  * desc->opcode is always aqc_opc_send_msg_to_pf
@@ -21,8 +22,8 @@
  * have a maximum of sixteen queues for all of its VSIs.
  *
  * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value
- * is of status_code type, defined in the shared type.h.
+ * except RESET_VF, which does not require any response. The returned value
+ * is of virtchnl_status_code type, defined in the shared type.h.
  *
  * In general, VF driver initialization should roughly follow the order of
  * these opcodes. The VF driver must first validate the API version of the
@@ -287,8 +288,12 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 
 struct virtchnl_msg {
 	u8 pad[8]; /* AQ flags/opcode/len/retval fields */
-	enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
-	enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+
+	/* avoid confusion with desc->opcode */
+	enum virtchnl_ops v_opcode;
+
+	/* ditto for desc->retval */
+	enum virtchnl_status_code v_retval;
 	u32 vfid; /* used by PF when sending to VF */
 };
 
@@ -354,7 +359,9 @@ enum virtchnl_vsi_type {
 struct virtchnl_vsi_resource {
 	u16 vsi_id;
 	u16 num_queue_pairs;
-	enum virtchnl_vsi_type vsi_type;
+
+	/* see enum virtchnl_vsi_type */
+	s32 vsi_type;
 	u16 qset_handle;
 	u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
 };
@@ -510,7 +517,9 @@ struct virtchnl_rxq_info {
 	u8 rxdid;
 	u8 pad1[2];
 	u64 dma_ring_addr;
-	enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+
+	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
+	s32 rx_split_pos;
 	u32 pad2;
 };
 
@@ -1265,8 +1274,12 @@ enum virtchnl_flow_type {
 struct virtchnl_filter {
 	union virtchnl_flow_spec data;
 	union virtchnl_flow_spec mask;
-	enum virtchnl_flow_type flow_type;
-	enum virtchnl_action action;
+
+	/* see enum virtchnl_flow_type */
+	s32 flow_type;
+
+	/* see enum virtchnl_action */
+	s32 action;
 	u32 action_meta;
 	u8 field_flags;
 };
@@ -1371,7 +1384,8 @@ enum virtchnl_event_codes {
 #define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
 
 struct virtchnl_pf_event {
-	enum virtchnl_event_codes event;
+	/* see enum virtchnl_event_codes */
+	s32 event;
 	union {
 		/* If the PF driver does not support the new speed reporting
 		 * capabilities then use link_event else use link_event_adv to
@@ -1579,7 +1593,8 @@ enum virtchnl_proto_hdr_field {
 };
 
 struct virtchnl_proto_hdr {
-	enum virtchnl_proto_hdr_type type;
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
 	u32 field_selector; /* a bit mask to select field for header type */
 	u8 buffer[64];
 	/**
@@ -1608,7 +1623,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
 
 struct virtchnl_rss_cfg {
 	struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
-	enum virtchnl_rss_algorithm rss_algorithm; /* rss algorithm type */
+
+	/* see enum virtchnl_rss_algorithm; rss algorithm type */
+	s32 rss_algorithm;
 	u8 reserved[128]; /* reserve for future */
 };
 
@@ -1616,7 +1633,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
 
 /* action configuration for FDIR */
 struct virtchnl_filter_action {
-	enum virtchnl_action type;
+	/* see enum virtchnl_action type */
+	s32 type;
 	union {
 		/* used for queue and qgroup action */
 		struct {
@@ -1723,7 +1741,9 @@ struct virtchnl_fdir_add {
 	u16 validate_only; /* INPUT */
 	u32 flow_id; /* OUTPUT */
 	struct virtchnl_fdir_rule rule_cfg; /* INPUT */
-	enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
+	s32 status;
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
@@ -1736,7 +1756,9 @@ struct virtchnl_fdir_del {
 	u16 vsi_id; /* INPUT */
 	u16 pad;
 	u32 flow_id; /* INPUT */
-	enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
+	s32 status;
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
@@ -1752,7 +1774,9 @@ struct virtchnl_fdir_query {
 	u32 flow_id; /* INPUT */
 	u32 reset_counter:1; /* INPUT */
 	struct virtchnl_fdir_query_info query_info; /* OUTPUT */
-	enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
+	s32 status;
 	u32 pad2;
 };
 
@@ -1775,7 +1799,8 @@ enum virtchnl_queue_type {
 
 /* structure to specify a chunk of contiguous queues */
 struct virtchnl_queue_chunk {
-	enum virtchnl_queue_type type;
+	/* see enum virtchnl_queue_type */
+	s32 type;
 	u16 start_queue_id;
 	u16 num_queues;
 };
@@ -1828,8 +1853,12 @@ struct virtchnl_queue_vector {
 	u16 queue_id;
 	u16 vector_id;
 	u8 pad[4];
-	enum virtchnl_itr_idx itr_idx;
-	enum virtchnl_queue_type queue_type;
+
+	/* see enum virtchnl_itr_idx */
+	s32 itr_idx;
+
+	/* see enum virtchnl_queue_type */
+	s32 queue_type;
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);
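
A side note for reviewers: beyond the comment wording, the hunks above consistently
replace enum-typed members of wire-visible structures with fixed-width s32 fields plus
a "see enum ..." comment, while the existing VIRTCHNL_CHECK_STRUCT_LEN() checks keep
each structure's size pinned. The snippet below is a minimal, hypothetical sketch of
that pattern, not part of the patch or of virtchnl.h: the example_event enum, the
example_msg struct, and the EXAMPLE_CHECK_STRUCT_LEN macro are invented names. The
likely motivation is that an s32 has a fixed size, whereas the width of an enum is
left to the compiler/ABI.

/*
 * Hypothetical sketch of the "s32 + see-enum comment" pattern, with a
 * compile-time size check in the spirit of VIRTCHNL_CHECK_STRUCT_LEN().
 */
#include <stdint.h>

typedef int32_t s32;
typedef uint32_t u32;

enum example_event {
	EXAMPLE_EVENT_NONE = 0,
	EXAMPLE_EVENT_LINK_CHANGE = 1,
};

/* Fail compilation if struct X is not exactly n bytes. */
#define EXAMPLE_CHECK_STRUCT_LEN(n, X) \
	typedef char X##_check_len[(n) == sizeof(struct X) ? 1 : -1]

struct example_msg {
	/* see enum example_event; stored as s32 so the layout does not
	 * depend on how the compiler sizes the enum
	 */
	s32 event;
	u32 data;
};

EXAMPLE_CHECK_STRUCT_LEN(8, example_msg);

With the field declared as s32 the structure keeps the same 8-byte layout regardless of
toolchain, and the comment still points the reader at the enum that lists the legal values.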