@@ -8,317 +8,396 @@
/* All opcodes associated with virtchnl 2 are prefixed with virtchnl2 or
* VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
* and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
+ *
+ * PF/VF uses the virtchnl interface defined in this header file to communicate
+ * with device Control Plane (CP). Driver and the CP may run on different
+ * platforms with different endianness. To avoid byte order discrepancies,
+ * all the structures in this header follow little-endian format.
+ *
+ * This is an interface definition file where existing enums and their values
+ * must remain unchanged over time, so we specify explicit values for all enums.
*/
#include "virtchnl2_lan_desc.h"
-/* VIRTCHNL2_ERROR_CODES */
-/* success */
-#define VIRTCHNL2_STATUS_SUCCESS 0
-/* Operation not permitted, used in case of command not permitted for sender */
-#define VIRTCHNL2_STATUS_ERR_EPERM 1
-/* Bad opcode - virtchnl interface problem */
-#define VIRTCHNL2_STATUS_ERR_ESRCH 3
-/* I/O error - HW access error */
-#define VIRTCHNL2_STATUS_ERR_EIO 5
-/* No such resource - Referenced resource is not allacated */
-#define VIRTCHNL2_STATUS_ERR_ENXIO 6
-/* Permission denied - Resource is not permitted to caller */
-#define VIRTCHNL2_STATUS_ERR_EACCES 13
-/* Device or resource busy - In case shared resource is in use by others */
-#define VIRTCHNL2_STATUS_ERR_EBUSY 16
-/* Object already exists and not free */
-#define VIRTCHNL2_STATUS_ERR_EEXIST 17
-/* Invalid input argument in command */
-#define VIRTCHNL2_STATUS_ERR_EINVAL 22
-/* No space left or allocation failure */
-#define VIRTCHNL2_STATUS_ERR_ENOSPC 28
-/* Parameter out of range */
-#define VIRTCHNL2_STATUS_ERR_ERANGE 34
-
-/* Op not allowed in current dev mode */
-#define VIRTCHNL2_STATUS_ERR_EMODE 200
-/* State Machine error - Command sequence problem */
-#define VIRTCHNL2_STATUS_ERR_ESM 201
-
-/* This macro is used to generate compilation errors if a structure
+/**
+ * enum virtchnl2_status - Error codes.
+ * @VIRTCHNL2_STATUS_SUCCESS: Success
+ * @VIRTCHNL2_STATUS_ERR_EPERM: Operation not permitted, used in case of command
+ * not permitted for sender
+ * @VIRTCHNL2_STATUS_ERR_ESRCH: Bad opcode - virtchnl interface problem
+ * @VIRTCHNL2_STATUS_ERR_EIO: I/O error - HW access error
+ * @VIRTCHNL2_STATUS_ERR_ENXIO: No such resource - Referenced resource is not
+ * allocated
+ * @VIRTCHNL2_STATUS_ERR_EACCES: Permission denied - Resource is not permitted
+ * to caller
+ * @VIRTCHNL2_STATUS_ERR_EBUSY: Device or resource busy - In case shared
+ * resource is in use by others
+ * @VIRTCHNL2_STATUS_ERR_EEXIST: Object already exists and not free
+ * @VIRTCHNL2_STATUS_ERR_EINVAL: Invalid input argument in command
+ * @VIRTCHNL2_STATUS_ERR_ENOSPC: No space left or allocation failure
+ * @VIRTCHNL2_STATUS_ERR_ERANGE: Parameter out of range
+ * @VIRTCHNL2_STATUS_ERR_EMODE: Operation not allowed in current dev mode
+ * @VIRTCHNL2_STATUS_ERR_ESM: State Machine error - Command sequence problem
+ */
+enum virtchnl2_status {
+ VIRTCHNL2_STATUS_SUCCESS = 0,
+ VIRTCHNL2_STATUS_ERR_EPERM = 1,
+ VIRTCHNL2_STATUS_ERR_ESRCH = 3,
+ VIRTCHNL2_STATUS_ERR_EIO = 5,
+ VIRTCHNL2_STATUS_ERR_ENXIO = 6,
+ VIRTCHNL2_STATUS_ERR_EACCES = 13,
+ VIRTCHNL2_STATUS_ERR_EBUSY = 16,
+ VIRTCHNL2_STATUS_ERR_EEXIST = 17,
+ VIRTCHNL2_STATUS_ERR_EINVAL = 22,
+ VIRTCHNL2_STATUS_ERR_ENOSPC = 28,
+ VIRTCHNL2_STATUS_ERR_ERANGE = 34,
+ VIRTCHNL2_STATUS_ERR_EMODE = 200,
+ VIRTCHNL2_STATUS_ERR_ESM = 201,
+};
+
+/**
+ * This macro is used to generate compilation errors if a structure
* is not exactly the correct length.
*/
#define VIRTCHNL2_CHECK_STRUCT_LEN(n, X) \
static_assert((n) == sizeof(struct X), \
"Structure length does not match with the expected value")
-/* New major set of opcodes introduced and so leaving room for
+/**
+ * New major set of opcodes introduced and so leaving room for
* old misc opcodes to be added in future. Also these opcodes may only
* be used if both the PF and VF have successfully negotiated the
- * VIRTCHNL version as 2.0 during VIRTCHNL22_OP_VERSION exchange.
- */
-#define VIRTCHNL2_OP_UNKNOWN 0
-#define VIRTCHNL2_OP_VERSION 1
-#define VIRTCHNL2_OP_GET_CAPS 500
-#define VIRTCHNL2_OP_CREATE_VPORT 501
-#define VIRTCHNL2_OP_DESTROY_VPORT 502
-#define VIRTCHNL2_OP_ENABLE_VPORT 503
-#define VIRTCHNL2_OP_DISABLE_VPORT 504
-#define VIRTCHNL2_OP_CONFIG_TX_QUEUES 505
-#define VIRTCHNL2_OP_CONFIG_RX_QUEUES 506
-#define VIRTCHNL2_OP_ENABLE_QUEUES 507
-#define VIRTCHNL2_OP_DISABLE_QUEUES 508
-#define VIRTCHNL2_OP_ADD_QUEUES 509
-#define VIRTCHNL2_OP_DEL_QUEUES 510
-#define VIRTCHNL2_OP_MAP_QUEUE_VECTOR 511
-#define VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR 512
-#define VIRTCHNL2_OP_GET_RSS_KEY 513
-#define VIRTCHNL2_OP_SET_RSS_KEY 514
-#define VIRTCHNL2_OP_GET_RSS_LUT 515
-#define VIRTCHNL2_OP_SET_RSS_LUT 516
-#define VIRTCHNL2_OP_GET_RSS_HASH 517
-#define VIRTCHNL2_OP_SET_RSS_HASH 518
-#define VIRTCHNL2_OP_SET_SRIOV_VFS 519
-#define VIRTCHNL2_OP_ALLOC_VECTORS 520
-#define VIRTCHNL2_OP_DEALLOC_VECTORS 521
-#define VIRTCHNL2_OP_EVENT 522
-#define VIRTCHNL2_OP_GET_STATS 523
-#define VIRTCHNL2_OP_RESET_VF 524
- /* opcode 525 is reserved */
-#define VIRTCHNL2_OP_GET_PTYPE_INFO 526
- /* opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
- * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW
+ * VIRTCHNL version as 2.0 during VIRTCHNL2_OP_VERSION exchange.
+ */
+enum virtchnl2_op {
+ VIRTCHNL2_OP_UNKNOWN = 0,
+ VIRTCHNL2_OP_VERSION = 1,
+ VIRTCHNL2_OP_GET_CAPS = 500,
+ VIRTCHNL2_OP_CREATE_VPORT = 501,
+ VIRTCHNL2_OP_DESTROY_VPORT = 502,
+ VIRTCHNL2_OP_ENABLE_VPORT = 503,
+ VIRTCHNL2_OP_DISABLE_VPORT = 504,
+ VIRTCHNL2_OP_CONFIG_TX_QUEUES = 505,
+ VIRTCHNL2_OP_CONFIG_RX_QUEUES = 506,
+ VIRTCHNL2_OP_ENABLE_QUEUES = 507,
+ VIRTCHNL2_OP_DISABLE_QUEUES = 508,
+ VIRTCHNL2_OP_ADD_QUEUES = 509,
+ VIRTCHNL2_OP_DEL_QUEUES = 510,
+ VIRTCHNL2_OP_MAP_QUEUE_VECTOR = 511,
+ VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR = 512,
+ VIRTCHNL2_OP_GET_RSS_KEY = 513,
+ VIRTCHNL2_OP_SET_RSS_KEY = 514,
+ VIRTCHNL2_OP_GET_RSS_LUT = 515,
+ VIRTCHNL2_OP_SET_RSS_LUT = 516,
+ VIRTCHNL2_OP_GET_RSS_HASH = 517,
+ VIRTCHNL2_OP_SET_RSS_HASH = 518,
+ VIRTCHNL2_OP_SET_SRIOV_VFS = 519,
+ VIRTCHNL2_OP_ALLOC_VECTORS = 520,
+ VIRTCHNL2_OP_DEALLOC_VECTORS = 521,
+ VIRTCHNL2_OP_EVENT = 522,
+ VIRTCHNL2_OP_GET_STATS = 523,
+ VIRTCHNL2_OP_RESET_VF = 524,
+ /* Opcode 525 is reserved */
+ VIRTCHNL2_OP_GET_PTYPE_INFO = 526,
+	/* Opcodes 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
+ * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
*/
- /* opcodes 529, 530, and 531 are reserved */
-#define VIRTCHNL2_OP_NON_FLEX_CREATE_ADI 532
-#define VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI 533
-#define VIRTCHNL2_OP_LOOPBACK 534
-#define VIRTCHNL2_OP_ADD_MAC_ADDR 535
-#define VIRTCHNL2_OP_DEL_MAC_ADDR 536
-#define VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE 537
-#define VIRTCHNL2_OP_ADD_QUEUE_GROUPS 538
-#define VIRTCHNL2_OP_DEL_QUEUE_GROUPS 539
-#define VIRTCHNL2_OP_GET_PORT_STATS 540
-/* TimeSync opcodes */
-#define VIRTCHNL2_OP_GET_PTP_CAPS 541
-#define VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES 542
+	/* Opcodes 529, 530, and 531 are reserved */
+ VIRTCHNL2_OP_NON_FLEX_CREATE_ADI = 532,
+ VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI = 533,
+ VIRTCHNL2_OP_LOOPBACK = 534,
+ VIRTCHNL2_OP_ADD_MAC_ADDR = 535,
+ VIRTCHNL2_OP_DEL_MAC_ADDR = 536,
+ VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537,
+ VIRTCHNL2_OP_ADD_QUEUE_GROUPS = 538,
+ VIRTCHNL2_OP_DEL_QUEUE_GROUPS = 539,
+ VIRTCHNL2_OP_GET_PORT_STATS = 540,
+ /* TimeSync opcodes */
+ VIRTCHNL2_OP_GET_PTP_CAPS = 541,
+ VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES = 542,
+};
#define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX 0xFFFF
-/* VIRTCHNL2_VPORT_TYPE
- * Type of virtual port
+/**
+ * enum virtchnl2_vport_type - Type of virtual port
+ * @VIRTCHNL2_VPORT_TYPE_DEFAULT: Default virtual port type
+ * @VIRTCHNL2_VPORT_TYPE_SRIOV: SRIOV virtual port type
+ * @VIRTCHNL2_VPORT_TYPE_SIOV: SIOV virtual port type
+ * @VIRTCHNL2_VPORT_TYPE_SUBDEV: Subdevice virtual port type
+ * @VIRTCHNL2_VPORT_TYPE_MNG: Management virtual port type
*/
-#define VIRTCHNL2_VPORT_TYPE_DEFAULT 0
-#define VIRTCHNL2_VPORT_TYPE_SRIOV 1
-#define VIRTCHNL2_VPORT_TYPE_SIOV 2
-#define VIRTCHNL2_VPORT_TYPE_SUBDEV 3
-#define VIRTCHNL2_VPORT_TYPE_MNG 4
+enum virtchnl2_vport_type {
+ VIRTCHNL2_VPORT_TYPE_DEFAULT = 0,
+ VIRTCHNL2_VPORT_TYPE_SRIOV = 1,
+ VIRTCHNL2_VPORT_TYPE_SIOV = 2,
+ VIRTCHNL2_VPORT_TYPE_SUBDEV = 3,
+ VIRTCHNL2_VPORT_TYPE_MNG = 4,
+};
-/* VIRTCHNL2_QUEUE_MODEL
- * Type of queue model
+/**
+ * enum virtchnl2_queue_model - Type of queue model
+ * @VIRTCHNL2_QUEUE_MODEL_SINGLE: Single queue model
+ * @VIRTCHNL2_QUEUE_MODEL_SPLIT: Split queue model
*
* In the single queue model, the same transmit descriptor queue is used by
* software to post descriptors to hardware and by hardware to post completed
* descriptors to software.
* Likewise, the same receive descriptor queue is used by hardware to post
* completions to software and by software to post buffers to hardware.
- */
-#define VIRTCHNL2_QUEUE_MODEL_SINGLE 0
-/* In the split queue model, hardware uses transmit completion queues to post
+ *
+ * In the split queue model, hardware uses transmit completion queues to post
* descriptor/buffer completions to software, while software uses transmit
* descriptor queues to post descriptors to hardware.
* Likewise, hardware posts descriptor completions to the receive descriptor
* queue, while software uses receive buffer queues to post buffers to hardware.
*/
-#define VIRTCHNL2_QUEUE_MODEL_SPLIT 1
-
-/* VIRTCHNL2_CHECKSUM_OFFLOAD_CAPS
- * Checksum offload capability flags
- */
-#define VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 BIT(0)
-#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP BIT(1)
-#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP BIT(2)
-#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP BIT(3)
-#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP BIT(4)
-#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP BIT(5)
-#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP BIT(6)
-#define VIRTCHNL2_CAP_TX_CSUM_GENERIC BIT(7)
-#define VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 BIT(8)
-#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP BIT(9)
-#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP BIT(10)
-#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP BIT(11)
-#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP BIT(12)
-#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP BIT(13)
-#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP BIT(14)
-#define VIRTCHNL2_CAP_RX_CSUM_GENERIC BIT(15)
-#define VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL BIT(16)
-#define VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL BIT(17)
-#define VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL BIT(18)
-#define VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL BIT(19)
-#define VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL BIT(20)
-#define VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL BIT(21)
-#define VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL BIT(22)
-#define VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL BIT(23)
-
-/* VIRTCHNL2_SEGMENTATION_OFFLOAD_CAPS
- * Segmentation offload capability flags
- */
-#define VIRTCHNL2_CAP_SEG_IPV4_TCP BIT(0)
-#define VIRTCHNL2_CAP_SEG_IPV4_UDP BIT(1)
-#define VIRTCHNL2_CAP_SEG_IPV4_SCTP BIT(2)
-#define VIRTCHNL2_CAP_SEG_IPV6_TCP BIT(3)
-#define VIRTCHNL2_CAP_SEG_IPV6_UDP BIT(4)
-#define VIRTCHNL2_CAP_SEG_IPV6_SCTP BIT(5)
-#define VIRTCHNL2_CAP_SEG_GENERIC BIT(6)
-#define VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL BIT(7)
-#define VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL BIT(8)
-
-/* VIRTCHNL2_RSS_FLOW_TYPE_CAPS
- * Receive Side Scaling Flow type capability flags
- */
-#define VIRTCHNL2_CAP_RSS_IPV4_TCP BIT_ULL(0)
-#define VIRTCHNL2_CAP_RSS_IPV4_UDP BIT_ULL(1)
-#define VIRTCHNL2_CAP_RSS_IPV4_SCTP BIT_ULL(2)
-#define VIRTCHNL2_CAP_RSS_IPV4_OTHER BIT_ULL(3)
-#define VIRTCHNL2_CAP_RSS_IPV6_TCP BIT_ULL(4)
-#define VIRTCHNL2_CAP_RSS_IPV6_UDP BIT_ULL(5)
-#define VIRTCHNL2_CAP_RSS_IPV6_SCTP BIT_ULL(6)
-#define VIRTCHNL2_CAP_RSS_IPV6_OTHER BIT_ULL(7)
-#define VIRTCHNL2_CAP_RSS_IPV4_AH BIT_ULL(8)
-#define VIRTCHNL2_CAP_RSS_IPV4_ESP BIT_ULL(9)
-#define VIRTCHNL2_CAP_RSS_IPV4_AH_ESP BIT_ULL(10)
-#define VIRTCHNL2_CAP_RSS_IPV6_AH BIT_ULL(11)
-#define VIRTCHNL2_CAP_RSS_IPV6_ESP BIT_ULL(12)
-#define VIRTCHNL2_CAP_RSS_IPV6_AH_ESP BIT_ULL(13)
-
-/* VIRTCHNL2_HEADER_SPLIT_CAPS
- * Header split capability flags
- */
-/* for prepended metadata */
-#define VIRTCHNL2_CAP_RX_HSPLIT_AT_L2 BIT(0)
-/* all VLANs go into header buffer */
-#define VIRTCHNL2_CAP_RX_HSPLIT_AT_L3 BIT(1)
-#define VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 BIT(2)
-#define VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6 BIT(3)
-
-/* VIRTCHNL2_RSC_OFFLOAD_CAPS
- * Receive Side Coalescing offload capability flags
- */
-#define VIRTCHNL2_CAP_RSC_IPV4_TCP BIT(0)
-#define VIRTCHNL2_CAP_RSC_IPV4_SCTP BIT(1)
-#define VIRTCHNL2_CAP_RSC_IPV6_TCP BIT(2)
-#define VIRTCHNL2_CAP_RSC_IPV6_SCTP BIT(3)
-
-/* VIRTCHNL2_OTHER_CAPS
- * Other capability flags
- * SPLITQ_QSCHED: Queue based scheduling using split queue model
- * TX_VLAN: VLAN tag insertion
- * RX_VLAN: VLAN tag stripping
- */
-#define VIRTCHNL2_CAP_RDMA BIT_ULL(0)
-#define VIRTCHNL2_CAP_SRIOV BIT_ULL(1)
-#define VIRTCHNL2_CAP_MACFILTER BIT_ULL(2)
-#define VIRTCHNL2_CAP_FLOW_DIRECTOR BIT_ULL(3)
-#define VIRTCHNL2_CAP_SPLITQ_QSCHED BIT_ULL(4)
-#define VIRTCHNL2_CAP_CRC BIT_ULL(5)
-#define VIRTCHNL2_CAP_INLINE_FLOW_STEER BIT_ULL(6)
-#define VIRTCHNL2_CAP_WB_ON_ITR BIT_ULL(7)
-#define VIRTCHNL2_CAP_PROMISC BIT_ULL(8)
-#define VIRTCHNL2_CAP_LINK_SPEED BIT_ULL(9)
-#define VIRTCHNL2_CAP_INLINE_IPSEC BIT_ULL(10)
-#define VIRTCHNL2_CAP_LARGE_NUM_QUEUES BIT_ULL(11)
-/* require additional info */
-#define VIRTCHNL2_CAP_VLAN BIT_ULL(12)
-#define VIRTCHNL2_CAP_PTP BIT_ULL(13)
-#define VIRTCHNL2_CAP_ADV_RSS BIT_ULL(15)
-#define VIRTCHNL2_CAP_FDIR BIT_ULL(16)
-#define VIRTCHNL2_CAP_RX_FLEX_DESC BIT_ULL(17)
-#define VIRTCHNL2_CAP_PTYPE BIT_ULL(18)
-#define VIRTCHNL2_CAP_LOOPBACK BIT_ULL(19)
-/* Enable miss completion types plus ability to detect a miss completion if a
- * reserved bit is set in a standared completion's tag.
- */
-#define VIRTCHNL2_CAP_MISS_COMPL_TAG BIT_ULL(20)
-/* this must be the last capability */
-#define VIRTCHNL2_CAP_OEM BIT_ULL(63)
-
-/* VIRTCHNL2_TXQ_SCHED_MODE
- * Transmit Queue Scheduling Modes - Queue mode is the legacy mode i.e. inorder
- * completions where descriptors and buffers are completed at the same time.
- * Flow scheduling mode allows for out of order packet processing where
- * descriptors are cleaned in order, but buffers can be completed out of order.
- */
-#define VIRTCHNL2_TXQ_SCHED_MODE_QUEUE 0
-#define VIRTCHNL2_TXQ_SCHED_MODE_FLOW 1
-
-/* VIRTCHNL2_TXQ_FLAGS
- * Transmit Queue feature flags
- *
- * Enable rule miss completion type; packet completion for a packet
- * sent on exception path; only relevant in flow scheduling mode
- */
-#define VIRTCHNL2_TXQ_ENABLE_MISS_COMPL BIT(0)
-
-/* VIRTCHNL2_PEER_TYPE
- * Transmit mailbox peer type
- */
-#define VIRTCHNL2_RDMA_CPF 0
-#define VIRTCHNL2_NVME_CPF 1
-#define VIRTCHNL2_ATE_CPF 2
-#define VIRTCHNL2_LCE_CPF 3
-
-/* VIRTCHNL2_RXQ_FLAGS
- * Receive Queue Feature flags
- */
-#define VIRTCHNL2_RXQ_RSC BIT(0)
-#define VIRTCHNL2_RXQ_HDR_SPLIT BIT(1)
-/* When set, packet descriptors are flushed by hardware immediately after
- * processing each packet.
- */
-#define VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK BIT(2)
-#define VIRTCHNL2_RX_DESC_SIZE_16BYTE BIT(3)
-#define VIRTCHNL2_RX_DESC_SIZE_32BYTE BIT(4)
-
-/* VIRTCHNL2_RSS_ALGORITHM
- * Type of RSS algorithm
- */
-#define VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC 0
-#define VIRTCHNL2_RSS_ALG_R_ASYMMETRIC 1
-#define VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC 2
-#define VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC 3
-
-/* VIRTCHNL2_EVENT_CODES
- * Type of event
- */
-#define VIRTCHNL2_EVENT_UNKNOWN 0
-#define VIRTCHNL2_EVENT_LINK_CHANGE 1
-/* These messages are only sent to PF from CP */
-#define VIRTCHNL2_EVENT_START_RESET_ADI 2
-#define VIRTCHNL2_EVENT_FINISH_RESET_ADI 3
-#define VIRTCHNL2_EVENT_ADI_ACTIVE 4
-
-/* VIRTCHNL2_QUEUE_TYPE
- * Transmit and Receive queue types are valid in legacy as well as split queue
- * models. With Split Queue model, 2 additional types are introduced -
- * TX_COMPLETION and RX_BUFFER. In split queue model, receive corresponds to
+enum virtchnl2_queue_model {
+ VIRTCHNL2_QUEUE_MODEL_SINGLE = 0,
+ VIRTCHNL2_QUEUE_MODEL_SPLIT = 1,
+};
+
+/* Checksum offload capability flags */
+enum virtchnl2_cap_txrx_csum {
+ VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 = BIT(0),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP = BIT(1),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP = BIT(2),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP = BIT(3),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP = BIT(4),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP = BIT(5),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP = BIT(6),
+ VIRTCHNL2_CAP_TX_CSUM_GENERIC = BIT(7),
+ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 = BIT(8),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP = BIT(9),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP = BIT(10),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP = BIT(11),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP = BIT(12),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP = BIT(13),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP = BIT(14),
+ VIRTCHNL2_CAP_RX_CSUM_GENERIC = BIT(15),
+ VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL = BIT(16),
+ VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL = BIT(17),
+ VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL = BIT(18),
+ VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL = BIT(19),
+ VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL = BIT(20),
+ VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL = BIT(21),
+ VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL = BIT(22),
+ VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL = BIT(23),
+};
+
+/* Segmentation offload capability flags */
+enum virtchnl2_cap_seg {
+ VIRTCHNL2_CAP_SEG_IPV4_TCP = BIT(0),
+ VIRTCHNL2_CAP_SEG_IPV4_UDP = BIT(1),
+ VIRTCHNL2_CAP_SEG_IPV4_SCTP = BIT(2),
+ VIRTCHNL2_CAP_SEG_IPV6_TCP = BIT(3),
+ VIRTCHNL2_CAP_SEG_IPV6_UDP = BIT(4),
+ VIRTCHNL2_CAP_SEG_IPV6_SCTP = BIT(5),
+ VIRTCHNL2_CAP_SEG_GENERIC = BIT(6),
+ VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL = BIT(7),
+ VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8),
+};
+
+/* Receive Side Scaling Flow type capability flags */
+enum virtchnl2_cap_rss {
+ VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0),
+ VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1),
+ VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2),
+ VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3),
+ VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4),
+ VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5),
+ VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6),
+ VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7),
+ VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8),
+ VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9),
+ VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10),
+ VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11),
+ VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12),
+ VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13),
+};
+
+/* Header split capability flags */
+enum virtchnl2_cap_rx_hsplit_at {
+ /* For prepended metadata */
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L2 = BIT(0),
+ /* All VLANs go into header buffer */
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L3 = BIT(1),
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 = BIT(2),
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6 = BIT(3),
+};
+
+/* Receive Side Coalescing offload capability flags */
+enum virtchnl2_cap_rsc {
+ VIRTCHNL2_CAP_RSC_IPV4_TCP = BIT(0),
+ VIRTCHNL2_CAP_RSC_IPV4_SCTP = BIT(1),
+ VIRTCHNL2_CAP_RSC_IPV6_TCP = BIT(2),
+ VIRTCHNL2_CAP_RSC_IPV6_SCTP = BIT(3),
+};
+
+/* Other capability flags */
+enum virtchnl2_cap_other {
+ VIRTCHNL2_CAP_RDMA = BIT_ULL(0),
+ VIRTCHNL2_CAP_SRIOV = BIT_ULL(1),
+ VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2),
+ VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3),
+ VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4),
+ VIRTCHNL2_CAP_CRC = BIT_ULL(5),
+ VIRTCHNL2_CAP_INLINE_FLOW_STEER = BIT_ULL(6),
+ VIRTCHNL2_CAP_WB_ON_ITR = BIT_ULL(7),
+ VIRTCHNL2_CAP_PROMISC = BIT_ULL(8),
+ VIRTCHNL2_CAP_LINK_SPEED = BIT_ULL(9),
+ VIRTCHNL2_CAP_INLINE_IPSEC = BIT_ULL(10),
+ VIRTCHNL2_CAP_LARGE_NUM_QUEUES = BIT_ULL(11),
+ /* Require additional info */
+ VIRTCHNL2_CAP_VLAN = BIT_ULL(12),
+ VIRTCHNL2_CAP_PTP = BIT_ULL(13),
+ VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15),
+ VIRTCHNL2_CAP_FDIR = BIT_ULL(16),
+ VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17),
+ VIRTCHNL2_CAP_PTYPE = BIT_ULL(18),
+ VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19),
+ /* Enable miss completion types plus ability to detect a miss completion
+ * if a reserved bit is set in a standard completion's tag.
+ */
+ VIRTCHNL2_CAP_MISS_COMPL_TAG = BIT_ULL(20),
+ /* This must be the last capability */
+ VIRTCHNL2_CAP_OEM = BIT_ULL(63),
+};
+
+/**
+ * enum virtchnl2_txq_sched_mode - Transmit Queue Scheduling Modes
+ * @VIRTCHNL2_TXQ_SCHED_MODE_QUEUE: Queue mode is the legacy mode i.e. in-order
+ * completions where descriptors and buffers
+ * are completed at the same time.
+ * @VIRTCHNL2_TXQ_SCHED_MODE_FLOW: Flow scheduling mode allows for out of order
+ * packet processing where descriptors are
+ * cleaned in order, but buffers can be
+ * completed out of order.
+ */
+enum virtchnl2_txq_sched_mode {
+ VIRTCHNL2_TXQ_SCHED_MODE_QUEUE = 0,
+ VIRTCHNL2_TXQ_SCHED_MODE_FLOW = 1,
+};
+
+/**
+ * enum virtchnl2_txq_flags - Transmit Queue feature flags
+ * @VIRTCHNL2_TXQ_ENABLE_MISS_COMPL: Enable rule miss completion type. Packet
+ * completion for a packet sent on exception
+ * path and only relevant in flow scheduling
+ * mode.
+ */
+enum virtchnl2_txq_flags {
+ VIRTCHNL2_TXQ_ENABLE_MISS_COMPL = BIT(0),
+};
+
+/**
+ * enum virtchnl2_peer_type - Transmit mailbox peer type
+ * @VIRTCHNL2_RDMA_CPF: RDMA peer type
+ * @VIRTCHNL2_NVME_CPF: NVME peer type
+ * @VIRTCHNL2_ATE_CPF: ATE peer type
+ * @VIRTCHNL2_LCE_CPF: LCE peer type
+ */
+enum virtchnl2_peer_type {
+ VIRTCHNL2_RDMA_CPF = 0,
+ VIRTCHNL2_NVME_CPF = 1,
+ VIRTCHNL2_ATE_CPF = 2,
+ VIRTCHNL2_LCE_CPF = 3,
+};
+
+/**
+ * enum virtchnl2_rxq_flags - Receive Queue Feature flags
+ * @VIRTCHNL2_RXQ_RSC: Rx queue RSC flag
+ * @VIRTCHNL2_RXQ_HDR_SPLIT: Rx queue header split flag
+ * @VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK: When set, packet descriptors are flushed
+ * by hardware immediately after processing
+ * each packet.
+ * @VIRTCHNL2_RX_DESC_SIZE_16BYTE: Rx queue 16 byte descriptor size
+ * @VIRTCHNL2_RX_DESC_SIZE_32BYTE: Rx queue 32 byte descriptor size
+ */
+enum virtchnl2_rxq_flags {
+ VIRTCHNL2_RXQ_RSC = BIT(0),
+ VIRTCHNL2_RXQ_HDR_SPLIT = BIT(1),
+ VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK = BIT(2),
+ VIRTCHNL2_RX_DESC_SIZE_16BYTE = BIT(3),
+ VIRTCHNL2_RX_DESC_SIZE_32BYTE = BIT(4),
+};
+
+/**
+ * enum virtchnl2_rss_alg - Type of RSS algorithm
+ * @VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC: TOEPLITZ_ASYMMETRIC algorithm
+ * @VIRTCHNL2_RSS_ALG_R_ASYMMETRIC: R_ASYMMETRIC algorithm
+ * @VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC: TOEPLITZ_SYMMETRIC algorithm
+ * @VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC: XOR_SYMMETRIC algorithm
+ */
+enum virtchnl2_rss_alg {
+ VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL2_RSS_ALG_R_ASYMMETRIC = 1,
+ VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+/**
+ * enum virtchnl2_event_codes - Type of event
+ * @VIRTCHNL2_EVENT_UNKNOWN: Unknown event type
+ * @VIRTCHNL2_EVENT_LINK_CHANGE: Link change event type
+ * @VIRTCHNL2_EVENT_START_RESET_ADI: Start reset ADI event type
+ * @VIRTCHNL2_EVENT_FINISH_RESET_ADI: Finish reset ADI event type
+ * @VIRTCHNL2_EVENT_ADI_ACTIVE: Event type to indicate 'function active' state
+ * of ADI.
+ */
+enum virtchnl2_event_codes {
+ VIRTCHNL2_EVENT_UNKNOWN = 0,
+ VIRTCHNL2_EVENT_LINK_CHANGE = 1,
+ /* These messages are only sent to PF from CP */
+ VIRTCHNL2_EVENT_START_RESET_ADI = 2,
+ VIRTCHNL2_EVENT_FINISH_RESET_ADI = 3,
+ VIRTCHNL2_EVENT_ADI_ACTIVE = 4,
+};
+
+/**
+ * enum virtchnl2_queue_type - Various queue types
+ * @VIRTCHNL2_QUEUE_TYPE_TX: TX queue type
+ * @VIRTCHNL2_QUEUE_TYPE_RX: RX queue type
+ * @VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: TX completion queue type
+ * @VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: RX buffer queue type
+ * @VIRTCHNL2_QUEUE_TYPE_CONFIG_TX: Config TX queue type
+ * @VIRTCHNL2_QUEUE_TYPE_CONFIG_RX: Config RX queue type
+ * @VIRTCHNL2_QUEUE_TYPE_MBX_TX: TX mailbox queue type
+ * @VIRTCHNL2_QUEUE_TYPE_MBX_RX: RX mailbox queue type
+ *
+ * Transmit and Receive queue types are valid in single as well as split queue
+ * models. With Split Queue model, 2 additional types are introduced which are
+ * TX_COMPLETION and RX_BUFFER. In split queue model, receive corresponds to
* the queue where hardware posts completions.
*/
-#define VIRTCHNL2_QUEUE_TYPE_TX 0
-#define VIRTCHNL2_QUEUE_TYPE_RX 1
-#define VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION 2
-#define VIRTCHNL2_QUEUE_TYPE_RX_BUFFER 3
-#define VIRTCHNL2_QUEUE_TYPE_CONFIG_TX 4
-#define VIRTCHNL2_QUEUE_TYPE_CONFIG_RX 5
-#define VIRTCHNL2_QUEUE_TYPE_P2P_TX 6
-#define VIRTCHNL2_QUEUE_TYPE_P2P_RX 7
-#define VIRTCHNL2_QUEUE_TYPE_P2P_TX_COMPLETION 8
-#define VIRTCHNL2_QUEUE_TYPE_P2P_RX_BUFFER 9
-#define VIRTCHNL2_QUEUE_TYPE_MBX_TX 10
-#define VIRTCHNL2_QUEUE_TYPE_MBX_RX 11
-
-/* VIRTCHNL2_ITR_IDX
- * Virtchannel interrupt throttling rate index
- */
-#define VIRTCHNL2_ITR_IDX_0 0
-#define VIRTCHNL2_ITR_IDX_1 1
-
-/* VIRTCHNL2_VECTOR_LIMITS
+enum virtchnl2_queue_type {
+ VIRTCHNL2_QUEUE_TYPE_TX = 0,
+ VIRTCHNL2_QUEUE_TYPE_RX = 1,
+ VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION = 2,
+ VIRTCHNL2_QUEUE_TYPE_RX_BUFFER = 3,
+ VIRTCHNL2_QUEUE_TYPE_CONFIG_TX = 4,
+ VIRTCHNL2_QUEUE_TYPE_CONFIG_RX = 5,
+ VIRTCHNL2_QUEUE_TYPE_P2P_TX = 6,
+ VIRTCHNL2_QUEUE_TYPE_P2P_RX = 7,
+ VIRTCHNL2_QUEUE_TYPE_P2P_TX_COMPLETION = 8,
+ VIRTCHNL2_QUEUE_TYPE_P2P_RX_BUFFER = 9,
+ VIRTCHNL2_QUEUE_TYPE_MBX_TX = 10,
+ VIRTCHNL2_QUEUE_TYPE_MBX_RX = 11,
+};
+
+/**
+ * enum virtchnl2_itr_idx - Interrupt throttling rate index
+ * @VIRTCHNL2_ITR_IDX_0: ITR index 0
+ * @VIRTCHNL2_ITR_IDX_1: ITR index 1
+ */
+enum virtchnl2_itr_idx {
+ VIRTCHNL2_ITR_IDX_0 = 0,
+ VIRTCHNL2_ITR_IDX_1 = 1,
+};
+
+/**
+ * VIRTCHNL2_VECTOR_LIMITS
* Since PF/VF messages are limited by __le16 size, precalculate the maximum
* possible values of nested elements in virtchnl structures that virtual
* channel can possibly handle in a single message.
@@ -332,131 +411,150 @@
((__le16)(~0) - sizeof(struct virtchnl2_queue_vector_maps)) / \
sizeof(struct virtchnl2_queue_vector))
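/* Illustrative sketch (not part of this patch): the arithmetic the limit
 * above is derived from. A map/unmap message carries the fixed
 * virtchnl2_queue_vector_maps header followed by one virtchnl2_queue_vector
 * per map, and the whole message must fit within the 16-bit size limit.
 */
static inline size_t example_qv_maps_msg_size(u16 num_maps)
{
	return sizeof(struct virtchnl2_queue_vector_maps) +
	       (size_t)num_maps * sizeof(struct virtchnl2_queue_vector);
}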
-/* VIRTCHNL2_MAC_TYPE
- * VIRTCHNL2_MAC_ADDR_PRIMARY
- * PF/VF driver should set @type to VIRTCHNL2_MAC_ADDR_PRIMARY for the
- * primary/device unicast MAC address filter for VIRTCHNL2_OP_ADD_MAC_ADDR and
- * VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the underlying control plane
- * function to accurately track the MAC address and for VM/function reset.
- *
- * VIRTCHNL2_MAC_ADDR_EXTRA
- * PF/VF driver should set @type to VIRTCHNL2_MAC_ADDR_EXTRA for any extra
- * unicast and/or multicast filters that are being added/deleted via
- * VIRTCHNL2_OP_ADD_MAC_ADDR/VIRTCHNL2_OP_DEL_MAC_ADDR respectively.
+/**
+ * enum virtchnl2_mac_addr_type - MAC address types
+ * @VIRTCHNL2_MAC_ADDR_PRIMARY: PF/VF driver should set this type for the
+ * primary/device unicast MAC address filter for
+ * VIRTCHNL2_OP_ADD_MAC_ADDR and
+ * VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the
+ * underlying control plane function to accurately
+ * track the MAC address and for VM/function reset.
+ * @VIRTCHNL2_MAC_ADDR_EXTRA: PF/VF driver should set this type for any extra
+ * unicast and/or multicast filters that are being
+ * added/deleted via VIRTCHNL2_OP_ADD_MAC_ADDR or
+ * VIRTCHNL2_OP_DEL_MAC_ADDR.
*/
-#define VIRTCHNL2_MAC_ADDR_PRIMARY 1
-#define VIRTCHNL2_MAC_ADDR_EXTRA 2
+enum virtchnl2_mac_addr_type {
+ VIRTCHNL2_MAC_ADDR_PRIMARY = 1,
+ VIRTCHNL2_MAC_ADDR_EXTRA = 2,
+};
-/* VIRTCHNL2_PROMISC_FLAGS
- * Flags used for promiscuous mode
+/**
+ * enum virtchnl2_promisc_flags - Flags used for promiscuous mode
+ * @VIRTCHNL2_UNICAST_PROMISC: Unicast promiscuous mode
+ * @VIRTCHNL2_MULTICAST_PROMISC: Multicast promiscuous mode
*/
-#define VIRTCHNL2_UNICAST_PROMISC BIT(0)
-#define VIRTCHNL2_MULTICAST_PROMISC BIT(1)
+enum virtchnl2_promisc_flags {
+ VIRTCHNL2_UNICAST_PROMISC = BIT(0),
+ VIRTCHNL2_MULTICAST_PROMISC = BIT(1),
+};
-/* VIRTCHNL2_QUEUE_GROUP_TYPE
- * Type of queue groups
+/**
+ * enum virtchnl2_queue_group_type - Type of queue groups
+ * @VIRTCHNL2_QUEUE_GROUP_DATA: Data queue group type
+ * @VIRTCHNL2_QUEUE_GROUP_MBX: Mailbox queue group type
+ * @VIRTCHNL2_QUEUE_GROUP_CONFIG: Config queue group type
+ *
+ * Queue group types 0 through 0xFF are for general use.
*/
-#define VIRTCHNL2_QUEUE_GROUP_DATA 1
-#define VIRTCHNL2_QUEUE_GROUP_MBX 2
-#define VIRTCHNL2_QUEUE_GROUP_CONFIG 3
+enum virtchnl2_queue_group_type {
+ VIRTCHNL2_QUEUE_GROUP_DATA = 1,
+ VIRTCHNL2_QUEUE_GROUP_MBX = 2,
+ VIRTCHNL2_QUEUE_GROUP_CONFIG = 3,
+};
-/* VIRTCHNL2_PROTO_HDR_TYPE
- * Protocol header type within a packet segment. A segment consists of one or
+/* Protocol header type within a packet segment. A segment consists of one or
* more protocol headers that make up a logical group of protocol headers. Each
* logical group of protocol headers encapsulates or is encapsulated using/by
* tunneling or encapsulation protocols for network virtualization.
*/
-/* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_ANY 0
-#define VIRTCHNL2_PROTO_HDR_PRE_MAC 1
-/* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_MAC 2
-#define VIRTCHNL2_PROTO_HDR_POST_MAC 3
-#define VIRTCHNL2_PROTO_HDR_ETHERTYPE 4
-#define VIRTCHNL2_PROTO_HDR_VLAN 5
-#define VIRTCHNL2_PROTO_HDR_SVLAN 6
-#define VIRTCHNL2_PROTO_HDR_CVLAN 7
-#define VIRTCHNL2_PROTO_HDR_MPLS 8
-#define VIRTCHNL2_PROTO_HDR_UMPLS 9
-#define VIRTCHNL2_PROTO_HDR_MMPLS 10
-#define VIRTCHNL2_PROTO_HDR_PTP 11
-#define VIRTCHNL2_PROTO_HDR_CTRL 12
-#define VIRTCHNL2_PROTO_HDR_LLDP 13
-#define VIRTCHNL2_PROTO_HDR_ARP 14
-#define VIRTCHNL2_PROTO_HDR_ECP 15
-#define VIRTCHNL2_PROTO_HDR_EAPOL 16
-#define VIRTCHNL2_PROTO_HDR_PPPOD 17
-#define VIRTCHNL2_PROTO_HDR_PPPOE 18
-/* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_IPV4 19
-/* IPv4 and IPv6 Fragment header types are only associated to
- * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively,
- * cannot be used independently.
- */
-/* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_IPV4_FRAG 20
-/* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_IPV6 21
-/* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_IPV6_FRAG 22
-#define VIRTCHNL2_PROTO_HDR_IPV6_EH 23
-/* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_UDP 24
-/* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_TCP 25
-/* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_SCTP 26
-/* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_ICMP 27
-/* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_ICMPV6 28
-#define VIRTCHNL2_PROTO_HDR_IGMP 29
-#define VIRTCHNL2_PROTO_HDR_AH 30
-#define VIRTCHNL2_PROTO_HDR_ESP 31
-#define VIRTCHNL2_PROTO_HDR_IKE 32
-#define VIRTCHNL2_PROTO_HDR_NATT_KEEP 33
-/* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_PAY 34
-#define VIRTCHNL2_PROTO_HDR_L2TPV2 35
-#define VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL 36
-#define VIRTCHNL2_PROTO_HDR_L2TPV3 37
-#define VIRTCHNL2_PROTO_HDR_GTP 38
-#define VIRTCHNL2_PROTO_HDR_GTP_EH 39
-#define VIRTCHNL2_PROTO_HDR_GTPCV2 40
-#define VIRTCHNL2_PROTO_HDR_GTPC_TEID 41
-#define VIRTCHNL2_PROTO_HDR_GTPU 42
-#define VIRTCHNL2_PROTO_HDR_GTPU_UL 43
-#define VIRTCHNL2_PROTO_HDR_GTPU_DL 44
-#define VIRTCHNL2_PROTO_HDR_ECPRI 45
-#define VIRTCHNL2_PROTO_HDR_VRRP 46
-#define VIRTCHNL2_PROTO_HDR_OSPF 47
-/* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_TUN 48
-#define VIRTCHNL2_PROTO_HDR_GRE 49
-#define VIRTCHNL2_PROTO_HDR_NVGRE 50
-#define VIRTCHNL2_PROTO_HDR_VXLAN 51
-#define VIRTCHNL2_PROTO_HDR_VXLAN_GPE 52
-#define VIRTCHNL2_PROTO_HDR_GENEVE 53
-#define VIRTCHNL2_PROTO_HDR_NSH 54
-#define VIRTCHNL2_PROTO_HDR_QUIC 55
-#define VIRTCHNL2_PROTO_HDR_PFCP 56
-#define VIRTCHNL2_PROTO_HDR_PFCP_NODE 57
-#define VIRTCHNL2_PROTO_HDR_PFCP_SESSION 58
-#define VIRTCHNL2_PROTO_HDR_RTP 59
-#define VIRTCHNL2_PROTO_HDR_ROCE 60
-#define VIRTCHNL2_PROTO_HDR_ROCEV1 61
-#define VIRTCHNL2_PROTO_HDR_ROCEV2 62
-/* protocol ids up to 32767 are reserved for AVF use */
-/* 32768 - 65534 are used for user defined protocol ids */
-/* VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id */
-#define VIRTCHNL2_PROTO_HDR_NO_PROTO 65535
-
-#define VIRTCHNL2_VERSION_MAJOR_2 2
-#define VIRTCHNL2_VERSION_MINOR_0 0
-
-
-/* VIRTCHNL2_OP_VERSION
+enum virtchnl2_proto_hdr_type {
+ /* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_ANY = 0,
+ VIRTCHNL2_PROTO_HDR_PRE_MAC = 1,
+ /* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_MAC = 2,
+ VIRTCHNL2_PROTO_HDR_POST_MAC = 3,
+ VIRTCHNL2_PROTO_HDR_ETHERTYPE = 4,
+ VIRTCHNL2_PROTO_HDR_VLAN = 5,
+ VIRTCHNL2_PROTO_HDR_SVLAN = 6,
+ VIRTCHNL2_PROTO_HDR_CVLAN = 7,
+ VIRTCHNL2_PROTO_HDR_MPLS = 8,
+ VIRTCHNL2_PROTO_HDR_UMPLS = 9,
+ VIRTCHNL2_PROTO_HDR_MMPLS = 10,
+ VIRTCHNL2_PROTO_HDR_PTP = 11,
+ VIRTCHNL2_PROTO_HDR_CTRL = 12,
+ VIRTCHNL2_PROTO_HDR_LLDP = 13,
+ VIRTCHNL2_PROTO_HDR_ARP = 14,
+ VIRTCHNL2_PROTO_HDR_ECP = 15,
+ VIRTCHNL2_PROTO_HDR_EAPOL = 16,
+ VIRTCHNL2_PROTO_HDR_PPPOD = 17,
+ VIRTCHNL2_PROTO_HDR_PPPOE = 18,
+ /* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV4 = 19,
+ /* IPv4 and IPv6 Fragment header types are only associated to
+ * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively,
+ * cannot be used independently.
+ */
+ /* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV4_FRAG = 20,
+ /* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV6 = 21,
+ /* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV6_FRAG = 22,
+ VIRTCHNL2_PROTO_HDR_IPV6_EH = 23,
+ /* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_UDP = 24,
+ /* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_TCP = 25,
+ /* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_SCTP = 26,
+ /* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_ICMP = 27,
+ /* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_ICMPV6 = 28,
+ VIRTCHNL2_PROTO_HDR_IGMP = 29,
+ VIRTCHNL2_PROTO_HDR_AH = 30,
+ VIRTCHNL2_PROTO_HDR_ESP = 31,
+ VIRTCHNL2_PROTO_HDR_IKE = 32,
+ VIRTCHNL2_PROTO_HDR_NATT_KEEP = 33,
+ /* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_PAY = 34,
+ VIRTCHNL2_PROTO_HDR_L2TPV2 = 35,
+ VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL = 36,
+ VIRTCHNL2_PROTO_HDR_L2TPV3 = 37,
+ VIRTCHNL2_PROTO_HDR_GTP = 38,
+ VIRTCHNL2_PROTO_HDR_GTP_EH = 39,
+ VIRTCHNL2_PROTO_HDR_GTPCV2 = 40,
+ VIRTCHNL2_PROTO_HDR_GTPC_TEID = 41,
+ VIRTCHNL2_PROTO_HDR_GTPU = 42,
+ VIRTCHNL2_PROTO_HDR_GTPU_UL = 43,
+ VIRTCHNL2_PROTO_HDR_GTPU_DL = 44,
+ VIRTCHNL2_PROTO_HDR_ECPRI = 45,
+ VIRTCHNL2_PROTO_HDR_VRRP = 46,
+ VIRTCHNL2_PROTO_HDR_OSPF = 47,
+ /* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_TUN = 48,
+ VIRTCHNL2_PROTO_HDR_GRE = 49,
+ VIRTCHNL2_PROTO_HDR_NVGRE = 50,
+ VIRTCHNL2_PROTO_HDR_VXLAN = 51,
+ VIRTCHNL2_PROTO_HDR_VXLAN_GPE = 52,
+ VIRTCHNL2_PROTO_HDR_GENEVE = 53,
+ VIRTCHNL2_PROTO_HDR_NSH = 54,
+ VIRTCHNL2_PROTO_HDR_QUIC = 55,
+ VIRTCHNL2_PROTO_HDR_PFCP = 56,
+ VIRTCHNL2_PROTO_HDR_PFCP_NODE = 57,
+ VIRTCHNL2_PROTO_HDR_PFCP_SESSION = 58,
+ VIRTCHNL2_PROTO_HDR_RTP = 59,
+ VIRTCHNL2_PROTO_HDR_ROCE = 60,
+ VIRTCHNL2_PROTO_HDR_ROCEV1 = 61,
+ VIRTCHNL2_PROTO_HDR_ROCEV2 = 62,
+ /* Protocol ids up to 32767 are reserved */
+ /* 32768 - 65534 are used for user defined protocol ids */
+ /* VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_NO_PROTO = 65535,
+};
+
+enum virtchnl2_version {
+ VIRTCHNL2_VERSION_MINOR_0 = 0,
+ VIRTCHNL2_VERSION_MAJOR_2 = 2,
+};
+
+/**
+ * struct virtchnl2_version_info - Version information
+ * @major: Major version
+ * @minor: Minor version
+ *
* PF/VF posts its version number to the CP. CP responds with its version number
* in the same format, along with a return code.
* If there is a major version mismatch, then the PF/VF cannot operate.
@@ -466,6 +564,8 @@
* This version opcode MUST always be specified as == 1, regardless of other
* changes in the API. The CP must always respond to this message without
* error regardless of version mismatch.
+ *
+ * Associated with VIRTCHNL2_OP_VERSION.
*/
struct virtchnl2_version_info {
__le32 major;
@@ -474,7 +574,39 @@ struct virtchnl2_version_info {
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
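/* Illustrative sketch (not part of this patch): applying the negotiation
 * rules above to the CP's VIRTCHNL2_OP_VERSION reply. The mailbox transport
 * itself is omitted; the usual kernel byte-order helpers are assumed.
 */
static inline int example_check_version(const struct virtchnl2_version_info *reply)
{
	if (le32_to_cpu(reply->major) != VIRTCHNL2_VERSION_MAJOR_2)
		return -EOPNOTSUPP;	/* Major mismatch: cannot operate */

	/* On a minor mismatch, both sides fall back to the lower minor. */
	return 0;
}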
-/* VIRTCHNL2_OP_GET_CAPS
+/**
+ * struct virtchnl2_get_capabilities - Capabilities info
+ * @csum_caps: See enum virtchnl2_cap_txrx_csum
+ * @seg_caps: See enum virtchnl2_cap_seg
+ * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at
+ * @rsc_caps: See enum virtchnl2_cap_rsc
+ * @rss_caps: See enum virtchnl2_cap_rss
+ * @other_caps: See enum virtchnl2_cap_other
+ * @mailbox_dyn_ctl: DYN_CTL register offset for the mailbox, provided by CP
+ * @mailbox_vector_id: Mailbox vector id, provided by CP
+ * @num_allocated_vectors: Maximum number of allocated vectors for the device
+ * @max_rx_q: Maximum number of supported Rx queues
+ * @max_tx_q: Maximum number of supported Tx queues
+ * @max_rx_bufq: Maximum number of supported buffer queues
+ * @max_tx_complq: Maximum number of supported completion queues
+ * @max_sriov_vfs: The PF sends the maximum VFs it is requesting. The CP
+ * responds with the maximum VFs granted.
+ * @max_vports: Maximum number of vports that can be supported
+ * @default_num_vports: Default number of vports driver should allocate on load
+ * @max_tx_hdr_size: Max header length hardware can parse/checksum, in bytes
+ * @max_sg_bufs_per_tx_pkt: Max number of scatter gather buffers that can be
+ * sent per transmit packet without needing to be
+ * linearized.
+ * @reserved: Reserved field
+ * @max_adis: Max number of ADIs
+ * @oem_cp_ver_major: Major version of the Control Plane that is running
+ * @oem_cp_ver_minor: Minor version of the Control Plane that is running
+ * @device_type: See enum virtchnl2_device_type
+ * @min_sso_packet_len: Min packet length supported by device for single
+ * segment offload
+ * @max_hdr_buf_per_lso: Max number of header buffers that can be used for
+ * an LSO
+ * @pad1: Padding for future extensions
+ *
* Dataplane driver sends this message to CP to negotiate capabilities and
* provides a virtchnl2_get_capabilities structure with its desired
* capabilities, max_sriov_vfs and num_allocated_vectors.
@@ -492,60 +624,30 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
* mailbox_vector_id and the number of itr index registers in itr_idx_map.
* It also responds with default number of vports that the dataplane driver
* should comeup with in default_num_vports and maximum number of vports that
- * can be supported in max_vports
+ * can be supported in max_vports.
+ *
+ * Associated with VIRTCHNL2_OP_GET_CAPS.
*/
struct virtchnl2_get_capabilities {
- /* see VIRTCHNL2_CHECKSUM_OFFLOAD_CAPS definitions */
__le32 csum_caps;
-
- /* see VIRTCHNL2_SEGMENTATION_OFFLOAD_CAPS definitions */
__le32 seg_caps;
-
- /* see VIRTCHNL2_HEADER_SPLIT_CAPS definitions */
__le32 hsplit_caps;
-
- /* see VIRTCHNL2_RSC_OFFLOAD_CAPS definitions */
__le32 rsc_caps;
-
- /* see VIRTCHNL2_RSS_FLOW_TYPE_CAPS definitions */
__le64 rss_caps;
-
-
- /* see VIRTCHNL2_OTHER_CAPS definitions */
__le64 other_caps;
-
- /* DYN_CTL register offset and vector id for mailbox provided by CP */
__le32 mailbox_dyn_ctl;
__le16 mailbox_vector_id;
- /* Maximum number of allocated vectors for the device */
__le16 num_allocated_vectors;
-
- /* Maximum number of queues that can be supported */
__le16 max_rx_q;
__le16 max_tx_q;
__le16 max_rx_bufq;
__le16 max_tx_complq;
-
- /* The PF sends the maximum VFs it is requesting. The CP responds with
- * the maximum VFs granted.
- */
__le16 max_sriov_vfs;
-
- /* maximum number of vports that can be supported */
__le16 max_vports;
- /* default number of vports driver should allocate on load */
__le16 default_num_vports;
-
- /* Max header length hardware can parse/checksum, in bytes */
__le16 max_tx_hdr_size;
-
- /* Max number of scatter gather buffers that can be sent per transmit
- * packet without needing to be linearized
- */
u8 max_sg_bufs_per_tx_pkt;
-
- u8 reserved1;
- /* upper bound of number of ADIs supported */
+ u8 reserved;
__le16 max_adis;
/* version of Control Plane that is running */
@@ -553,10 +655,7 @@ struct virtchnl2_get_capabilities {
__le16 oem_cp_ver_minor;
/* see VIRTCHNL2_DEVICE_TYPE definitions */
__le32 device_type;
-
- /* min packet length supported by device for single segment offload */
u8 min_sso_packet_len;
- /* max number of header buffers that can be used for an LSO */
u8 max_hdr_buf_per_lso;
u8 pad1[10];
@@ -564,14 +663,21 @@ struct virtchnl2_get_capabilities {
VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
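/* Illustrative sketch (not part of this patch): testing negotiated offload
 * bits in the CP's VIRTCHNL2_OP_GET_CAPS response. All fields are little
 * endian, so they are converted before masking with the capability enums
 * defined earlier in this file.
 */
static inline bool example_rx_ipv4_csum_ena(const struct virtchnl2_get_capabilities *caps)
{
	return le32_to_cpu(caps->csum_caps) & VIRTCHNL2_CAP_RX_CSUM_L3_IPV4;
}

static inline bool example_rss_ipv4_tcp_ena(const struct virtchnl2_get_capabilities *caps)
{
	return le64_to_cpu(caps->rss_caps) & VIRTCHNL2_CAP_RSS_IPV4_TCP;
}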
+/**
+ * struct virtchnl2_queue_reg_chunk - Single queue chunk
+ * @type: See enum virtchnl2_queue_type
+ * @start_queue_id: Start Queue ID
+ * @num_queues: Number of queues in the chunk
+ * @pad: Padding
+ * @qtail_reg_start: Queue tail register offset
+ * @qtail_reg_spacing: Queue tail register spacing
+ * @pad1: Padding for future extensions
+ */
struct virtchnl2_queue_reg_chunk {
- /* see VIRTCHNL2_QUEUE_TYPE definitions */
__le32 type;
__le32 start_queue_id;
__le32 num_queues;
__le32 pad;
-
- /* Queue tail register offset and spacing provided by CP */
__le64 qtail_reg_start;
__le32 qtail_reg_spacing;
@@ -580,7 +686,13 @@ struct virtchnl2_queue_reg_chunk {
VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
-/* structure to specify several chunks of contiguous queues */
+/**
+ * struct virtchnl2_queue_reg_chunks - Specify several chunks of contiguous
+ * queues.
+ * @num_chunks: Number of chunks
+ * @pad: Padding
+ * @chunks: Chunks of queue info
+ */
struct virtchnl2_queue_reg_chunks {
__le16 num_chunks;
u8 pad[6];
@@ -589,77 +701,91 @@ struct virtchnl2_queue_reg_chunks {
VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_reg_chunks);
-/* VIRTCHNL2_VPORT_FLAGS */
-#define VIRTCHNL2_VPORT_UPLINK_PORT BIT(0)
-#define VIRTCHNL2_VPORT_INLINE_FLOW_STEER_ENA BIT(1)
+/**
+ * enum virtchnl2_vport_flags - Vport flags
+ * @VIRTCHNL2_VPORT_UPLINK_PORT: Uplink port flag
+ * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER_ENA: Inline flow steering enable flag
+ */
+enum virtchnl2_vport_flags {
+ VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
+ VIRTCHNL2_VPORT_INLINE_FLOW_STEER_ENA = BIT(1),
+};
#define VIRTCHNL2_ETH_LENGTH_OF_ADDRESS 6
-/* VIRTCHNL2_OP_CREATE_VPORT
- * PF sends this message to CP to create a vport by filling in required
+
+/**
+ * struct virtchnl2_create_vport - Create vport config info
+ * @vport_type: See enum virtchnl2_vport_type
+ * @txq_model: See enum virtchnl2_queue_model
+ * @rxq_model: See enum virtchnl2_queue_model
+ * @num_tx_q: Number of Tx queues
+ * @num_tx_complq: Valid only if txq_model is split queue
+ * @num_rx_q: Number of Rx queues
+ * @num_rx_bufq: Valid only if rxq_model is split queue
+ * @default_rx_q: Relative receive queue index to be used as default
+ * @vport_index: Used to align PF and CP in case of default multiple vports;
+ * it is filled by the PF and the CP returns the same value, to
+ * enable the driver to support multiple asynchronous parallel
+ * CREATE_VPORT requests and to associate a response with a
+ * specific request.
+ * @max_mtu: Max MTU. CP populates this field on response
+ * @vport_id: Vport id. CP populates this field on response
+ * @default_mac_addr: Default MAC address
+ * @vport_flags: See enum virtchnl2_vport_flags
+ * @rx_desc_ids: See enum virtchnl2_rx_desc_id_bitmasks
+ * @tx_desc_ids: See enum virtchnl2_tx_desc_ids
+ * @reserved: Reserved bytes; cannot be used
+ * @rss_algorithm: RSS algorithm (see enum virtchnl2_rss_alg)
+ * @rss_key_size: RSS key size
+ * @rss_lut_size: RSS LUT size
+ * @rx_split_pos: See enum virtchnl2_cap_rx_hsplit_at
+ * @pad: Padding for future extensions
+ * @chunks: Chunks of contiguous queues
+ *
+ * PF/VF sends this message to CP to create a vport by filling in required
* fields of virtchnl2_create_vport structure.
* CP responds with the updated virtchnl2_create_vport structure containing the
* necessary fields followed by chunks which in turn will have an array of
* num_chunks entries of virtchnl2_queue_chunk structures.
*/
struct virtchnl2_create_vport {
- /* PF/VF populates the following fields on request */
- /* see VIRTCHNL2_VPORT_TYPE definitions */
__le16 vport_type;
-
- /* see VIRTCHNL2_QUEUE_MODEL definitions */
__le16 txq_model;
-
- /* see VIRTCHNL2_QUEUE_MODEL definitions */
__le16 rxq_model;
__le16 num_tx_q;
- /* valid only if txq_model is split queue */
__le16 num_tx_complq;
__le16 num_rx_q;
- /* valid only if rxq_model is split queue */
__le16 num_rx_bufq;
- /* relative receive queue index to be used as default */
__le16 default_rx_q;
- /* used to align PF and CP in case of default multiple vports, it is
- * filled by the PF and CP returns the same value, to enable the driver
- * to support multiple asynchronous parallel CREATE_VPORT requests and
- * associate a response to a specific request
- */
__le16 vport_index;
-
- /* CP populates the following fields on response */
__le16 max_mtu;
__le32 vport_id;
u8 default_mac_addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
- /* see VIRTCHNL2_VPORT_FLAGS definitions */
__le16 vport_flags;
- /* see VIRTCHNL2_RX_DESC_IDS definitions */
__le64 rx_desc_ids;
- /* see VIRTCHNL2_TX_DESC_IDS definitions */
__le64 tx_desc_ids;
-
- u8 reserved1[72];
-
- /* see VIRTCHNL2_RSS_ALGORITHM definitions */
+ u8 reserved[72];
__le32 rss_algorithm;
__le16 rss_key_size;
__le16 rss_lut_size;
-
- /* see VIRTCHNL2_HEADER_SPLIT_CAPS definitions */
__le32 rx_split_pos;
-
- u8 pad2[20];
+ u8 pad[20];
struct virtchnl2_queue_reg_chunks chunks;
};
-
VIRTCHNL2_CHECK_STRUCT_LEN(192, virtchnl2_create_vport);
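/* Illustrative sketch (not part of this patch): request-side fields a driver
 * might fill before sending VIRTCHNL2_OP_CREATE_VPORT. The queue counts are
 * arbitrary; max_mtu, vport_id and the queue chunks are populated by the CP
 * in its response.
 */
static void example_fill_create_vport(struct virtchnl2_create_vport *v)
{
	v->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
	v->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	v->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
	v->num_tx_q = cpu_to_le16(4);
	v->num_tx_complq = cpu_to_le16(4);	/* Split Tx model only */
	v->num_rx_q = cpu_to_le16(4);
	v->num_rx_bufq = cpu_to_le16(8);	/* Split Rx model only */
	v->default_rx_q = cpu_to_le16(0);
	v->vport_index = cpu_to_le16(0);
}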
-/* VIRTCHNL2_OP_DESTROY_VPORT
- * VIRTCHNL2_OP_ENABLE_VPORT
- * VIRTCHNL2_OP_DISABLE_VPORT
- * PF sends this message to CP to destroy, enable or disable a vport by filling
- * in the vport_id in virtchnl2_vport structure.
+/**
+ * struct virtchnl2_vport - Vport identifier information
+ * @vport_id: Vport id
+ * @pad: Padding for future extensions
+ *
+ * PF/VF sends this message to CP to destroy, enable or disable a vport by
+ * filling in the vport_id in virtchnl2_vport structure.
* CP responds with the status of the requested operation.
+ *
+ * Associated with VIRTCHNL2_OP_DESTROY_VPORT, VIRTCHNL2_OP_ENABLE_VPORT,
+ * VIRTCHNL2_OP_DISABLE_VPORT.
*/
struct virtchnl2_vport {
__le32 vport_id;
@@ -668,42 +794,43 @@ struct virtchnl2_vport {
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport);
-/* Transmit queue config info */
+/**
+ * struct virtchnl2_txq_info - Transmit queue config info
+ * @dma_ring_addr: DMA address
+ * @type: See enum virtchnl2_queue_type
+ * @queue_id: Queue ID
+ * @relative_queue_id: Valid only if queue model is split and type is transmit
+ * queue. Used in many to one mapping of transmit queues to
+ * completion queue.
+ * @model: See enum virtchnl2_queue_model
+ * @sched_mode: See enum virtchnl2_txq_sched_mode
+ * @qflags: TX queue feature flags (see enum virtchnl2_txq_flags)
+ * @ring_len: Ring length
+ * @tx_compl_queue_id: Valid only if queue model is split and type is transmit
+ * queue.
+ * @peer_type: See enum virtchnl2_peer_type. Valid only if queue type is
+ * VIRTCHNL2_QUEUE_TYPE_MBX_TX.
+ * @peer_rx_queue_id: Valid only if queue type is CONFIG_TX and used to deliver
+ * messages for the respective CONFIG_TX queue.
+ * @pad: Padding
+ * @egress_pasid: Egress PASID, used for the SIOV use case
+ * @egress_hdr_pasid: Egress header PASID
+ * @egress_buf_pasid: Egress buffer PASID
+ * @pad1: Padding for future extensions
+ */
struct virtchnl2_txq_info {
__le64 dma_ring_addr;
-
- /* see VIRTCHNL2_QUEUE_TYPE definitions */
__le32 type;
-
__le32 queue_id;
- /* valid only if queue model is split and type is transmit queue. Used
- * in many to one mapping of transmit queues to completion queue
- */
__le16 relative_queue_id;
-
- /* see VIRTCHNL2_QUEUE_MODEL definitions */
__le16 model;
-
- /* see VIRTCHNL2_TXQ_SCHED_MODE definitions */
__le16 sched_mode;
-
- /* see VIRTCHNL2_TXQ_FLAGS definitions */
__le16 qflags;
__le16 ring_len;
-
- /* valid only if queue model is split and type is transmit queue */
__le16 tx_compl_queue_id;
- /* valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MAILBOX_TX */
- /* see VIRTCHNL2_PEER_TYPE definitions */
__le16 peer_type;
- /* valid only if queue type is CONFIG_TX and used to deliver messages
- * for the respective CONFIG_TX queue
- */
__le16 peer_rx_queue_id;
u8 pad[4];
-
- /* Egress pasid is used for SIOV use case */
__le32 egress_pasid;
__le32 egress_hdr_pasid;
__le32 egress_buf_pasid;
@@ -713,12 +840,20 @@ struct virtchnl2_txq_info {
VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info);
-/* VIRTCHNL2_OP_CONFIG_TX_QUEUES
- * PF sends this message to set up parameters for one or more transmit queues.
- * This message contains an array of num_qinfo instances of virtchnl2_txq_info
- * structures. CP configures requested queues and returns a status code. If
- * num_qinfo specified is greater than the number of queues associated with the
- * vport, an error is returned and no queues are configured.
+/**
+ * struct virtchnl2_config_tx_queues - TX queue config
+ * @vport_id: Vport id
+ * @num_qinfo: Number of virtchnl2_txq_info structs
+ * @pad: Padding for future extensions
+ * @qinfo: Tx queues config info
+ *
+ * PF/VF sends this message to set up parameters for one or more transmit
+ * queues. This message contains an array of num_qinfo instances of
+ * virtchnl2_txq_info structures. CP configures requested queues and returns
+ * a status code. If num_qinfo specified is greater than the number of queues
+ * associated with the vport, an error is returned and no queues are configured.
+ *
+ * Associated with VIRTCHNL2_OP_CONFIG_TX_QUEUES.
*/
struct virtchnl2_config_tx_queues {
__le32 vport_id;
@@ -730,47 +865,55 @@ struct virtchnl2_config_tx_queues {
VIRTCHNL2_CHECK_STRUCT_LEN(72, virtchnl2_config_tx_queues);
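/* Illustrative sketch (not part of this patch): sizing a variable-length
 * VIRTCHNL2_OP_CONFIG_TX_QUEUES message, assuming the qinfo member is
 * declared as a single-entry array (the 72-byte check above matches a
 * 16-byte fixed part plus one 56-byte virtchnl2_txq_info).
 */
static inline size_t example_config_txq_msg_size(u16 num_qinfo)
{
	size_t fixed = sizeof(struct virtchnl2_config_tx_queues) -
		       sizeof(struct virtchnl2_txq_info);

	return fixed + (size_t)num_qinfo * sizeof(struct virtchnl2_txq_info);
}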
-/* Receive queue config info */
+/**
+ * struct virtchnl2_rxq_info - Receive queue config info
+ * @desc_ids: See enum virtchnl2_rx_desc_id_bitmasks
+ * @dma_ring_addr: DMA address
+ * @type: See enum virtchnl2_queue_type
+ * @queue_id: Queue id
+ * @model: See enum virtchnl2_queue_model
+ * @hdr_buffer_size: Header buffer size
+ * @data_buffer_size: Data buffer size
+ * @max_pkt_size: Max packet size
+ * @ring_len: Ring length
+ * @buffer_notif_stride: Buffer notification stride in units of 32-descriptors.
+ * This field must be a power of 2.
+ * @pad: Padding
+ * @dma_head_wb_addr: Applicable only for receive buffer queues
+ * @qflags: Applicable only for receive completion queues.
+ * See enum virtchnl2_rxq_flags.
+ * @rx_buffer_low_watermark: Rx buffer low watermark
+ * @rx_bufq1_id: Buffer queue index of the first buffer queue associated with
+ * the Rx queue. Valid only in split queue model.
+ * @rx_bufq2_id: Buffer queue index of the second buffer queue associated with
+ * the Rx queue. Valid only in split queue model.
+ * @bufq2_ena: Indicates whether there is a second buffer queue; rx_bufq2_id
+ * is valid only if this field is set.
+ * @pad1: Padding
+ * @ingress_pasid: Ingress PASID, used for the SIOV use case
+ * @ingress_hdr_pasid: Ingress header PASID
+ * @ingress_buf_pasid: Ingress buffer PASID
+ * @pad2: Padding for future extensions
+ */
struct virtchnl2_rxq_info {
- /* see VIRTCHNL2_RX_DESC_IDS definitions */
__le64 desc_ids;
__le64 dma_ring_addr;
-
- /* see VIRTCHNL2_QUEUE_TYPE definitions */
__le32 type;
__le32 queue_id;
-
- /* see QUEUE_MODEL definitions */
__le16 model;
-
__le16 hdr_buffer_size;
__le32 data_buffer_size;
__le32 max_pkt_size;
-
__le16 ring_len;
u8 buffer_notif_stride;
u8 pad;
-
- /* Applicable only for receive buffer queues */
__le64 dma_head_wb_addr;
-
- /* Applicable only for receive completion queues */
- /* see VIRTCHNL2_RXQ_FLAGS definitions */
__le16 qflags;
-
__le16 rx_buffer_low_watermark;
-
- /* valid only in split queue model */
__le16 rx_bufq1_id;
- /* valid only in split queue model */
__le16 rx_bufq2_id;
- /* it indicates if there is a second buffer, rx_bufq2_id is valid only
- * if this field is set
- */
u8 bufq2_ena;
u8 pad1[3];
-
- /* Ingress pasid is used for SIOV use case */
__le32 ingress_pasid;
__le32 ingress_hdr_pasid;
__le32 ingress_buf_pasid;
@@ -779,12 +922,20 @@ struct virtchnl2_rxq_info {
};
VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info);
-/* VIRTCHNL2_OP_CONFIG_RX_QUEUES
- * PF sends this message to set up parameters for one or more receive queues.
+/**
+ * struct virtchnl2_config_rx_queues - Rx queues config
+ * @vport_id: Vport id
+ * @num_qinfo: Number of instances
+ * @pad: Padding for future extensions
+ * @qinfo: Rx queues config info
+ *
+ * PF/VF sends this message to set up parameters for one or more receive queues.
* This message contains an array of num_qinfo instances of virtchnl2_rxq_info
* structures. CP configures requested queues and returns a status code.
* If the number of queues specified is greater than the number of queues
* associated with the vport, an error is returned and no queues are configured.
+ *
+ * Associated with VIRTCHNL2_OP_CONFIG_RX_QUEUES.
*/
struct virtchnl2_config_rx_queues {
__le32 vport_id;
@@ -796,12 +947,23 @@ struct virtchnl2_config_rx_queues {
VIRTCHNL2_CHECK_STRUCT_LEN(112, virtchnl2_config_rx_queues);
-/* VIRTCHNL2_OP_ADD_QUEUES
- * PF sends this message to request additional transmit/receive queues beyond
+/**
+ * struct virtchnl2_add_queues - Data for VIRTCHNL2_OP_ADD_QUEUES
+ * @vport_id: Vport id
+ * @num_tx_q: Number of Tx queues
+ * @num_tx_complq: Number of Tx completion queues
+ * @num_rx_q: Number of Rx queues
+ * @num_rx_bufq: Number of Rx buffer queues
+ * @pad: Padding for future extensions
+ * @chunks: Chunks of contiguous queues
+ *
+ * PF/VF sends this message to request additional transmit/receive queues beyond
* the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues
* structure is used to specify the number of each type of queues.
* CP responds with the same structure with the actual number of queues assigned
* followed by num_chunks of virtchnl2_queue_chunk structures.
+ *
+ * Associated with VIRTCHNL2_OP_ADD_QUEUES.
*/
struct virtchnl2_add_queues {
__le32 vport_id;
@@ -817,65 +979,81 @@ struct virtchnl2_add_queues {
VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_add_queues);
/* Queue Groups Extension */
-
+/**
+ * struct virtchnl2_rx_queue_group_info - RX queue group info
+ * @rss_lut_size: IN/OUT, user can ask to update rss_lut size originally
+ * allocated by CreateVport command. New size will be returned
+ * if allocation succeeded, otherwise the original rss_lut_size
+ * from CreateVport will be returned.
+ * @pad: Padding for future extensions
+ */
struct virtchnl2_rx_queue_group_info {
- /* IN/OUT, user can ask to update rss_lut size originally allocated
- * by CreateVport command. New size will be returned if allocation
- * succeeded, otherwise original rss_size from CreateVport will
- * be returned.
- */
__le16 rss_lut_size;
- /* Future extension purpose */
u8 pad[6];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rx_queue_group_info);
+/**
+ * struct virtchnl2_tx_queue_group_info - TX queue group info
+ * @tx_tc: TX TC queue group will be connected to
+ * @priority: Each group can have its own priority, value 0-7, while each group
+ * with a unique priority is strict priority. There can also be a
+ * single set of queue groups configured with the same priority;
+ * these are then assumed to be part of a WFQ arbitration group
+ * and are expected to be assigned a weight.
+ * @is_sp: Determines if queue group is expected to be Strict Priority according
+ * to its priority.
+ * @pad: Padding
+ * @pir_weight: Peak Info Rate Weight in case Queue Group is part of WFQ
+ * arbitration set.
+ * The weights of the groups are independent of each other.
+ * Possible values: 1-200
+ * @cir_pad: Future extension purpose for CIR only
+ * @pad2: Padding for future extensions
+ */
struct virtchnl2_tx_queue_group_info { /* IN */
- /* TX TC queue group will be connected to */
u8 tx_tc;
- /* Each group can have its own priority, value 0-7, while each group
- * with unique priority is strict priority.
- * It can be single set of queue groups which configured with
- * same priority, then they are assumed part of WFQ arbitration
- * group and are expected to be assigned with weight.
- */
u8 priority;
- /* Determines if queue group is expected to be Strict Priority
- * according to its priority
- */
u8 is_sp;
u8 pad;
-
- /* Peak Info Rate Weight in case Queue Group is part of WFQ
- * arbitration set.
- * The weights of the groups are independent of each other.
- * Possible values: 1-200
- */
__le16 pir_weight;
- /* Future extension purpose for CIR only */
u8 cir_pad[2];
- /* Future extension purpose*/
u8 pad2[8];
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_tx_queue_group_info);
+/**
+ * struct virtchnl2_queue_group_id - Queue group ID
+ * @queue_group_id: Queue group ID - Depended on it's type
+ * Data: Is an ID which is relative to Vport
+ * Config & Mailbox: Is an ID which is relative to func
+ * This ID is used in future calls, e.g. delete.
+ * Requested by host and assigned by Control plane.
+ * @queue_group_type: Functional type: See enum virtchnl2_queue_group_type
+ * @pad: Padding for future extensions
+ */
struct virtchnl2_queue_group_id {
- /* Queue group ID - depended on it's type
- * Data: is an ID which is relative to Vport
- * Config & Mailbox: is an ID which is relative to func.
- * This ID is use in future calls, i.e. delete.
- * Requested by host and assigned by Control plane.
- */
__le16 queue_group_id;
- /* Functional type: see VIRTCHNL2_QUEUE_GROUP_TYPE definitions */
__le16 queue_group_type;
u8 pad[4];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_group_id);
+/**
+ * struct virtchnl2_queue_group_info - Queue group info
+ * @qg_id: Queue group ID
+ * @num_tx_q: Number of TX queues
+ * @num_tx_complq: Number of completion queues
+ * @num_rx_q: Number of RX queues
+ * @num_rx_bufq: Number of RX buffer queues
+ * @tx_q_grp_info: TX queue group info
+ * @rx_q_grp_info: RX queue group info
+ * @pad: Padding for future extensions
+ * @chunks: Queue register chunks
+ */
struct virtchnl2_queue_group_info {
/* IN */
struct virtchnl2_queue_group_id qg_id;
@@ -887,13 +1065,18 @@ struct virtchnl2_queue_group_info {
struct virtchnl2_tx_queue_group_info tx_q_grp_info;
struct virtchnl2_rx_queue_group_info rx_q_grp_info;
- /* Future extension purpose */
u8 pad[40];
struct virtchnl2_queue_reg_chunks chunks; /* OUT */
};
VIRTCHNL2_CHECK_STRUCT_LEN(120, virtchnl2_queue_group_info);
+/**
+ * struct virtchnl2_queue_groups - Queue groups list
+ * @num_queue_groups: Total number of queue groups
+ * @pad: Padding for future extensions
+ * @groups: Array of queue group info
+ */
struct virtchnl2_queue_groups {
__le16 num_queue_groups;
u8 pad[6];
@@ -902,78 +1085,107 @@ struct virtchnl2_queue_groups {
VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_queue_groups);
-/* VIRTCHNL2_OP_ADD_QUEUE_GROUPS
+/**
+ * struct virtchnl2_add_queue_groups - Add queue groups
+ * @vport_id: IN, vport_id to add queue group to, same as allocated by
+ * CreateVport. NA for mailbox and other types not assigned to vport.
+ * @pad: Padding for future extensions
+ * @qg_info: IN/OUT. List of all the queue groups
+ *
* PF sends this message to request additional transmit/receive queue groups
* beyond the ones that were assigned via CREATE_VPORT request.
* virtchnl2_add_queue_groups structure is used to specify the number of each
* type of queues. CP responds with the same structure with the actual number of
* groups and queues assigned followed by num_queue_groups and num_chunks of
* virtchnl2_queue_groups and virtchnl2_queue_chunk structures.
+ *
+ * Associated with VIRTCHNL2_OP_ADD_QUEUE_GROUPS.
*/
struct virtchnl2_add_queue_groups {
- /* IN, vport_id to add queue group to, same as allocated by CreateVport.
- * NA for mailbox and other types not assigned to vport
- */
__le32 vport_id;
u8 pad[4];
- /* IN/OUT */
struct virtchnl2_queue_groups qg_info;
};
VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_add_queue_groups);
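+
+/*
+ * Sizing note (informal): this message is variable-length. A request that
+ * carries N queue groups, each describing a single register chunk, occupies
+ * roughly
+ *
+ *	sizeof(struct virtchnl2_add_queue_groups) +
+ *		(N - 1) * sizeof(struct virtchnl2_queue_group_info)
+ *
+ * bytes, and every additional chunk inside a group adds another
+ * sizeof(struct virtchnl2_queue_reg_chunk). This mirrors the arithmetic in
+ * the VIRTCHNL2_OP_ADD_QUEUE_GROUPS case of virtchnl2_vc_validate_vf_msg()
+ * later in this file.
+ */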
-/* VIRTCHNL2_OP_DEL_QUEUE_GROUPS
+/**
+ * struct virtchnl2_delete_queue_groups - Delete queue groups
+ * @vport_id: IN, vport_id to delete queue group from, same as allocated by
+ * CreateVport.
+ * @num_queue_groups: IN/OUT, Defines number of groups provided
+ * @pad: Padding
+ * @qg_ids: IN, IDs & types of Queue Groups to delete
+ *
* PF sends this message to delete queue groups.
* PF sends virtchnl2_delete_queue_groups struct to specify the queue groups
* to be deleted. CP performs requested action and returns status and update
* num_queue_groups with number of successfully deleted queue groups.
+ *
+ * Associated with VIRTCHNL2_OP_DEL_QUEUE_GROUPS.
*/
struct virtchnl2_delete_queue_groups {
- /* IN, vport_id to delete queue group from, same as
- * allocated by CreateVport.
- */
__le32 vport_id;
- /* IN/OUT, Defines number of groups provided below */
__le16 num_queue_groups;
u8 pad[2];
- /* IN, IDs & types of Queue Groups to delete */
struct virtchnl2_queue_group_id qg_ids[1];
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_delete_queue_groups);
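+
+/*
+ * Example (illustrative sketch): deleting a single queue group. The group id
+ * and type are the ones returned when the group was created; the buffer only
+ * needs to grow beyond sizeof(del) when more than one qg_ids entry is sent.
+ * The qg_id_from_create_reply value below is a stand-in for a
+ * struct virtchnl2_queue_group_id saved from the create response.
+ *
+ *	struct virtchnl2_delete_queue_groups del = { 0 };
+ *
+ *	del.vport_id = CPU_TO_LE32(vport_id);
+ *	del.num_queue_groups = CPU_TO_LE16(1);
+ *	del.qg_ids[0] = qg_id_from_create_reply;
+ */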
-/* Structure to specify a chunk of contiguous interrupt vectors */
+/**
+ * struct virtchnl2_vector_chunk - Structure to specify a chunk of contiguous
+ * interrupt vectors.
+ * @start_vector_id: Start vector id
+ * @start_evv_id: Start EVV id
+ * @num_vectors: Number of vectors
+ * @pad: Padding
+ * @dynctl_reg_start: DYN_CTL register offset
+ * @dynctl_reg_spacing: Register spacing between DYN_CTL registers of 2
+ * consecutive vectors.
+ * @itrn_reg_start: ITRN register offset
+ * @itrn_reg_spacing: Register spacing between itrn registers of 2
+ * consecutive vectors.
+ * @itrn_index_spacing: Register spacing between itrn registers of the same
+ * vector where n=0..2.
+ * @pad1: Padding for future extensions
+ *
+ * Register offsets and spacing provided by CP.
+ * Dynamic control registers are used for enabling/disabling/re-enabling
+ * interrupts and updating interrupt rates in the hotpath. Any changes
+ * to interrupt rates in the dynamic control registers will be reflected
+ * in the interrupt throttling rate registers.
+ * itrn registers are used to update interrupt rates for specific
+ * interrupt indices without modifying the state of the interrupt.
+ */
struct virtchnl2_vector_chunk {
__le16 start_vector_id;
__le16 start_evv_id;
__le16 num_vectors;
__le16 pad;
- /* Register offsets and spacing provided by CP.
- * dynamic control registers are used for enabling/disabling/re-enabling
- * interrupts and updating interrupt rates in the hotpath. Any changes
- * to interrupt rates in the dynamic control registers will be reflected
- * in the interrupt throttling rate registers.
- * itrn registers are used to update interrupt rates for specific
- * interrupt indices without modifying the state of the interrupt.
- */
__le32 dynctl_reg_start;
- /* register spacing between dynctl registers of 2 consecutive vectors */
__le32 dynctl_reg_spacing;
__le32 itrn_reg_start;
- /* register spacing between itrn registers of 2 consecutive vectors */
__le32 itrn_reg_spacing;
- /* register spacing between itrn registers of the same vector
- * where n=0..2
- */
__le32 itrn_index_spacing;
u8 pad1[4];
};
VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
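+
+/*
+ * Usage note (informal sketch): for the i-th vector in a chunk, the dynamic
+ * control register and ITR register n (n = 0..2) are typically located at
+ *
+ *	dynctl_off = LE32_TO_CPU(vchunk->dynctl_reg_start) +
+ *		     i * LE32_TO_CPU(vchunk->dynctl_reg_spacing);
+ *	itrn_off   = LE32_TO_CPU(vchunk->itrn_reg_start) +
+ *		     i * LE32_TO_CPU(vchunk->itrn_reg_spacing) +
+ *		     n * LE32_TO_CPU(vchunk->itrn_index_spacing);
+ *
+ * This only illustrates how the spacings compose; the authoritative register
+ * layout comes from the device programming guide.
+ */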
-/* Structure to specify several chunks of contiguous interrupt vectors */
+/**
+ * struct virtchnl2_vector_chunks - Chunks of contiguous interrupt vectors
+ * @num_vchunks: number of vector chunks
+ * @pad: Padding for future extensions
+ * @vchunks: Chunks of contiguous vector info
+ *
+ * PF/VF sends virtchnl2_vector_chunks struct to specify the vectors it is
+ * giving away. CP performs requested action and returns status.
+ *
+ * Associated with VIRTCHNL2_OP_DEALLOC_VECTORS.
+ */
struct virtchnl2_vector_chunks {
__le16 num_vchunks;
u8 pad[14];
@@ -983,12 +1195,19 @@ struct virtchnl2_vector_chunks {
VIRTCHNL2_CHECK_STRUCT_LEN(48, virtchnl2_vector_chunks);
-/* VIRTCHNL2_OP_ALLOC_VECTORS
- * PF sends this message to request additional interrupt vectors beyond the
+/**
+ * struct virtchnl2_alloc_vectors - Vector allocation info
+ * @num_vectors: Number of vectors
+ * @pad: Padding for future extensions
+ * @vchunks: Chunks of contiguous vector info
+ *
+ * PF/VF sends this message to request additional interrupt vectors beyond the
* ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors
* structure is used to specify the number of vectors requested. CP responds
* with the same structure with the actual number of vectors assigned followed
* by virtchnl2_vector_chunks structure identifying the vector ids.
+ *
+ * Associated with VIRTCHNL2_OP_ALLOC_VECTORS.
*/
struct virtchnl2_alloc_vectors {
__le16 num_vectors;
@@ -999,46 +1218,46 @@ struct virtchnl2_alloc_vectors {
VIRTCHNL2_CHECK_STRUCT_LEN(64, virtchnl2_alloc_vectors);
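+
+/*
+ * Example (illustrative sketch): asking the CP for four more vectors. The
+ * send_to_cp() helper is hypothetical.
+ *
+ *	struct virtchnl2_alloc_vectors av = { 0 };
+ *
+ *	av.num_vectors = CPU_TO_LE16(4);
+ *	send_to_cp(VIRTCHNL2_OP_ALLOC_VECTORS, (u8 *)&av, sizeof(av));
+ *
+ * On success the reply carries the granted count and the vector ids in the
+ * trailing vchunks structure.
+ */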
-/* VIRTCHNL2_OP_DEALLOC_VECTORS
- * PF sends this message to release the vectors.
- * PF sends virtchnl2_vector_chunks struct to specify the vectors it is giving
- * away. CP performs requested action and returns status.
- */
-
-/* VIRTCHNL2_OP_GET_RSS_LUT
- * VIRTCHNL2_OP_SET_RSS_LUT
- * PF sends this message to get or set RSS lookup table. Only supported if
+/**
+ * struct virtchnl2_rss_lut - RSS LUT info
+ * @vport_id: Vport id
+ * @lut_entries_start: Start of LUT entries
+ * @lut_entries: Number of LUT entries
+ * @pad: Padding
+ * @lut: RSS lookup table
+ *
+ * PF/VF sends this message to get or set RSS lookup table. Only supported if
* both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
- * negotiation. Uses the virtchnl2_rss_lut structure
+ * negotiation.
+ *
+ * Associated with VIRTCHNL2_OP_GET_RSS_LUT and VIRTCHNL2_OP_SET_RSS_LUT.
*/
struct virtchnl2_rss_lut {
__le32 vport_id;
__le16 lut_entries_start;
__le16 lut_entries;
u8 pad[4];
- /* RSS lookup table */
__le32 lut[1];
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_lut);
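+
+/*
+ * Sizing note (informal): the LUT is carried inline, so a set request for n
+ * entries occupies roughly
+ *
+ *	sizeof(struct virtchnl2_rss_lut) + (n - 1) * sizeof(__le32)
+ *
+ * bytes, with lut_entries = n and lut[0..n-1] holding the queue indices.
+ * The message-length validation helper later in this file applies the same
+ * rule.
+ */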
-/* VIRTCHNL2_OP_GET_RSS_KEY
- * PF sends this message to get RSS key. Only supported if both PF and CP
- * drivers set the VIRTCHNL2_CAP_RSS bit during configuration negotiation. Uses
- * the virtchnl2_rss_key structure
- */
-
-/* VIRTCHNL2_OP_GET_RSS_HASH
- * VIRTCHNL2_OP_SET_RSS_HASH
- * PF sends these messages to get and set the hash filter enable bits for RSS.
- * By default, the CP sets these to all possible traffic types that the
+/**
+ * struct virtchnl2_rss_hash - RSS hash info
+ * @ptype_groups: Packet type groups bitmap
+ * @vport_id: Vport id
+ * @pad: Padding for future extensions
+ *
+ * PF/VF sends these messages to get and set the hash filter enable bits for
+ * RSS. By default, the CP sets these to all possible traffic types that the
* hardware supports. The PF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
* Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit
* during configuration negotiation.
+ *
+ * Associated with VIRTCHNL2_OP_GET_RSS_HASH and VIRTCHNL2_OP_SET_RSS_HASH.
*/
struct virtchnl2_rss_hash {
- /* Packet Type Groups bitmap */
__le64 ptype_groups;
__le32 vport_id;
u8 pad[4];
@@ -1046,12 +1265,18 @@ struct virtchnl2_rss_hash {
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash);
-/* VIRTCHNL2_OP_SET_SRIOV_VFS
+/**
+ * struct virtchnl2_sriov_vfs_info - VFs info
+ * @num_vfs: Number of VFs
+ * @pad: Padding for future extensions
+ *
* This message is used to set number of SRIOV VFs to be created. The actual
* allocation of resources for the VFs in terms of vport, queues and interrupts
- * is done by CP. When this call completes, the APF driver calls
+ * is done by CP. When this call completes, the IDPF driver calls
* pci_enable_sriov to let the OS instantiate the SRIOV PCIE devices.
 * Setting the number of VFs to 0 destroys all the VFs of this function.
+ *
+ * Associated with VIRTCHNL2_OP_SET_SRIOV_VFS.
*/
struct virtchnl2_sriov_vfs_info {
@@ -1061,8 +1286,14 @@ struct virtchnl2_sriov_vfs_info {
VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);
-/* structure to specify single chunk of queue */
-/* 'chunks' is fixed size(not flexible) and will be deprecated at some point */
+/**
+ * struct virtchnl2_non_flex_queue_reg_chunks - Specify several chunks of
+ * contiguous queues.
+ * @num_chunks: Number of chunks
+ * @pad: Padding
+ * @chunks: Chunks of queue info. 'chunks' is fixed size (not flexible) and
+ * will be deprecated at some point.
+ */
struct virtchnl2_non_flex_queue_reg_chunks {
__le16 num_chunks;
u8 pad[6];
@@ -1071,8 +1302,14 @@ struct virtchnl2_non_flex_queue_reg_chunks {
VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_non_flex_queue_reg_chunks);
-/* structure to specify single chunk of interrupt vector */
-/* 'vchunks' is fixed size(not flexible) and will be deprecated at some point */
+/**
+ * struct virtchnl2_non_flex_vector_chunks - Chunks of contiguous interrupt
+ * vectors.
+ * @num_vchunks: Number of vector chunks
+ * @pad: Padding for future extensions
+ * @vchunks: Chunks of contiguous vector info. 'vchunks' is fixed size
+ * (not flexible) and will be deprecated at some point.
+ */
struct virtchnl2_non_flex_vector_chunks {
__le16 num_vchunks;
u8 pad[14];
@@ -1081,40 +1318,49 @@ struct virtchnl2_non_flex_vector_chunks {
VIRTCHNL2_CHECK_STRUCT_LEN(48, virtchnl2_non_flex_vector_chunks);
-/* VIRTCHNL2_OP_NON_FLEX_CREATE_ADI
+/**
+ * struct virtchnl2_non_flex_create_adi - Create ADI
+ * @pasid: PF sends PASID to CP
+ * @mbx_id: mbx_id is set to 1 by PF when requesting CP to provide HW mailbox
+ * id, else it is set to 0 by PF.
+ * @mbx_vec_id: PF sends mailbox vector id to CP
+ * @adi_index: PF populates this ADI index
+ * @adi_id: CP populates ADI id
+ * @pad: Padding
+ * @chunks: CP populates queue chunks
+ * @vchunks: PF sends vector chunks to CP
+ *
* PF sends this message to CP to create ADI by filling in required
* fields of virtchnl2_non_flex_create_adi structure.
- * CP responds with the updated virtchnl2_non_flex_create_adi structure containing
- * the necessary fields followed by chunks which in turn will have an array of
- * num_chunks entries of virtchnl2_queue_chunk structures.
+ * CP responds with the updated virtchnl2_non_flex_create_adi structure
+ * containing the necessary fields followed by chunks which in turn will have
+ * an array of num_chunks entries of virtchnl2_queue_chunk structures.
+ *
+ * Associated with VIRTCHNL2_OP_NON_FLEX_CREATE_ADI.
*/
struct virtchnl2_non_flex_create_adi {
- /* PF sends PASID to CP */
__le32 pasid;
- /*
- * mbx_id is set to 1 by PF when requesting CP to provide HW mailbox
- * id else it is set to 0 by PF
- */
__le16 mbx_id;
- /* PF sends mailbox vector id to CP */
__le16 mbx_vec_id;
- /* PF populates this ADI index */
__le16 adi_index;
- /* CP populates ADI id */
__le16 adi_id;
u8 pad[68];
- /* CP populates queue chunks */
struct virtchnl2_non_flex_queue_reg_chunks chunks;
- /* PF sends vector chunks to CP */
struct virtchnl2_non_flex_vector_chunks vchunks;
};
VIRTCHNL2_CHECK_STRUCT_LEN(168, virtchnl2_non_flex_create_adi);
-/* VIRTCHNL2_OP_DESTROY_ADI
+/**
+ * struct virtchnl2_non_flex_destroy_adi - Destroy ADI
+ * @adi_id: ADI id to destroy
+ * @pad: Padding
+ *
* PF sends this message to CP to destroy ADI by filling
 * in the adi_id in virtchnl2_non_flex_destroy_adi structure.
* CP responds with the status of the requested operation.
+ *
+ * Associated with VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI.
*/
struct virtchnl2_non_flex_destroy_adi {
__le16 adi_id;
@@ -1123,7 +1369,17 @@ struct virtchnl2_non_flex_destroy_adi {
VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_non_flex_destroy_adi);
-/* Based on the descriptor type the PF supports, CP fills ptype_id_10 or
+/**
+ * struct virtchnl2_ptype - Packet type info
+ * @ptype_id_10: 10-bit packet type
+ * @ptype_id_8: 8-bit packet type
+ * @proto_id_count: Number of protocol ids the packet supports, maximum of 32
+ * protocol ids are supported.
+ * @pad: Padding
+ * @proto_id: proto_id_count decides the allocation of protocol id array.
+ * See enum virtchnl2_proto_hdr_type.
+ *
+ * Based on the descriptor type the PF supports, CP fills ptype_id_10 or
* ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value
* is set to 0xFFFF, PF should consider this ptype as dummy one and it is the
* last ptype.
@@ -1131,32 +1387,42 @@ VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_non_flex_destroy_adi);
struct virtchnl2_ptype {
__le16 ptype_id_10;
u8 ptype_id_8;
- /* number of protocol ids the packet supports, maximum of 32
- * protocol ids are supported
- */
u8 proto_id_count;
__le16 pad;
- /* proto_id_count decides the allocation of protocol id array */
- /* see VIRTCHNL2_PROTO_HDR_TYPE */
__le16 proto_id[1];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptype);
-/* VIRTCHNL2_OP_GET_PTYPE_INFO
- * PF sends this message to CP to get all supported packet types. It does by
- * filling in start_ptype_id and num_ptypes. Depending on descriptor type the
- * PF supports, it sets num_ptypes to 1024 (10-bit ptype) for flex descriptor
- * and 256 (8-bit ptype) for base descriptor support. CP responds back to PF by
- * populating start_ptype_id, num_ptypes and array of ptypes. If all ptypes
- * doesn't fit into one mailbox buffer, CP splits ptype info into multiple
- * messages, where each message will have the start ptype id, number of ptypes
- * sent in that message and the ptype array itself. When CP is done updating
- * all ptype information it extracted from the package (number of ptypes
- * extracted might be less than what PF expects), it will append a dummy ptype
- * (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF) to the ptype
- * array. PF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO
- * messages.
+/**
+ * struct virtchnl2_get_ptype_info - Packet type info
+ * @start_ptype_id: Starting ptype ID
+ * @num_ptypes: Number of packet types from start_ptype_id
+ * @pad: Padding for future extensions
+ * @ptype: Array of packet type info
+ *
+ * The total number of supported packet types is based on the descriptor type.
+ * For the flex descriptor, it is 1024 (10-bit ptype), and for the base
+ * descriptor, it is 256 (8-bit ptype). Send this message to the CP by
+ * populating the 'start_ptype_id' and the 'num_ptypes'. CP responds with the
+ * 'start_ptype_id', 'num_ptypes', and the array of ptype (virtchnl2_ptype) that
+ * are added at the end of the 'virtchnl2_get_ptype_info' message (Note: There
+ * is no specific field for the ptypes but are added at the end of the
+ * ptype info message. PF/VF is expected to extract the ptypes accordingly.
+ * Reason for doing this is because compiler doesn't allow nested flexible
+ * array fields).
+ *
+ * If all the ptypes don't fit into one mailbox buffer, CP splits the
+ * ptype info into multiple messages, where each message will have its own
+ * 'start_ptype_id', 'num_ptypes', and the ptype array itself. When CP is done
+ * updating all the ptype information extracted from the package (the number of
+ * ptypes extracted might be less than what PF/VF expects), it will append a
+ * dummy ptype (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF)
+ * to the ptype array.
+ *
+ * PF/VF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO messages.
+ *
+ * Associated with VIRTCHNL2_OP_GET_PTYPE_INFO.
*/
struct virtchnl2_get_ptype_info {
__le16 start_ptype_id;
@@ -1167,25 +1433,46 @@ struct virtchnl2_get_ptype_info {
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_get_ptype_info);
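+
+/*
+ * Example (informal sketch) of walking one GET_PTYPE_INFO response buffer.
+ * Each virtchnl2_ptype entry is variable-length because of its trailing
+ * proto_id array, so the walk advances by the per-entry size. The
+ * record_ptype() helper and the LE16_TO_CPU() byte-order macro are
+ * hypothetical.
+ *
+ *	struct virtchnl2_get_ptype_info *info = (void *)recv_buf;
+ *	u8 *pos = (u8 *)info->ptype;
+ *	u16 i;
+ *
+ *	for (i = 0; i < LE16_TO_CPU(info->num_ptypes); i++) {
+ *		struct virtchnl2_ptype *pt = (struct virtchnl2_ptype *)pos;
+ *
+ *		if (LE16_TO_CPU(pt->ptype_id_10) == 0xFFFF)
+ *			break;
+ *		record_ptype(pt);
+ *		pos += sizeof(*pt) +
+ *		       (pt->proto_id_count - 1) * sizeof(pt->proto_id[0]);
+ *	}
+ *
+ * The 0xFFFF check handles the dummy terminator ptype described above.
+ */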
-/* VIRTCHNL2_OP_GET_STATS
+/**
+ * struct virtchnl2_vport_stats - Vport statistics
+ * @vport_id: Vport id
+ * @pad: Padding
+ * @rx_bytes: Received bytes
+ * @rx_unicast: Received unicast packets
+ * @rx_multicast: Received multicast packets
+ * @rx_broadcast: Received broadcast packets
+ * @rx_discards: Discarded packets on receive
+ * @rx_errors: Receive errors
+ * @rx_unknown_protocol: Unknown protocol
+ * @tx_bytes: Transmitted bytes
+ * @tx_unicast: Transmitted unicast packets
+ * @tx_multicast: Transmitted multicast packets
+ * @tx_broadcast: Transmitted broadcast packets
+ * @tx_discards: Discarded packets on transmit
+ * @tx_errors: Transmit errors
+ * @rx_invalid_frame_length: Packets with invalid frame length
+ * @rx_overflow_drop: Packets dropped on buffer overflow
+ *
 * PF/VF sends this message to CP to get the updated stats by specifying the
* vport_id. CP responds with stats in struct virtchnl2_vport_stats.
+ *
+ * Associated with VIRTCHNL2_OP_GET_STATS.
*/
struct virtchnl2_vport_stats {
__le32 vport_id;
u8 pad[4];
- __le64 rx_bytes; /* received bytes */
- __le64 rx_unicast; /* received unicast pkts */
- __le64 rx_multicast; /* received multicast pkts */
- __le64 rx_broadcast; /* received broadcast pkts */
+ __le64 rx_bytes;
+ __le64 rx_unicast;
+ __le64 rx_multicast;
+ __le64 rx_broadcast;
__le64 rx_discards;
__le64 rx_errors;
__le64 rx_unknown_protocol;
- __le64 tx_bytes; /* transmitted bytes */
- __le64 tx_unicast; /* transmitted unicast pkts */
- __le64 tx_multicast; /* transmitted multicast pkts */
- __le64 tx_broadcast; /* transmitted broadcast pkts */
+ __le64 tx_bytes;
+ __le64 tx_unicast;
+ __le64 tx_multicast;
+ __le64 tx_broadcast;
__le64 tx_discards;
__le64 tx_errors;
__le64 rx_invalid_frame_length;
@@ -1194,7 +1481,9 @@ struct virtchnl2_vport_stats {
VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
-/* physical port statistics */
+/**
+ * struct virtchnl2_phy_port_stats - Physical port statistics
+ */
struct virtchnl2_phy_port_stats {
__le64 rx_bytes;
__le64 rx_unicast_pkts;
@@ -1247,10 +1536,17 @@ struct virtchnl2_phy_port_stats {
VIRTCHNL2_CHECK_STRUCT_LEN(600, virtchnl2_phy_port_stats);
-/* VIRTCHNL2_OP_GET_PORT_STATS
- * PF/VF sends this message to CP to get the updated stats by specifying the
+/**
+ * struct virtchnl2_port_stats - Port statistics
+ * @vport_id: Vport ID
+ * @pad: Padding
+ * @phy_port_stats: Physical port statistics
+ * @virt_port_stats: Vport statistics
+ *
+ * PF/VF sends this message to CP to get the updated stats by specifying the
+ * vport_id. CP responds with stats in struct virtchnl2_port_stats that
* includes both physical port as well as vport statistics.
+ *
+ * Associated with VIRTCHNL2_OP_GET_PORT_STATS.
*/
struct virtchnl2_port_stats {
__le32 vport_id;
@@ -1262,44 +1558,61 @@ struct virtchnl2_port_stats {
VIRTCHNL2_CHECK_STRUCT_LEN(736, virtchnl2_port_stats);
-/* VIRTCHNL2_OP_EVENT
+/**
+ * struct virtchnl2_event - Event info
+ * @event: Event opcode. See enum virtchnl2_event_codes
+ * @link_speed: Link_speed provided in Mbps
+ * @vport_id: Vport ID
+ * @link_status: Link status
+ * @pad: Padding
+ * @adi_id: ADI id
+ *
* CP sends this message to inform the PF/VF driver of events that may affect
* it. No direct response is expected from the driver, though it may generate
* other messages in response to this one.
+ *
+ * Associated with VIRTCHNL2_OP_EVENT.
*/
struct virtchnl2_event {
- /* see VIRTCHNL2_EVENT_CODES definitions */
__le32 event;
- /* link_speed provided in Mbps */
__le32 link_speed;
__le32 vport_id;
u8 link_status;
u8 pad;
-
- /* CP sends reset notification to PF with corresponding ADI ID */
__le16 adi_id;
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event);
-/* VIRTCHNL2_OP_GET_RSS_KEY
- * VIRTCHNL2_OP_SET_RSS_KEY
+/**
+ * struct virtchnl2_rss_key - RSS key info
+ * @vport_id: Vport id
+ * @key_len: Length of RSS key
+ * @pad: Padding
+ * @key: RSS hash key, packed bytes
+ *
* PF/VF sends this message to get or set RSS key. Only supported if both
* PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
- * negotiation. Uses the virtchnl2_rss_key structure
+ * negotiation.
+ *
+ * Associated with VIRTCHNL2_OP_GET_RSS_KEY and VIRTCHNL2_OP_SET_RSS_KEY.
*/
struct virtchnl2_rss_key {
__le32 vport_id;
__le16 key_len;
u8 pad;
- u8 key[1]; /* RSS hash key, packed bytes */
+ u8 key[1];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key);
-/* structure to specify a chunk of contiguous queues */
+/**
+ * struct virtchnl2_queue_chunk - Chunk of contiguous queues
+ * @type: See enum virtchnl2_queue_type
+ * @start_queue_id: Starting queue id
+ * @num_queues: Number of queues
+ * @pad: Padding for future extensions
+ */
struct virtchnl2_queue_chunk {
- /* see VIRTCHNL2_QUEUE_TYPE definitions */
__le32 type;
__le32 start_queue_id;
__le32 num_queues;
@@ -1308,7 +1621,11 @@ struct virtchnl2_queue_chunk {
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
-/* structure to specify several chunks of contiguous queues */
+/**
+ * struct virtchnl2_queue_chunks - Chunks of contiguous queues
+ * @num_chunks: Number of chunks
+ * @pad: Padding
+ * @chunks: Chunks of contiguous queues info
+ */
struct virtchnl2_queue_chunks {
__le16 num_chunks;
u8 pad[6];
@@ -1317,14 +1634,19 @@ struct virtchnl2_queue_chunks {
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_chunks);
-/* VIRTCHNL2_OP_ENABLE_QUEUES
- * VIRTCHNL2_OP_DISABLE_QUEUES
- * VIRTCHNL2_OP_DEL_QUEUES
+/**
+ * struct virtchnl2_del_ena_dis_queues - Enable/disable queues info
+ * @vport_id: Vport id
+ * @pad: Padding
+ * @chunks: Chunks of contiguous queues info
*
- * PF sends these messages to enable, disable or delete queues specified in
- * chunks. PF sends virtchnl2_del_ena_dis_queues struct to specify the queues
+ * PF/VF sends these messages to enable, disable or delete queues specified in
+ * chunks. It sends virtchnl2_del_ena_dis_queues struct to specify the queues
* to be enabled/disabled/deleted. Also applicable to single queue receive or
* transmit. CP performs requested action and returns status.
+ *
+ * Associated with VIRTCHNL2_OP_ENABLE_QUEUES, VIRTCHNL2_OP_DISABLE_QUEUES and
+ * VIRTCHNL2_OP_DEL_QUEUES.
*/
struct virtchnl2_del_ena_dis_queues {
__le32 vport_id;
@@ -1335,30 +1657,43 @@ struct virtchnl2_del_ena_dis_queues {
VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_del_ena_dis_queues);
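+
+/*
+ * Example (illustrative sketch): enabling four contiguous Tx queues in a
+ * single chunk. The queue type value is assumed to come from
+ * enum virtchnl2_queue_type earlier in this file; the send_to_cp() helper is
+ * hypothetical.
+ *
+ *	struct virtchnl2_del_ena_dis_queues eq = { 0 };
+ *
+ *	eq.vport_id = CPU_TO_LE32(vport_id);
+ *	eq.chunks.num_chunks = CPU_TO_LE16(1);
+ *	eq.chunks.chunks[0].type = CPU_TO_LE32(VIRTCHNL2_QUEUE_TYPE_TX);
+ *	eq.chunks.chunks[0].start_queue_id = CPU_TO_LE32(first_txq_id);
+ *	eq.chunks.chunks[0].num_queues = CPU_TO_LE32(4);
+ *	send_to_cp(VIRTCHNL2_OP_ENABLE_QUEUES, (u8 *)&eq, sizeof(eq));
+ */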
-/* Queue to vector mapping */
+/**
+ * struct virtchnl2_queue_vector - Queue to vector mapping
+ * @queue_id: Queue id
+ * @vector_id: Vector id
+ * @pad: Padding
+ * @itr_idx: See enum virtchnl2_itr_idx
+ * @queue_type: See enum virtchnl2_queue_type
+ * @pad1: Padding for future extensions
+ */
struct virtchnl2_queue_vector {
__le32 queue_id;
__le16 vector_id;
u8 pad[2];
- /* see VIRTCHNL2_ITR_IDX definitions */
__le32 itr_idx;
- /* see VIRTCHNL2_QUEUE_TYPE definitions */
__le32 queue_type;
u8 pad1[8];
};
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector);
-/* VIRTCHNL2_OP_MAP_QUEUE_VECTOR
- * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR
+/**
+ * struct virtchnl2_queue_vector_maps - Map/unmap queues info
+ * @vport_id: Vport id
+ * @num_qv_maps: Number of queue vector maps
+ * @pad: Padding
+ * @qv_maps: Queue to vector maps
*
- * PF sends this message to map or unmap queues to vectors and interrupt
+ * PF/VF sends this message to map or unmap queues to vectors and interrupt
* throttling rate index registers. External data buffer contains
* virtchnl2_queue_vector_maps structure that contains num_qv_maps of
* virtchnl2_queue_vector structures. CP maps the requested queue vector maps
* after validating the queue and vector ids and returns a status code.
+ *
+ * Associated with VIRTCHNL2_OP_MAP_QUEUE_VECTOR and
+ * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR.
*/
struct virtchnl2_queue_vector_maps {
__le32 vport_id;
@@ -1369,11 +1704,17 @@ struct virtchnl2_queue_vector_maps {
VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_vector_maps);
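+
+/*
+ * Example (informal sketch): mapping one Rx queue to a vector. With more than
+ * one map the buffer grows by sizeof(struct virtchnl2_queue_vector) per extra
+ * qv_maps entry, which is the same sizing the validation helper below uses.
+ * VIRTCHNL2_ITR_IDX_0 and VIRTCHNL2_QUEUE_TYPE_RX are assumed to come from
+ * the ITR index and queue type enums earlier in this file.
+ *
+ *	struct virtchnl2_queue_vector_maps *maps = buf;
+ *
+ *	maps->vport_id = CPU_TO_LE32(vport_id);
+ *	maps->num_qv_maps = CPU_TO_LE16(1);
+ *	maps->qv_maps[0].queue_id = CPU_TO_LE32(rxq_id);
+ *	maps->qv_maps[0].vector_id = CPU_TO_LE16(vec_id);
+ *	maps->qv_maps[0].itr_idx = CPU_TO_LE32(VIRTCHNL2_ITR_IDX_0);
+ *	maps->qv_maps[0].queue_type = CPU_TO_LE32(VIRTCHNL2_QUEUE_TYPE_RX);
+ */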
-/* VIRTCHNL2_OP_LOOPBACK
+/**
+ * struct virtchnl2_loopback - Loopback info
+ * @vport_id: Vport id
+ * @enable: Enable/disable
+ * @pad: Padding for future extensions
*
* PF/VF sends this message to transition to/from the loopback state. Setting
* the 'enable' to 1 enables the loopback state and setting 'enable' to 0
* disables it. CP configures the state to loopback and returns status.
+ *
+ * Associated with VIRTCHNL2_OP_LOOPBACK.
*/
struct virtchnl2_loopback {
__le32 vport_id;
@@ -1383,22 +1724,31 @@ struct virtchnl2_loopback {
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback);
-/* structure to specify each MAC address */
+/**
+ * struct virtchnl2_mac_addr - MAC address info
+ * @addr: MAC address
+ * @type: MAC type. See enum virtchnl2_mac_addr_type.
+ * @pad: Padding for future extensions
+ */
struct virtchnl2_mac_addr {
u8 addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
- /* see VIRTCHNL2_MAC_TYPE definitions */
u8 type;
u8 pad;
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr);
-/* VIRTCHNL2_OP_ADD_MAC_ADDR
- * VIRTCHNL2_OP_DEL_MAC_ADDR
+/**
+ * struct virtchnl2_mac_addr_list - List of MAC addresses
+ * @vport_id: Vport id
+ * @num_mac_addr: Number of MAC addresses
+ * @pad: Padding
+ * @mac_addr_list: List with MAC address info
*
* PF/VF driver uses this structure to send list of MAC addresses to be
 * added/deleted to the CP, whereas CP performs the action and returns the
* status.
+ *
+ * Associated with VIRTCHNL2_OP_ADD_MAC_ADDR and VIRTCHNL2_OP_DEL_MAC_ADDR.
*/
struct virtchnl2_mac_addr_list {
__le32 vport_id;
@@ -1409,30 +1759,40 @@ struct virtchnl2_mac_addr_list {
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mac_addr_list);
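+
+/*
+ * Example (illustrative sketch): adding one primary MAC address. The buffer
+ * grows by sizeof(struct virtchnl2_mac_addr) per additional entry;
+ * VIRTCHNL2_MAC_ADDR_PRIMARY is assumed to come from the MAC address type
+ * enum earlier in this file and send_to_cp() is hypothetical.
+ *
+ *	struct virtchnl2_mac_addr_list *list = buf;
+ *
+ *	list->vport_id = CPU_TO_LE32(vport_id);
+ *	list->num_mac_addr = CPU_TO_LE16(1);
+ *	memcpy(list->mac_addr_list[0].addr, mac,
+ *	       VIRTCHNL2_ETH_LENGTH_OF_ADDRESS);
+ *	list->mac_addr_list[0].type = VIRTCHNL2_MAC_ADDR_PRIMARY;
+ *	send_to_cp(VIRTCHNL2_OP_ADD_MAC_ADDR, (u8 *)list, sizeof(*list));
+ */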
-/* VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE
+/**
+ * struct virtchnl2_promisc_info - Promiscuous type information
+ * @vport_id: Vport id
+ * @flags: See enum virtchnl2_promisc_flags
+ * @pad: Padding for future extensions
*
 * PF/VF sends vport id and flags to the CP, whereas CP performs the action
* and returns the status.
+ *
+ * Associated with VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE.
*/
struct virtchnl2_promisc_info {
__le32 vport_id;
- /* see VIRTCHNL2_PROMISC_FLAGS definitions */
__le16 flags;
u8 pad[2];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
-/* VIRTCHNL2_PTP_CAPS
- * PTP capabilities
+/**
+ * enum virtchnl2_ptp_caps - PTP capabilities
*/
-#define VIRTCHNL2_PTP_CAP_LEGACY_CROSS_TIME BIT(0)
-#define VIRTCHNL2_PTP_CAP_PTM BIT(1)
-#define VIRTCHNL2_PTP_CAP_DEVICE_CLOCK_CONTROL BIT(2)
-#define VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECT BIT(3)
-#define VIRTCHNL2_PTP_CAP_TX_TSTAMPS_VIRTCHNL BIT(4)
+enum virtchnl2_ptp_caps {
+ VIRTCHNL2_PTP_CAP_LEGACY_CROSS_TIME = BIT(0),
+ VIRTCHNL2_PTP_CAP_PTM = BIT(1),
+ VIRTCHNL2_PTP_CAP_DEVICE_CLOCK_CONTROL = BIT(2),
+ VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECT = BIT(3),
+ VIRTCHNL2_PTP_CAP_TX_TSTAMPS_VIRTCHNL = BIT(4),
+};
-/* Legacy cross time registers offsets */
+/**
+ * struct virtchnl2_ptp_legacy_cross_time_reg - Legacy cross time registers
+ * offsets.
+ */
struct virtchnl2_ptp_legacy_cross_time_reg {
__le32 shadow_time_0;
__le32 shadow_time_l;
@@ -1442,7 +1802,9 @@ struct virtchnl2_ptp_legacy_cross_time_reg {
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_legacy_cross_time_reg);
-/* PTM cross time registers offsets */
+/**
+ * struct virtchnl2_ptp_ptm_cross_time_reg - PTM cross time registers offsets
+ */
struct virtchnl2_ptp_ptm_cross_time_reg {
__le32 art_l;
__le32 art_h;
@@ -1452,7 +1814,10 @@ struct virtchnl2_ptp_ptm_cross_time_reg {
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_ptm_cross_time_reg);
-/* Registers needed to control the main clock */
+/**
+ * struct virtchnl2_ptp_device_clock_control - Registers needed to control the
+ * main clock.
+ */
struct virtchnl2_ptp_device_clock_control {
__le32 cmd;
__le32 incval_l;
@@ -1464,7 +1829,13 @@ struct virtchnl2_ptp_device_clock_control {
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_device_clock_control);
-/* Structure that defines tx tstamp entry - index and register offset */
+/**
+ * struct virtchnl2_ptp_tx_tstamp_entry - PTP TX timestamp entry
+ * @tx_latch_register_base: TX latch register base
+ * @tx_latch_register_offset: TX latch register offset
+ * @index: Index
+ * @pad: Padding
+ */
struct virtchnl2_ptp_tx_tstamp_entry {
__le32 tx_latch_register_base;
__le32 tx_latch_register_offset;
@@ -1474,12 +1845,15 @@ struct virtchnl2_ptp_tx_tstamp_entry {
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_entry);
-/* Structure that defines tx tstamp entries - total number of latches
- * and the array of entries.
+/**
+ * struct virtchnl2_ptp_tx_tstamp - Structure that defines tx tstamp entries
+ * @num_latches: Total number of latches
+ * @latch_size: Latch size expressed in bits
+ * @pad: Padding
+ * @ptp_tx_tstamp_entries: Array of TX timestamp entries
*/
struct virtchnl2_ptp_tx_tstamp {
__le16 num_latches;
- /* latch size expressed in bits */
__le16 latch_size;
u8 pad[4];
struct virtchnl2_ptp_tx_tstamp_entry ptp_tx_tstamp_entries[1];
@@ -1487,13 +1861,21 @@ struct virtchnl2_ptp_tx_tstamp {
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_tx_tstamp);
-/* VIRTCHNL2_OP_GET_PTP_CAPS
+/**
+ * struct virtchnl2_get_ptp_caps - Get PTP capabilities
+ * @ptp_caps: PTP capability bitmap. See enum virtchnl2_ptp_caps.
+ * @pad: Padding
+ * @legacy_cross_time_reg: Legacy cross time register
+ * @ptm_cross_time_reg: PTM cross time register
+ * @device_clock_control: Device clock control
+ * @tx_tstamp: TX timestamp
+ *
 * PF/VF sends this message to negotiate PTP capabilities. CP updates bitmap
 * with supported features and fills in the appropriate structures.
+ *
+ * Associated with VIRTCHNL2_OP_GET_PTP_CAPS.
*/
struct virtchnl2_get_ptp_caps {
- /* PTP capability bitmap */
- /* see VIRTCHNL2_PTP_CAPS definitions */
__le32 ptp_caps;
u8 pad[4];
@@ -1505,7 +1887,15 @@ struct virtchnl2_get_ptp_caps {
VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_get_ptp_caps);
-/* Structure that describes tx tstamp values, index and validity */
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes tx tstamp
+ * values, index and validity.
+ * @tstamp_h: Timestamp high
+ * @tstamp_l: Timestamp low
+ * @index: Index
+ * @valid: Timestamp validity
+ * @pad: Padding
+ */
struct virtchnl2_ptp_tx_tstamp_latch {
__le32 tstamp_h;
__le32 tstamp_l;
@@ -1516,9 +1906,17 @@ struct virtchnl2_ptp_tx_tstamp_latch {
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
-/* VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latches - PTP TX timestamp latches
+ * @num_latches: Number of latches
+ * @latch_size: Latch size expressed in bits
+ * @pad: Padding
+ * @tstamp_latches: PTP TX timestamp latches
+ *
 * PF/VF sends this message to receive a specified number of timestamp
* entries.
+ *
+ * Associated with VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES.
*/
struct virtchnl2_ptp_tx_tstamp_latches {
__le16 num_latches;
@@ -1613,7 +2011,7 @@ static inline const char *virtchnl2_op_str(__le32 v_opcode)
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * validate msg format against struct for each opcode
+ * Validate msg format against struct for each opcode.
*/
static inline int
virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u32 v_opcode,
@@ -1622,7 +2020,7 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
bool err_msg_format = false;
__le32 valid_len = 0;
- /* Validate message length. */
+ /* Validate message length */
switch (v_opcode) {
case VIRTCHNL2_OP_VERSION:
valid_len = sizeof(struct virtchnl2_version_info);
@@ -1637,7 +2035,7 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
(struct virtchnl2_create_vport *)msg;
if (cvport->chunks.num_chunks == 0) {
- /* zero chunks is allowed as input */
+ /* Zero chunks is allowed as input */
break;
}
@@ -1652,7 +2050,7 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
(struct virtchnl2_non_flex_create_adi *)msg;
if (cadi->chunks.num_chunks == 0) {
- /* zero chunks is allowed as input */
+ /* Zero chunks is allowed as input */
break;
}
@@ -1707,7 +2105,7 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
(struct virtchnl2_add_queues *)msg;
if (add_q->chunks.num_chunks == 0) {
- /* zero chunks is allowed as input */
+ /* Zero chunks is allowed as input */
break;
}
@@ -1734,7 +2132,8 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
case VIRTCHNL2_OP_ADD_QUEUE_GROUPS:
valid_len = sizeof(struct virtchnl2_add_queue_groups);
if (msglen != valid_len) {
- __le32 i = 0, offset = 0;
+ __le64 offset;
+ __le32 i;
struct virtchnl2_add_queue_groups *add_queue_grp =
(struct virtchnl2_add_queue_groups *)msg;
struct virtchnl2_queue_groups *groups = &(add_queue_grp->qg_info);
@@ -1801,7 +2200,7 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
(struct virtchnl2_alloc_vectors *)msg;
if (v_av->vchunks.num_vchunks == 0) {
- /* zero chunks is allowed as input */
+ /* Zero chunks is allowed as input */
break;
}
@@ -1830,7 +2229,7 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
(struct virtchnl2_rss_key *)msg;
if (vrk->key_len == 0) {
- /* zero length is allowed as input */
+ /* Zero length is allowed as input */
break;
}
@@ -1845,7 +2244,7 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
(struct virtchnl2_rss_lut *)msg;
if (vrl->lut_entries == 0) {
- /* zero entries is allowed as input */
+ /* Zero entries is allowed as input */
break;
}
@@ -1902,13 +2301,13 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u3
sizeof(struct virtchnl2_ptp_tx_tstamp_latch));
}
break;
- /* These are always errors coming from the VF. */
+ /* These are always errors coming from the VF */
case VIRTCHNL2_OP_EVENT:
case VIRTCHNL2_OP_UNKNOWN:
default:
return VIRTCHNL2_STATUS_ERR_ESRCH;
}
- /* few more checks */
+ /* Few more checks */
if (err_msg_format || valid_len != msglen)
return VIRTCHNL2_STATUS_ERR_EINVAL;
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2023 Intel Corporation
+ * Copyright(c) 2001-2024 Intel Corporation
*/
/*
* Copyright (C) 2019 Intel Corporation
@@ -12,199 +12,220 @@
/* VIRTCHNL2_TX_DESC_IDS
* Transmit descriptor ID flags
*/
-#define VIRTCHNL2_TXDID_DATA BIT(0)
-#define VIRTCHNL2_TXDID_CTX BIT(1)
-#define VIRTCHNL2_TXDID_REINJECT_CTX BIT(2)
-#define VIRTCHNL2_TXDID_FLEX_DATA BIT(3)
-#define VIRTCHNL2_TXDID_FLEX_CTX BIT(4)
-#define VIRTCHNL2_TXDID_FLEX_TSO_CTX BIT(5)
-#define VIRTCHNL2_TXDID_FLEX_TSYN_L2TAG1 BIT(6)
-#define VIRTCHNL2_TXDID_FLEX_L2TAG1_L2TAG2 BIT(7)
-#define VIRTCHNL2_TXDID_FLEX_TSO_L2TAG2_PARSTAG_CTX BIT(8)
-#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_SA_TSO_CTX BIT(9)
-#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_SA_CTX BIT(10)
-#define VIRTCHNL2_TXDID_FLEX_L2TAG2_CTX BIT(11)
-#define VIRTCHNL2_TXDID_FLEX_FLOW_SCHED BIT(12)
-#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_TSO_CTX BIT(13)
-#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_CTX BIT(14)
-#define VIRTCHNL2_TXDID_DESC_DONE BIT(15)
-
-/* VIRTCHNL2_RX_DESC_IDS
+enum virtchnl2_tx_desc_ids {
+ VIRTCHNL2_TXDID_DATA = BIT(0),
+ VIRTCHNL2_TXDID_CTX = BIT(1),
+ VIRTCHNL2_TXDID_REINJECT_CTX = BIT(2),
+ VIRTCHNL2_TXDID_FLEX_DATA = BIT(3),
+ VIRTCHNL2_TXDID_FLEX_CTX = BIT(4),
+ VIRTCHNL2_TXDID_FLEX_TSO_CTX = BIT(5),
+ VIRTCHNL2_TXDID_FLEX_TSYN_L2TAG1 = BIT(6),
+ VIRTCHNL2_TXDID_FLEX_L2TAG1_L2TAG2 = BIT(7),
+ VIRTCHNL2_TXDID_FLEX_TSO_L2TAG2_PARSTAG_CTX = BIT(8),
+ VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_SA_TSO_CTX = BIT(9),
+ VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_SA_CTX = BIT(10),
+ VIRTCHNL2_TXDID_FLEX_L2TAG2_CTX = BIT(11),
+ VIRTCHNL2_TXDID_FLEX_FLOW_SCHED = BIT(12),
+ VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_TSO_CTX = BIT(13),
+ VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_CTX = BIT(14),
+ VIRTCHNL2_TXDID_DESC_DONE = BIT(15),
+};
+
+/**
+ * VIRTCHNL2_RX_DESC_IDS
* Receive descriptor IDs (range from 0 to 63)
*/
-#define VIRTCHNL2_RXDID_0_16B_BASE 0
-#define VIRTCHNL2_RXDID_1_32B_BASE 1
-/* FLEX_SQ_NIC and FLEX_SPLITQ share desc ids because they can be
- * differentiated based on queue model; e.g. single queue model can
- * only use FLEX_SQ_NIC and split queue model can only use FLEX_SPLITQ
- * for DID 2.
- */
-#define VIRTCHNL2_RXDID_2_FLEX_SPLITQ 2
-#define VIRTCHNL2_RXDID_2_FLEX_SQ_NIC 2
-#define VIRTCHNL2_RXDID_3_FLEX_SQ_SW 3
-#define VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB 4
-#define VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL 5
-#define VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2 6
-#define VIRTCHNL2_RXDID_7_HW_RSVD 7
-/* 9 through 15 are reserved */
-#define VIRTCHNL2_RXDID_16_COMMS_GENERIC 16
-#define VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN 17
-#define VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4 18
-#define VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6 19
-#define VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW 20
-#define VIRTCHNL2_RXDID_21_COMMS_AUX_TCP 21
-/* 22 through 63 are reserved */
-
-/* VIRTCHNL2_RX_DESC_ID_BITMASKS
+enum virtchnl2_rx_desc_ids {
+ VIRTCHNL2_RXDID_0_16B_BASE,
+ VIRTCHNL2_RXDID_1_32B_BASE,
+ /* FLEX_SQ_NIC and FLEX_SPLITQ share desc ids because they can be
+ * differentiated based on queue model; e.g. single queue model can
+ * only use FLEX_SQ_NIC and split queue model can only use FLEX_SPLITQ
+ * for DID 2.
+ */
+ VIRTCHNL2_RXDID_2_FLEX_SPLITQ = 2,
+ VIRTCHNL2_RXDID_2_FLEX_SQ_NIC = VIRTCHNL2_RXDID_2_FLEX_SPLITQ,
+ VIRTCHNL2_RXDID_3_FLEX_SQ_SW = 3,
+ VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB = 4,
+ VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL = 5,
+ VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2 = 6,
+ VIRTCHNL2_RXDID_7_HW_RSVD = 7,
+ /* 9 through 15 are reserved */
+ VIRTCHNL2_RXDID_16_COMMS_GENERIC = 16,
+ VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN = 17,
+ VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4 = 18,
+ VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6 = 19,
+ VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW = 20,
+ VIRTCHNL2_RXDID_21_COMMS_AUX_TCP = 21,
+ /* 22 through 63 are reserved */
+};
+
+/**
+ * VIRTCHNL2_RX_DESC_ID_BITMASKS
* Receive descriptor ID bitmasks
*/
-#define VIRTCHNL2_RXDID_M(bit) BIT(VIRTCHNL2_RXDID_##bit)
-#define VIRTCHNL2_RXDID_0_16B_BASE_M VIRTCHNL2_RXDID_M(0_16B_BASE)
-#define VIRTCHNL2_RXDID_1_32B_BASE_M VIRTCHNL2_RXDID_M(1_32B_BASE)
-#define VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M VIRTCHNL2_RXDID_M(2_FLEX_SPLITQ)
-#define VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M VIRTCHNL2_RXDID_M(2_FLEX_SQ_NIC)
-#define VIRTCHNL2_RXDID_3_FLEX_SQ_SW_M VIRTCHNL2_RXDID_M(3_FLEX_SQ_SW)
-#define VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB_M VIRTCHNL2_RXDID_M(4_FLEX_SQ_NIC_VEB)
-#define VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL_M VIRTCHNL2_RXDID_M(5_FLEX_SQ_NIC_ACL)
-#define VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2_M VIRTCHNL2_RXDID_M(6_FLEX_SQ_NIC_2)
-#define VIRTCHNL2_RXDID_7_HW_RSVD_M VIRTCHNL2_RXDID_M(7_HW_RSVD)
-/* 9 through 15 are reserved */
-#define VIRTCHNL2_RXDID_16_COMMS_GENERIC_M VIRTCHNL2_RXDID_M(16_COMMS_GENERIC)
-#define VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN_M VIRTCHNL2_RXDID_M(17_COMMS_AUX_VLAN)
-#define VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4_M VIRTCHNL2_RXDID_M(18_COMMS_AUX_IPV4)
-#define VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6_M VIRTCHNL2_RXDID_M(19_COMMS_AUX_IPV6)
-#define VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW_M VIRTCHNL2_RXDID_M(20_COMMS_AUX_FLOW)
-#define VIRTCHNL2_RXDID_21_COMMS_AUX_TCP_M VIRTCHNL2_RXDID_M(21_COMMS_AUX_TCP)
-/* 22 through 63 are reserved */
-
-/* Rx */
+#define VIRTCHNL2_RXDID_M(bit) BIT_ULL(VIRTCHNL2_RXDID_##bit)
+
+enum virtchnl2_rx_desc_id_bitmasks {
+ VIRTCHNL2_RXDID_0_16B_BASE_M = VIRTCHNL2_RXDID_M(0_16B_BASE),
+ VIRTCHNL2_RXDID_1_32B_BASE_M = VIRTCHNL2_RXDID_M(1_32B_BASE),
+ VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M = VIRTCHNL2_RXDID_M(2_FLEX_SPLITQ),
+ VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M = VIRTCHNL2_RXDID_M(2_FLEX_SQ_NIC),
+ VIRTCHNL2_RXDID_3_FLEX_SQ_SW_M = VIRTCHNL2_RXDID_M(3_FLEX_SQ_SW),
+ VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB_M = VIRTCHNL2_RXDID_M(4_FLEX_SQ_NIC_VEB),
+ VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL_M = VIRTCHNL2_RXDID_M(5_FLEX_SQ_NIC_ACL),
+ VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2_M = VIRTCHNL2_RXDID_M(6_FLEX_SQ_NIC_2),
+ VIRTCHNL2_RXDID_7_HW_RSVD_M = VIRTCHNL2_RXDID_M(7_HW_RSVD),
+ /* 9 through 15 are reserved */
+ VIRTCHNL2_RXDID_16_COMMS_GENERIC_M = VIRTCHNL2_RXDID_M(16_COMMS_GENERIC),
+ VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN_M = VIRTCHNL2_RXDID_M(17_COMMS_AUX_VLAN),
+ VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4_M = VIRTCHNL2_RXDID_M(18_COMMS_AUX_IPV4),
+ VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6_M = VIRTCHNL2_RXDID_M(19_COMMS_AUX_IPV6),
+ VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW_M = VIRTCHNL2_RXDID_M(20_COMMS_AUX_FLOW),
+ VIRTCHNL2_RXDID_21_COMMS_AUX_TCP_M = VIRTCHNL2_RXDID_M(21_COMMS_AUX_TCP),
+ /* 22 through 63 are reserved */
+};
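+
+/*
+ * Usage note (informal): the CP advertises the descriptor formats it supports
+ * as a 64-bit bitmap (e.g. the rx_desc_ids word exchanged at vport creation
+ * in virtchnl2.h), which a driver tests against these masks:
+ *
+ *	if (rxdids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)
+ *		splitq_flex_supported = true;
+ *	if (rxdids & VIRTCHNL2_RXDID_1_32B_BASE_M)
+ *		base_32b_supported = true;
+ *
+ * This is only an illustration of the bitmap convention, not a prescribed
+ * negotiation flow.
+ */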
+
/* For splitq virtchnl2_rx_flex_desc_adv desc members */
#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_S 0
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M \
- IDPF_M(0xFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M GENMASK(3, 0)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_S 6
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_M GENMASK(7, 6)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S 0
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M \
- IDPF_M(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S)
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_S 10
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_M \
- IDPF_M(0x3UL, VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M GENMASK(9, 0)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_S 12
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_M \
- IDPF_M(0xFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_M GENMASK(15, 13)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S 0
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M \
- IDPF_M(0x3FFFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M GENMASK(13, 0)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S 14
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M \
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M \
BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S 15
#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M \
BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_S 0
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M \
- IDPF_M(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M GENMASK(9, 0)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S 10
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M \
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M \
BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S 11
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_M \
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_M \
BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S 12
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M \
- IDPF_M(0x7UL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M GENMASK(14, 12)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S 15
#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M \
BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S)
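+
+/*
+ * Usage note (informal sketch): these are shift/mask pairs over little-endian
+ * descriptor words. For example, given the 16-bit length/gen/bufq word of a
+ * completed splitq flex descriptor (pktlen_gen_bufq_id in the descriptor
+ * definitions below; the name is shown only for illustration):
+ *
+ *	u16 qw = LE16_TO_CPU(desc->pktlen_gen_bufq_id);
+ *	u16 pkt_len = qw & VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M;
+ *	bool gen = !!(qw & VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
+ *	bool bufq_id = !!(qw & VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
+ */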
-/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_0_QW1_BITS
- * for splitq virtchnl2_rx_flex_desc_adv
+/**
+ * VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_0_QW1_BITS
+ * For splitq virtchnl2_rx_flex_desc_adv
* Note: These are predefined bit offsets
*/
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_DD_S 0
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_S 1
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_S 2
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S 3
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S 4
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S 5
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S 6
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S 7
-
-/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_0_QW0_BITS
- * for splitq virtchnl2_rx_flex_desc_adv
+enum virtchnl2_rx_flex_desc_adv_status_error_0_qw1_bits {
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_DD_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S,
+};
+
+/**
+ * VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_0_QW0_BITS
+ * For splitq virtchnl2_rx_flex_desc_adv
* Note: These are predefined bit offsets
*/
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LPBK_S 0
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_S 1
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RXE_S 2
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_CRCP_S 3
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S 4
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_S 5
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_S 6
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_S 7
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LAST 8 /* this entry must be last!!! */
-
-/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_1_BITS
- * for splitq virtchnl2_rx_flex_desc_adv
+enum virtchnl2_rx_flex_desc_adv_status_error_0_qw0_bits {
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LPBK_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RXE_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_CRCP_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_S,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_S,
+ /* this entry must be last!!! */
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LAST,
+};
+
+/**
+ * VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_1_BITS
+ * For splitq virtchnl2_rx_flex_desc_adv
* Note: These are predefined bit offsets
*/
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_RSVD_S 0 /* 2 bits */
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_S 2
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_S 3
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_S 4
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_S 5
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_S 6
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_S 7
-#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_LAST 8 /* this entry must be last!!! */
-
-/* for singleq (flex) virtchnl2_rx_flex_desc fields */
-/* for virtchnl2_rx_flex_desc.ptype_flex_flags0 member */
+enum virtchnl2_rx_flex_desc_adv_status_error_1_bits {
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_RSVD_S = 0,
+ /* 2 bits */
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_S = 2,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_S = 3,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_S = 4,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_S = 5,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_S = 6,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_S = 7,
+ /* this entry must be last!!! */
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_LAST = 8,
+};
+
+/* For singleq (flex) virtchnl2_rx_flex_desc fields,
+ * i.e. the virtchnl2_rx_flex_desc.ptype_flex_flags0 member
+ */
#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_S 0
-#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_M \
- IDPF_M(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_PTYPE_S) /* 10 bits */
+#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_M GENMASK(9, 0)
-/* for virtchnl2_rx_flex_desc.pkt_length member */
-#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_S 0
-#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M \
- IDPF_M(0x3FFFUL, VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_S) /* 14 bits */
+/* For virtchnl2_rx_flex_desc.pkt_len member */
+#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_S 0
+#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M GENMASK(13, 0)
-/* VIRTCHNL2_RX_FLEX_DESC_STATUS_ERROR_0_BITS
- * for singleq (flex) virtchnl2_rx_flex_desc
+/**
+ * VIRTCHNL2_RX_FLEX_DESC_STATUS_ERROR_0_BITS
+ * For singleq (flex) virtchnl2_rx_flex_desc
* Note: These are predefined bit offsets
*/
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S 0
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_EOF_S 1
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_HBO_S 2
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_S 3
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S 4
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S 5
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S 6
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S 7
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_LPBK_S 8
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_S 9
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_RXE_S 10
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_CRCP_S 11
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_S 12
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_L2TAG1P_S 13
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S 14
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S 15
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_LAST 16 /* this entry must be last!!! */
-
-/* VIRTCHNL2_RX_FLEX_DESC_STATUS_ERROR_1_BITS
- * for singleq (flex) virtchnl2_rx_flex_desc
+enum virtchnl2_rx_flex_desc_status_error_0_bits {
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_EOF_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_HBO_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_LPBK_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_RXE_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_CRCP_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
+ /* this entry must be last!!! */
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_LAST,
+};
+
+/**
+ * VIRTCHNL2_RX_FLEX_DESC_STATUS_ERROR_1_BITS
+ * For singleq (flex) virtchnl2_rx_flex_desc
* Note: These are predefined bit offsets
*/
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_CPM_S 0 /* 4 bits */
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_S 4
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_CRYPTO_S 5
-/* [10:6] reserved */
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_L2TAG2P_S 11
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S 12
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S 13
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S 14
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S 15
-#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_LAST 16 /* this entry must be last!!! */
-
-/* for virtchnl2_rx_flex_desc.ts_low member */
+enum virtchnl2_rx_flex_desc_status_error_1_bits {
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_CPM_S = 0,
+ /* 4 bits */
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+ /* [10:6] reserved */
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
+ /* this entry must be last!!! */
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_LAST = 16,
+};
+
+/* For virtchnl2_rx_flex_desc.ts_low member */
#define VIRTCHNL2_RX_FLEX_TSTAMP_VALID BIT(0)
/* For singleq (non flex) virtchnl2_singleq_base_rx_desc legacy desc members */
@@ -212,72 +233,89 @@
#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_M \
BIT_ULL(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_S)
#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_S 52
-#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_M \
- IDPF_M(0x7FFULL, VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_M GENMASK_ULL(62, 52)
#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_S 38
-#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M \
- IDPF_M(0x3FFFULL, VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M GENMASK_ULL(51, 38)
#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_S 30
-#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M \
- IDPF_M(0xFFULL, VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M GENMASK_ULL(37, 30)
#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_S 19
-#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M \
- IDPF_M(0xFFUL, VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M GENMASK_ULL(26, 19)
#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_S 0
-#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M \
- IDPF_M(0x7FFFFUL, VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M GENMASK_ULL(18, 0)
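+
+/*
+ * Usage note (informal sketch): qword1 of the base descriptor packs status,
+ * error, ptype and buffer lengths. A driver typically recovers them as
+ *
+ *	u64 qw1 = LE64_TO_CPU(rx_desc->qword1.status_error_ptype_len);
+ *	u32 status = (qw1 & VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M) >>
+ *		     VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_S;
+ *	u32 error = (qw1 & VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M) >>
+ *		    VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_S;
+ *	u32 ptype = (qw1 & VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M) >>
+ *		    VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_S;
+ *	u32 pkt_len = (qw1 & VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M) >>
+ *		      VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_S;
+ *
+ * The member spelling follows virtchnl2_singleq_base_rx_desc further down and
+ * is shown here only as an illustration.
+ */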
-/* VIRTCHNL2_RX_BASE_DESC_STATUS_BITS
- * for singleq (base) virtchnl2_rx_base_desc
+/**
+ * VIRTCHNL2_RX_BASE_DESC_STATUS_BITS
+ * For singleq (base) virtchnl2_rx_base_desc
* Note: These are predefined bit offsets
*/
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_DD_S 0
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_S 1
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_L2TAG1P_S 2
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_S 3
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_CRCP_S 4
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD_S 5 /* 3 bits */
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_EXT_UDP_0_S 8
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_UMBCAST_S 9 /* 2 bits */
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_FLM_S 11
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_FLTSTAT_S 12 /* 2 bits */
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_LPBK_S 14
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_S 15
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD1_S 16 /* 2 bits */
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_INT_UDP_0_S 18
-#define VIRTCHNL2_RX_BASE_DESC_STATUS_LAST 19 /* this entry must be last!!! */
-
-/* VIRTCHNL2_RX_BASE_DESC_EXT_STATUS_BITS
- * for singleq (base) virtchnl2_rx_base_desc
+enum virtchnl2_rx_base_desc_status_bits {
+ VIRTCHNL2_RX_BASE_DESC_STATUS_DD_S = 0,
+ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_S = 1,
+ VIRTCHNL2_RX_BASE_DESC_STATUS_L2TAG1P_S = 2,
+ VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_S = 3,
+ VIRTCHNL2_RX_BASE_DESC_STATUS_CRCP_S = 4,
+ VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD_S = 5, /* 3 bits */
+ VIRTCHNL2_RX_BASE_DESC_STATUS_EXT_UDP_0_S = 8,
+ VIRTCHNL2_RX_BASE_DESC_STATUS_UMBCAST_S = 9, /* 2 bits */
+ VIRTCHNL2_RX_BASE_DESC_STATUS_FLM_S = 11,
+ VIRTCHNL2_RX_BASE_DESC_STATUS_FLTSTAT_S = 12, /* 2 bits */
+ VIRTCHNL2_RX_BASE_DESC_STATUS_LPBK_S = 14,
+ VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_S = 15,
+ VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD1_S = 16, /* 2 bits */
+ VIRTCHNL2_RX_BASE_DESC_STATUS_INT_UDP_0_S = 18,
+ VIRTCHNL2_RX_BASE_DESC_STATUS_LAST = 19, /* this entry must be last!!! */
+};
+
+/**
+ * VIRTCHNL2_RX_BASE_DESC_EXT_STATUS_BITS
+ * For singleq (base) virtchnl2_rx_base_desc
* Note: These are predefined bit offsets
*/
-#define VIRTCHNL2_RX_BASE_DESC_EXT_STATUS_L2TAG2P_S 0
+enum virtchnl2_rx_base_desc_ext_status_bits {
+ VIRTCHNL2_RX_BASE_DESC_EXT_STATUS_L2TAG2P_S,
+};
-/* VIRTCHNL2_RX_BASE_DESC_ERROR_BITS
- * for singleq (base) virtchnl2_rx_base_desc
+/**
+ * VIRTCHNL2_RX_BASE_DESC_ERROR_BITS
+ * For singleq (base) virtchnl2_rx_base_desc
* Note: These are predefined bit offsets
*/
-#define VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_S 0
-#define VIRTCHNL2_RX_BASE_DESC_ERROR_ATRAEFAIL_S 1
-#define VIRTCHNL2_RX_BASE_DESC_ERROR_HBO_S 2
-#define VIRTCHNL2_RX_BASE_DESC_ERROR_L3L4E_S 3 /* 3 bits */
-#define VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_S 3
-#define VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_S 4
-#define VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_S 5
-#define VIRTCHNL2_RX_BASE_DESC_ERROR_OVERSIZE_S 6
-#define VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_S 7
-
-/* VIRTCHNL2_RX_BASE_DESC_FLTSTAT_VALUES
- * for singleq (base) virtchnl2_rx_base_desc
+enum virtchnl2_rx_base_desc_error_bits {
+ VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_S = 0,
+ VIRTCHNL2_RX_BASE_DESC_ERROR_ATRAEFAIL_S = 1,
+ VIRTCHNL2_RX_BASE_DESC_ERROR_HBO_S = 2,
+ VIRTCHNL2_RX_BASE_DESC_ERROR_L3L4E_S = 3, /* 3 bits */
+ VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_S = 3,
+ VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_S = 4,
+ VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_S = 5,
+ VIRTCHNL2_RX_BASE_DESC_ERROR_OVERSIZE_S = 6,
+ VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_S = 7,
+};
+
+/**
+ * enum virtchnl2_rx_base_desc_fltstat_values - For singleq (base)
+ * virtchnl2_rx_base_desc
* Note: These are predefined bit offsets
*/
-#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_NO_DATA 0
-#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_FD_ID 1
-#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSV 2
-#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH 3
+enum virtchnl2_rx_base_desc_fltstat_values {
+ VIRTCHNL2_RX_BASE_DESC_FLTSTAT_NO_DATA = 0,
+ VIRTCHNL2_RX_BASE_DESC_FLTSTAT_FD_ID = 1,
+ VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSV = 2,
+ VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH = 3,
+};
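
[Editorial aside, not part of the patch: FLTSTAT is a two-bit value starting at FLTSTAT_S inside the QW1 status field and tells the driver how to interpret qword0.hi_dword of the base descriptor (RSS hash vs. flow director filter id). A hedged sketch:]

	/* Example only: true when hi_dword carries an RSS hash rather than
	 * a flow director filter id.
	 */
	static inline bool example_base_rx_has_rss(u64 qw1)
	{
		u64 status = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M, qw1);
		u32 fltstat = (status >> VIRTCHNL2_RX_BASE_DESC_STATUS_FLTSTAT_S) & 0x3;

		return fltstat == VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH;
	}
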
-/* Receive Descriptors */
-/* splitq buf
+/**
+ * struct virtchnl2_splitq_rx_buf_desc - SplitQ RX buffer descriptor format
+ * @qword0: RX buffer struct
+ * @qword0.buf_id: Buffer identifier
+ * @qword0.rsvd0: Reserved
+ * @qword0.rsvd1: Reserved
+ * @pkt_addr: Packet buffer address
+ * @hdr_addr: Header buffer address
+ * @rsvd2: Reserved
+ *
+ * Receive Descriptors
+ * SplitQ buffer
* | 16| 0|
* ----------------------------------------------------------------
* | RSV | Buffer ID |
@@ -292,16 +330,23 @@
*/
struct virtchnl2_splitq_rx_buf_desc {
struct {
- __le16 buf_id; /* Buffer Identifier */
+ __le16 buf_id;
__le16 rsvd0;
__le32 rsvd1;
} qword0;
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
+ __le64 pkt_addr;
+ __le64 hdr_addr;
__le64 rsvd2;
-}; /* read used with buffer queues*/
+};
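
[Editorial aside, not part of the patch: for illustration only, a driver posting buffers to a split-queue buffer ring might fill the descriptor as below; cpu_to_le16()/cpu_to_le64() keep it little-endian as this header requires, and the addresses are assumed to be DMA-mapped.]

	static inline void
	example_post_splitq_rx_buf(struct virtchnl2_splitq_rx_buf_desc *desc,
				   u16 buf_id, dma_addr_t pkt_dma,
				   dma_addr_t hdr_dma)
	{
		desc->qword0.buf_id = cpu_to_le16(buf_id);
		desc->qword0.rsvd0 = 0;
		desc->qword0.rsvd1 = 0;
		desc->pkt_addr = cpu_to_le64(pkt_dma);
		desc->hdr_addr = cpu_to_le64(hdr_dma);
		desc->rsvd2 = 0;
	}
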
-/* singleq buf
+/**
+ * struct virtchnl2_singleq_rx_buf_desc - SingleQ RX buffer descriptor format
+ * @pkt_addr: Packet buffer address
+ * @hdr_addr: Header buffer address
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ *
+ * SingleQ buffer
* | 0|
* ----------------------------------------------------------------
* | Rx packet buffer address |
@@ -315,18 +360,44 @@ struct virtchnl2_splitq_rx_buf_desc {
* | 0|
*/
struct virtchnl2_singleq_rx_buf_desc {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
+ __le64 pkt_addr;
+ __le64 hdr_addr;
__le64 rsvd1;
__le64 rsvd2;
-}; /* read used with buffer queues*/
+};
+/**
+ * union virtchnl2_rx_buf_desc - RX buffer descriptor
+ * @read: Singleq RX buffer descriptor format
+ * @split_rd: Splitq RX buffer descriptor format
+ */
union virtchnl2_rx_buf_desc {
struct virtchnl2_singleq_rx_buf_desc read;
struct virtchnl2_splitq_rx_buf_desc split_rd;
};
-/* (0x00) singleq wb(compl) */
+/**
+ * struct virtchnl2_singleq_base_rx_desc - RX descriptor writeback format
+ * @qword0: First quad word struct
+ * @qword0.lo_dword: Lower dual word struct
+ * @qword0.lo_dword.mirroring_status: Mirrored packet status
+ * @qword0.lo_dword.l2tag1: Stripped L2 tag from the received packet
+ * @qword0.hi_dword: High dual word union
+ * @qword0.hi_dword.rss: RSS hash
+ * @qword0.hi_dword.fd_id: Flow director filter id
+ * @qword1: Second quad word struct
+ * @qword1.status_error_ptype_len: Status/error/PTYPE/length
+ * @qword2: Third quad word struct
+ * @qword2.ext_status: Extended status
+ * @qword2.rsvd: Reserved
+ * @qword2.l2tag2_1: Extracted L2 tag 2 from the packet
+ * @qword2.l2tag2_2: Reserved
+ * @qword3: Fourth quad word struct
+ * @qword3.reserved: Reserved
+ * @qword3.fd_id: Flow director filter id
+ *
+ * Profile ID 0x1, SingleQ, base writeback format.
+ */
struct virtchnl2_singleq_base_rx_desc {
struct {
struct {
@@ -334,16 +405,15 @@ struct virtchnl2_singleq_base_rx_desc {
__le16 l2tag1;
} lo_dword;
union {
- __le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow Director filter id */
+ __le32 rss;
+ __le32 fd_id;
} hi_dword;
} qword0;
struct {
- /* status/error/PTYPE/length */
__le64 status_error_ptype_len;
} qword1;
struct {
- __le16 ext_status; /* extended status */
+ __le16 ext_status;
__le16 rsvd;
__le16 l2tag2_1;
__le16 l2tag2_2;
@@ -352,19 +422,40 @@ struct virtchnl2_singleq_base_rx_desc {
__le32 reserved;
__le32 fd_id;
} qword3;
-}; /* writeback */
+};
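
[Editorial aside, not part of the patch: tying the QW1 masks to the struct above, a hypothetical completion check would first convert the little-endian word, then test DD and read the length, for example:]

	static inline bool
	example_singleq_base_rx_done(const struct virtchnl2_singleq_base_rx_desc *desc,
				     u32 *pkt_len)
	{
		u64 qw1 = le64_to_cpu(desc->qword1.status_error_ptype_len);

		if (!(FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M, qw1) &
		      BIT(VIRTCHNL2_RX_BASE_DESC_STATUS_DD_S)))
			return false;

		*pkt_len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qw1);
		return true;
	}
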
-/* (0x01) singleq flex compl */
+/**
+ * struct virtchnl2_rx_flex_desc - RX descriptor writeback format
+ * @rxdid: Descriptor builder profile id
+ * @mir_id_umb_cast: umb_cast=[7:6], mirror=[5:0]
+ * @ptype_flex_flags0: ff0=[15:10], ptype=[9:0]
+ * @pkt_len: Packet length, [15:14] are reserved
+ * @hdr_len_sph_flex_flags1: ff1/ext=[15:12], sph=[11], header=[10:0]
+ * @status_error0: Status/Error section 0
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @flex_meta0: Flexible metadata container 0
+ * @flex_meta1: Flexible metadata container 1
+ * @status_error1: Status/Error section 1
+ * @flex_flags2: Flexible flags section 2
+ * @time_stamp_low: Lower word of timestamp value
+ * @l2tag2_1st: First L2TAG2
+ * @l2tag2_2nd: Second L2TAG2
+ * @flex_meta2: Flexible metadata container 2
+ * @flex_meta3: Flexible metadata container 3
+ * @flex_ts: Timestamp and flexible flow id union
+ * @flex_ts.flex.flex_meta4: Flexible metadata container 4
+ * @flex_ts.flex.flex_meta5: Flexible metadata container 5
+ * @flex_ts.ts_high: Higher word of the timestamp value
+ *
+ * Profile ID 0x1, SingleQ, flex completion writeback format.
+ */
struct virtchnl2_rx_flex_desc {
/* Qword 0 */
- u8 rxdid; /* descriptor builder profile id */
- u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
- __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
- __le16 pkt_len; /* [15:14] are reserved */
- __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
- /* sph=[11:11] */
- /* ff1/ext=[15:12] */
-
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flex_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
@@ -390,7 +481,29 @@ struct virtchnl2_rx_flex_desc {
} flex_ts;
};
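
[Editorial aside, not part of the patch: the Qword 0 words of the flex format pack several sub-fields; as a sketch, a receive path might decode them as below. The GENMASK() values are illustrative locals that mirror the layout documented above, not definitions from this header.]

	static inline u16 example_flex_rx_ptype(const struct virtchnl2_rx_flex_desc *desc)
	{
		/* ptype = [9:0], ff0 = [15:10] */
		return FIELD_GET(GENMASK(9, 0), le16_to_cpu(desc->ptype_flex_flags0));
	}

	static inline u16 example_flex_rx_pkt_len(const struct virtchnl2_rx_flex_desc *desc)
	{
		/* [15:14] are reserved */
		return FIELD_GET(GENMASK(13, 0), le16_to_cpu(desc->pkt_len));
	}
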
-/* (0x02) */
+/**
+ * struct virtchnl2_rx_flex_desc_nic - RX descriptor writeback format
+ * @rxdid: Descriptor builder profile id
+ * @mir_id_umb_cast: umb_cast=[7:6], mirror=[5:0]
+ * @ptype_flex_flags0: ff0=[15:10], ptype=[9:0]
+ * @pkt_len: Packet length, [15:14] are reserved
+ * @hdr_len_sph_flex_flags1: ff1/ext=[15:12], sph=[11], header=[10:0]
+ * @status_error0: Status/Error section 0
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @rss_hash: RSS hash
+ * @status_error1: Status/Error section 1
+ * @flexi_flags2: Flexible flags section 2
+ * @ts_low: Lower word of timestamp value
+ * @l2tag2_1st: First L2TAG2
+ * @l2tag2_2nd: Second L2TAG2
+ * @flow_id: Flow id
+ * @flex_ts: Timestamp and flexible flow id union
+ * @flex_ts.flex.rsvd: Reserved
+ * @flex_ts.flex.flow_id_ipv6: IPv6 flow id
+ * @flex_ts.ts_high: Higher word of the timestamp value
+ *
+ * Profile ID 0x2, SingleQ, flex writeback format.
+ */
struct virtchnl2_rx_flex_desc_nic {
/* Qword 0 */
u8 rxdid;
@@ -422,8 +535,27 @@ struct virtchnl2_rx_flex_desc_nic {
} flex_ts;
};
-/* Rx Flex Descriptor Switch Profile
- * RxDID Profile Id 3
+/**
+ * struct virtchnl2_rx_flex_desc_sw - RX descriptor writeback format
+ * @rxdid: Descriptor builder profile id
+ * @mir_id_umb_cast: umb_cast=[7:6], mirror=[5:0]
+ * @ptype_flex_flags0: ff0=[15:10], ptype=[9:0]
+ * @pkt_len: Packet length, [15:14] are reserved
+ * @hdr_len_sph_flex_flags1: ff1/ext=[15:12], sph=[11], header=[10:0]
+ * @status_error0: Status/Error section 0
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @src_vsi: Source VSI, [10:15] are reserved
+ * @flex_md1_rsvd: Flexible metadata container 1
+ * @status_error1: Status/Error section 1
+ * @flex_flags2: Flexible flags section 2
+ * @ts_low: Lower word of timestamp value
+ * @l2tag2_1st: First L2TAG2
+ * @l2tag2_2nd: Second L2TAG2
+ * @rsvd: Reserved
+ * @ts_high: Higher word of the timestamp value
+ *
+ * Rx Flex Descriptor Switch Profile
+ * RxDID Profile ID 0x3, SingleQ
* Flex-field 0: Source Vsi
*/
struct virtchnl2_rx_flex_desc_sw {
@@ -437,9 +569,55 @@ struct virtchnl2_rx_flex_desc_sw {
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
- __le16 src_vsi; /* [10:15] are reserved */
+ __le16 src_vsi;
__le16 flex_md1_rsvd;
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flex_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+ /* Qword 3 */
+ __le32 rsvd;
+ __le32 ts_high;
+};
+#ifndef EXTERNAL_RELEASE
+/**
+ * struct virtchnl2_rx_flex_desc_nic_veb_dbg - RX descriptor writeback format
+ * @rxdid: Descriptor builder profile id
+ * @mir_id_umb_cast: umb_cast=[7:6], mirror=[5:0]
+ * @ptype_flex_flags0: ff0=[15:10], ptype=[9:0]
+ * @pkt_len: Packet length, [15:14] are reserved
+ * @hdr_len_sph_flex_flags1: ff1/ext=[15:12], sph=[11], header=[10:0]
+ * @status_error0: Status/Error section 0
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @dst_vsi: Destination VSI, [10:15] are reserved
+ * @flex_field_1: Flexible metadata container 1
+ * @status_error1: Status/Error section 1
+ * @flex_flags2: Flexible flags section 2
+ * @ts_low: Lower word of timestamp value
+ * @l2tag2_1st: First L2TAG2
+ * @l2tag2_2nd: Second L2TAG2
+ * @rsvd: Flex words 2-3 are reserved
+ * @ts_high: Higher word of the timestamp value
+ *
+ * Rx Flex Descriptor NIC VEB Profile
+ * RxDID Profile ID 0x4
+ * Flex-field 0: Destination Vsi
+ */
+struct virtchnl2_rx_flex_desc_nic_veb_dbg {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flex_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le16 dst_vsi;
+ __le16 flex_field_1;
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
@@ -448,13 +626,85 @@ struct virtchnl2_rx_flex_desc_sw {
__le16 l2tag2_2nd;
/* Qword 3 */
- __le32 rsvd; /* flex words 2-3 are reserved */
+ __le32 rsvd;
__le32 ts_high;
};
-
-/* Rx Flex Descriptor NIC Profile
- * RxDID Profile Id 6
+/**
+ * struct virtchnl2_rx_flex_desc_nic_acl_dbg - RX descriptor writeback format
+ * @rxdid: Descriptor builder profile id
+ * @mir_id_umb_cast: umb_cast=[7:6], mirror=[5:0]
+ * @ptype_flex_flags0: ff0=[15:10], ptype=[9:0]
+ * @pkt_len: Packet length, [15:14] are reserved
+ * @hdr_len_sph_flex_flags1: ff1/ext=[15:12], sph=[11], header=[10:0]
+ * @status_error0: Status/Error section 0
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @acl_ctr0: ACL counter 0
+ * @acl_ctr1: ACL counter 1
+ * @status_error1: Status/Error section 1
+ * @flex_flags2: Flexible flags section 2
+ * @ts_low: Lower word of timestamp value
+ * @l2tag2_1st: First L2TAG2
+ * @l2tag2_2nd: Second L2TAG2
+ * @acl_ctr2: ACL counter 2
+ * @rsvd: Flex words 2-3 are reserved
+ * @ts_high: Higher word of the timestamp value
+ *
+ * Rx Flex Descriptor NIC ACL Profile
+ * RxDID Profile ID 0x5
+ * Flex-field 0: ACL Counter 0
+ * Flex-field 1: ACL Counter 1
+ * Flex-field 2: ACL Counter 2
+ */
+struct virtchnl2_rx_flex_desc_nic_acl_dbg {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flex_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le16 acl_ctr0;
+ __le16 acl_ctr1;
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flex_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+ /* Qword 3 */
+ __le16 acl_ctr2;
+ __le16 rsvd;
+ __le32 ts_high;
+};
+#endif /* !EXTERNAL_RELEASE */
+
+/**
+ * struct virtchnl2_rx_flex_desc_nic_2 - RX descriptor writeback format
+ * @rxdid: Descriptor builder profile id
+ * @mir_id_umb_cast: umb_cast=[7:6], mirror=[5:0]
+ * @ptype_flex_flags0: ff0=[15:10], ptype=[9:0]
+ * @pkt_len: Packet length, [15:14] are reserved
+ * @hdr_len_sph_flex_flags1: ff1/ext=[15:12], sph=[11], header=[10:0]
+ * @status_error0: Status/Error section 0
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @rss_hash: RSS hash
+ * @status_error1: Status/Error section 1
+ * @flexi_flags2: Flexible flags section 2
+ * @ts_low: Lower word of timestamp value
+ * @l2tag2_1st: First L2TAG2
+ * @l2tag2_2nd: Second L2TAG2
+ * @flow_id: Flow id
+ * @src_vsi: Source VSI
+ * @flex_ts: Timestamp and flexible flow id union
+ * @flex_ts.flex.rsvd: Reserved
+ * @flex_ts.flex.flow_id_ipv6: IPv6 flow id
+ * @flex_ts.ts_high: Higher word of the timestamp value
+ *
+ * Rx Flex Descriptor NIC Profile
+ * RxDID Profile ID 0x6
* Flex-field 0: RSS hash lower 16-bits
* Flex-field 1: RSS hash upper 16-bits
* Flex-field 2: Flow Id lower 16-bits
@@ -493,29 +743,43 @@ struct virtchnl2_rx_flex_desc_nic_2 {
} flex_ts;
};
-/* Rx Flex Descriptor Advanced (Split Queue Model)
- * RxDID Profile Id 7
+/**
+ * struct virtchnl2_rx_flex_desc_adv - RX descriptor writeback format
+ * @rxdid_ucast: ucast=[7:6], rsvd=[5:4], profile_id=[3:0]
+ * @status_err0_qw0: Status/Error section 0 in quad word 0
+ * @ptype_err_fflags0: ff0=[15:12], udp_len_err=[11], ip_hdr_err=[10],
+ * ptype=[9:0]
+ * @pktlen_gen_bufq_id: bufq_id=[15] only in splitq, gen=[14] only in splitq,
+ * plen=[13:0]
+ * @hdrlen_flags: miss_prepend=[15], trunc_mirr=[14], int_udp_0=[13],
+ * ext_udp_0=[12], sph=[11] only in splitq, rsc=[10]
+ * only in splitq, header=[9:0]
+ * @status_err0_qw1: Status/Error section 0 in quad word 1
+ * @status_err1: Status/Error section 1
+ * @fflags1: Flexible flags section 1
+ * @ts_low: Lower word of timestamp value
+ * @fmd0: Flexible metadata container 0
+ * @fmd1: Flexible metadata container 1
+ * @fmd2: Flexible metadata container 2
+ * @fflags2: Flexible flags section 2
+ * @hash3: Upper bits of Rx hash value
+ * @fmd3: Flexible metadata container 3
+ * @fmd4: Flexible metadata container 4
+ * @fmd5: Flexible metadata container 5
+ * @fmd6: Flexible metadata container 6
+ * @fmd7_0: Flexible metadata container 7.0
+ * @fmd7_1: Flexible metadata container 7.1
+ *
+ * Rx Flex Descriptor Advanced (Split Queue Model)
+ * RxDID Profile ID 0x2
*/
struct virtchnl2_rx_flex_desc_adv {
/* Qword 0 */
- u8 rxdid_ucast; /* profile_id=[3:0] */
- /* rsvd=[5:4] */
- /* ucast=[7:6] */
+ u8 rxdid_ucast;
u8 status_err0_qw0;
- __le16 ptype_err_fflags0; /* ptype=[9:0] */
- /* ip_hdr_err=[10:10] */
- /* udp_len_err=[11:11] */
- /* ff0=[15:12] */
- __le16 pktlen_gen_bufq_id; /* plen=[13:0] */
- /* gen=[14:14] only in splitq */
- /* bufq_id=[15:15] only in splitq */
- __le16 hdrlen_flags; /* header=[9:0] */
- /* rsc=[10:10] only in splitq */
- /* sph=[11:11] only in splitq */
- /* ext_udp_0=[12:12] */
- /* int_udp_0=[13:13] */
- /* trunc_mirr=[14:14] */
- /* miss_prepend=[15:15] */
+ __le16 ptype_err_fflags0;
+ __le16 pktlen_gen_bufq_id;
+ __le16 hdrlen_flags;
/* Qword 1 */
u8 status_err0_qw1;
u8 status_err1;
@@ -534,10 +798,42 @@ struct virtchnl2_rx_flex_desc_adv {
__le16 fmd6;
__le16 fmd7_0;
__le16 fmd7_1;
-}; /* writeback */
+};
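
[Editorial aside, not part of the patch: in the split-queue model the pktlen_gen_bufq_id word carries the length plus the generation and buffer-queue bits. The EXAMPLE_* masks below are illustrative locals that mirror the documented layout, not part of this interface.]

	#define EXAMPLE_ADV_RX_PLEN_M		GENMASK(13, 0)
	#define EXAMPLE_ADV_RX_GEN_M		BIT(14)
	#define EXAMPLE_ADV_RX_BUFQ_ID_M	BIT(15)

	static inline bool example_adv_rx_gen(const struct virtchnl2_rx_flex_desc_adv *desc)
	{
		/* Typically compared against a per-ring generation flag to
		 * detect newly written-back descriptors.
		 */
		return FIELD_GET(EXAMPLE_ADV_RX_GEN_M,
				 le16_to_cpu(desc->pktlen_gen_bufq_id));
	}

	static inline u16 example_adv_rx_pkt_len(const struct virtchnl2_rx_flex_desc_adv *desc)
	{
		return FIELD_GET(EXAMPLE_ADV_RX_PLEN_M,
				 le16_to_cpu(desc->pktlen_gen_bufq_id));
	}
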
-/* Rx Flex Descriptor Advanced (Split Queue Model) NIC Profile
- * RxDID Profile Id 8
+/**
+ * struct virtchnl2_rx_flex_desc_adv_nic_3 - RX descriptor writeback format
+ * @rxdid_ucast: ucast=[7:6], rsvd=[5:4], profile_id=[3:0]
+ * @status_err0_qw0: Status/Error section 0 in quad word 0
+ * @ptype_err_fflags0: ff0=[15:12], udp_len_err=[11], ip_hdr_err=[10],
+ * ptype=[9:0]
+ * @pktlen_gen_bufq_id: bufq_id=[15] only in splitq, gen=[14] only in splitq,
+ * plen=[13:0]
+ * @hdrlen_flags: miss_prepend=[15], trunc_mirr=[14], int_udp_0=[13],
+ * ext_udp_0=[12], sph=[11] only in splitq, rsc=[10]
+ * only in splitq, header=[9:0]
+ * @status_err0_qw1: Status/Error section 0 in quad word 1
+ * @status_err1: Status/Error section 1
+ * @fflags1: Flexible flags section 1
+ * @ts_low: Lower word of timestamp value
+ * @buf_id: Buffer identifier. Only in splitq mode.
+ * @misc: Union
+ * @misc.raw_cs: Raw checksum
+ * @misc.l2tag1: Stripped L2 tag from the received packet
+ * @misc.rscseglen: RSC segment length
+ * @hash1: Lower 16 bits of Rx hash value, hash[15:0]
+ * @ff2_mirrid_hash2: Union
+ * @ff2_mirrid_hash2.fflags2: Flexible flags section 2
+ * @ff2_mirrid_hash2.mirrorid: Mirror id
+ * @ff2_mirrid_hash2.hash2: 8 bits of Rx hash value, hash[23:16]
+ * @hash3: Upper 8 bits of Rx hash value, hash[31:24]
+ * @l2tag2: Extracted L2 tag 2 from the packet
+ * @fmd4: Flexible metadata container 4
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @fmd6: Flexible metadata container 6
+ * @ts_high: Higher word of the timestamp value
+ *
+ * Profile ID 0x2, SplitQ, flex writeback format.
+ *
* Flex-field 0: BufferID
* Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW)
* Flex-field 2: Hash[15:0]
@@ -548,30 +844,17 @@ struct virtchnl2_rx_flex_desc_adv {
*/
struct virtchnl2_rx_flex_desc_adv_nic_3 {
/* Qword 0 */
- u8 rxdid_ucast; /* profile_id=[3:0] */
- /* rsvd=[5:4] */
- /* ucast=[7:6] */
+ u8 rxdid_ucast;
u8 status_err0_qw0;
- __le16 ptype_err_fflags0; /* ptype=[9:0] */
- /* ip_hdr_err=[10:10] */
- /* udp_len_err=[11:11] */
- /* ff0=[15:12] */
- __le16 pktlen_gen_bufq_id; /* plen=[13:0] */
- /* gen=[14:14] only in splitq */
- /* bufq_id=[15:15] only in splitq */
- __le16 hdrlen_flags; /* header=[9:0] */
- /* rsc=[10:10] only in splitq */
- /* sph=[11:11] only in splitq */
- /* ext_udp_0=[12:12] */
- /* int_udp_0=[13:13] */
- /* trunc_mirr=[14:14] */
- /* miss_prepend=[15:15] */
+ __le16 ptype_err_fflags0;
+ __le16 pktlen_gen_bufq_id;
+ __le16 hdrlen_flags;
/* Qword 1 */
u8 status_err0_qw1;
u8 status_err1;
u8 fflags1;
u8 ts_low;
- __le16 buf_id; /* only in splitq */
+ __le16 buf_id;
union {
__le16 raw_cs;
__le16 l2tag1;
@@ -591,7 +874,7 @@ struct virtchnl2_rx_flex_desc_adv_nic_3 {
__le16 l2tag1;
__le16 fmd6;
__le32 ts_high;
-}; /* writeback */
+};
union virtchnl2_rx_desc {
struct virtchnl2_singleq_rx_buf_desc read;